diff --git "a/781.jsonl" "b/781.jsonl" new file mode 100644--- /dev/null +++ "b/781.jsonl" @@ -0,0 +1,639 @@ +{"seq_id":"263291268","text":"from flask import json\nfrom dicttoxml import dicttoxml\nfrom xml.dom.minidom import parseString\nfrom xml.etree import ElementTree\nfrom collections import OrderedDict\nfrom workalendar.africa import SouthAfrica\nfrom datetime import datetime, timedelta\nfrom zeep import Client\nfrom passlib.hash import des_crypt\nimport hashlib\nimport configparser\nimport re\nimport pymysql\nimport threading\nimport emailclient\n\n# RUN SERVER\nhost_path = '/var/www/franc/franc/'\nmode = 'francdb'\n\n# RUN LOCALHOST\n# host_path = '/Users/tpb/dev/franc/server/'\n# mode = 'franctest'\n\n# CONFIG\nconfigParser = configparser.RawConfigParser()\nconfigParser.read(host_path+'config.ini')\n\ndbhost = configParser.get(mode,'host');\ndbuser = configParser.get(mode,'user');\ndbpswd = configParser.get(mode,'password');\ndbname = configParser.get(mode,'db');\n\nmode = 'email'\nemail_adm = configParser.get(mode,'email_adm')\nemail_err = configParser.get(mode,'email_err')\nemail_hlp = configParser.get(mode,'email_hlp')\nemail_usr = configParser.get(mode,'email_usr')\nemail_pwd = configParser.get(mode,'email_pwd')\nemail_host = configParser.get(mode,'email_host')\nemail_port = configParser.get(mode,'email_port')\n\nmode = 'debit'\ndebit_api = configParser.get(mode,'debit_api')\ndebit_mno = configParser.get(mode,'debit_mno')\ndebit_aid = configParser.get(mode,'debit_aid')\n\nmode = 'enum'\nfund_enum = json.loads(configParser.get(mode,'fund_enum'))\ntranstype_enum = json.loads(configParser.get(mode,'transtype_enum'))\ntransstatus_enum = json.loads(configParser.get(mode,'transstatus_enum'))\n\n# EMAIL CLIENT\neclient = emailclient.EmailClient(email_host, email_port, email_usr, email_pwd)\n\nif __name__ == '__main__':\n\n # INIT\n db = 0\n tid = 0\n todaydate = datetime.today()\n nowstr = (datetime.today()).strftime('%Y-%m-%d %H:%M:%S')\n todaystr = (datetime.today()).strftime('%Y-%m-%d')\n # actiondate = datetime.today()+timedelta(days=7)\n actiondate = datetime.today()+timedelta(days=7)\n cal = SouthAfrica()\n\n try: \n debitday = str(todaydate.day)\n\n db = pymysql.connect(dbhost,dbuser,dbpswd,dbname)\n cursor = db.cursor()\n query = \"SELECT u.id uid, u.firstname, u.lastname, d.amount, d.debitday, d.split, b.id bid, b.bank, b.accno, b.branch, c.balance cash, e.balance equity FROM (SELECT * FROM (SELECT uid, MAX(reg_date) AS reg_date FROM debitorders WHERE debitday=5 AND active=TRUE GROUP BY uid) as p JOIN debitorders USING (uid, reg_date)) AS d, (SELECT * FROM (SELECT uid, MAX(reg_date) AS reg_date FROM bankacc GROUP BY uid) as x LEFT JOIN bankacc USING (uid, reg_date)) AS b, (SELECT uid, balance FROM portfolio WHERE fid=1) AS c, (SELECT uid, balance FROM portfolio WHERE fid=2) AS e, users u WHERE d.uid=u.id AND d.uid=b.uid AND d.uid=c.uid AND d.uid=e.uid\".format(actiondate,fund_enum.index('cash')+1,fund_enum.index('equity')+1)\n print(query)\n cursor.execute(query)\n fields = [descr[0] for descr in cursor.description]\n results = [dict(zip(fields,row)) for row in cursor.fetchall()]\n \n print('{0}: {1} debitorders'.format(todaystr,len(results)))\n\n if len(results)>0:\n \n # ACTIONDATE\n while not cal.is_working_day(actiondate):\n actiondate += timedelta(days=1) \n \n client = Client(debit_api)\n\n header = OrderedDict([('merchantno', debit_mno), \\\n ('applicationid', debit_aid), \\\n ('servicetype', 3), \\\n ('totaltransactions', len(results)), \\\n ('firstactiondate', 
actiondate.strftime('%y%m%d')), \\\n ('lastactiondate', actiondate.strftime('%y%m%d')), \\\n ('merchantcellnotify', ''), \\\n ('merchantemailnotify',email_adm)])\n\n debitorder = OrderedDict([('header', header)])\n\n debittotal = 0\n seqno = 0\n ntrans = len(results)\n for t in results:\n\n trans='{0}-{1}-{2}-{3}'.format(t['uid'],t['amount'],t['bid'],todaystr)\n print(trans)\n # hash_object = hashlib.md5(trans.encode())\n # detail=hash_object.hexdigest()\n detail = des_crypt.hash(trans)\n print(detail)\n\n cash=round(t['amount']*(100-t['split'])/100)\n print('Cash deposit: R{0}'.format(cash))\n transaction=(t['uid'],t['bid'],fund_enum.index('cash')+1,transtype_enum.index('Debitorder')+1,cash,t['cash'],transstatus_enum.index('Submitted')+1,detail,nowstr)\n query=\"INSERT INTO transactions (uid, bid, fid, type, amount, balance, status, detail, reg_date) VALUES ({0},{1},{2},{3},{4},{5},{6},'{7}','{8}');\".format(*transaction)\n print(query)\n cursor.execute(query)\n db.commit()\n\n equity=round(t['amount']*t['split']/100)\n print('Equity deposit: R{0}'.format(equity))\n transaction=(t['uid'],t['bid'],fund_enum.index('equity')+1,transtype_enum.index('Debitorder')+1,equity,t['equity'],transstatus_enum.index('Submitted')+1,detail,nowstr)\n query=\"INSERT INTO transactions (uid, bid, fid, type, amount, balance, status, detail, reg_date) VALUES ({0},{1},{2},{3},{4},{5},{6},'{7}','{8}');\".format(*transaction)\n print(query)\n cursor.execute(query)\n db.commit()\n\n debittotal += t['amount']\n accountholder = t['firstname'] + ' ' + t['lastname']\n transaction = OrderedDict([('sequenceno', seqno+1), \\\n ('branchcode', t['branch']), \\\n ('accounttype', 1), \\\n ('accountno', t['accno']), \\\n ('debitamount', t['amount']),\\\n ('debitdate', actiondate.strftime('%y%m%d')),\\\n ('debitreference',detail),\\\n ('accountholder', accountholder),\\\n ('debitcellnotify', ''), \\\n ('debitemailnotify', ''), \\\n ('transactionrefno', detail)])\n debitorder.update(OrderedDict([('transaction%d'%(seqno), transaction)]))\n seqno+=1\n\n footer = OrderedDict([('totaltransactions', ntrans), \\\n ('firstactiondate', actiondate.strftime('%y%m%d')), \\\n ('lastactiondate', actiondate.strftime('%y%m%d')), \\\n ('debittotal', debittotal)])\n\n debitorder.update(OrderedDict({'footer':footer}))\n\n xml = dicttoxml(debitorder, custom_root='debitorder', attr_type=False)\n dom = parseString(xml)\n xml = dom.toxml()\n xml = re.sub(r\"transaction\\d+\", \"transaction\", xml)\n print(xml)\n\n filename = \"debit-%s.xml\"%(actiondate.strftime('%y%m%d'))\n print('Archiving: '+filename) \n f = open(host_path+'assets/archive/debitorders/'+filename,\"w+\")\n f.write(xml)\n f.close()\n\n result = client.service.uploadDebitFile(xml)\n\n el = ElementTree.XML(result).find(\"transactionreference\")\n\n if el is not None: \n\n tref = el.text\n print('[INFO] Debit upload ref {0}'.format(tref))\n release = OrderedDict([('merchantno', debit_mno), \\\n ('applicationid', debit_aid), \\\n ('referenceno', tref)])\n xml = dicttoxml(release, custom_root='dorelease', attr_type=False)\n print(xml)\n result = client.service.releaseDebitFile(xml)\n success = ElementTree.XML(result).find(\"result\").text\n\n if success=='1':\n\n query = \"UPDATE transactions SET ref='{0}', status={1} WHERE type={2} and reg_date=STR_TO_DATE('{3}','%Y-%m-%d %H:%i:%s');\".format(tref,transstatus_enum.index('Processing')+1,transtype_enum.index('Debitorder')+1,nowstr)\n print(query)\n cursor.execute(query)\n tid = str(cursor.lastrowid)\n db.commit()\n\n subject = '[SUCCESS] 
Debit release'\n print(subject)\n body = 'Actiondate: {0}\\nNo transaction {1}\\nTotal: {2}'.format(actiondate.strftime('%y%m%d'),ntrans,debittotal)\n print(body)\n t = threading.Thread(target=eclient.sendemail, args=(subject, body, email_adm))\n t.daemon = True\n t.start() \n\n else:\n subject = '[ERROR] Debit release'\n print(subject)\n body = ElementTree.XML(result).find('description').text\n print(body)\n t = threading.Thread(target=eclient.sendemail, args=(subject, body, email_adm))\n t.daemon = True\n t.start() \n\n else:\n subject = '[ERROR] Debit upload'\n print(subject)\n body = ElementTree.XML(result).find('debituploaderror').find('description').text\n t = threading.Thread(target=eclient.sendemail, args=(subject, body, email_adm))\n t.daemon = True\n t.start() \n\n except Exception as ex:\n query = \"DELETE FROM transactions WHERE type={0} and reg_date=STR_TO_DATE('{1}','%Y-%m-%d %H:%i:%s');\".format(transtype_enum.index('Debitorder')+1,nowstr)\n print(query)\n cursor.execute(query)\n db.commit()\n subject = '[ERROR] Debitorder'\n print(subject)\n body = '{0}'.format(ex)\n print(body)\n # t = threading.Thread(target=eclient.sendemail, args=(subject, body, email_err))\n # t.daemon = True\n # t.start() \n\n finally:\n if (db):\n db.close()\n\n\n","sub_path":"server/clearance.py","file_name":"clearance.py","file_ext":"py","file_size_in_byte":10042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"338676179","text":"#!/usr/bin/python\n# Filename: pr15.py\n\ngridSize = 20;\n\ngrid = list();\nfor i in range(gridSize + 1):\n grid.append(1);\n\nfor i in range(1, gridSize):\n for j in range(0, gridSize):\n grid[j] = sum(grid[j:]);\n\nprint(sum(grid));\n","sub_path":"pr15.py","file_name":"pr15.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"472281760","text":"import os\nimport urllib.request\nimport urllib.parse\nfrom bs4 import BeautifulSoup\n\nQUERY = str(input(\"SEARCH : \"))\n\nfile = open('./Crawler_KISTI_'+QUERY+'.txt','w', encoding='utf-8')\n\nArr_URL = ['Default', 'Key', 'Target', 'Search', 'Count', 'Start', 'Sort', 'Type', 'Res', 'Query']\n\nArr_Value = ['http://openapi.ndsl.kr/itemsearch.do', '?keyValue=03323430', '&target=ARTI', '&searchField=BI', '&displayCount=100', '&startPosition=1', '&sortby=', '&returnType=xml', '&responseGroup=advance', '&query='+QUERY]\n\nURL=''\n\nfor arr in range(len(Arr_URL)):\n URL += Arr_Value[arr]\nprint(URL)\nheaders = {\n'Host': 'openapi.ndsl.kr',\n'Connection': 'keep-alive',\n'Upgrade-Insecure-Requests': 1,\n'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36',\n'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n'Accept-Encoding': 'gzip, deflate',\n'Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',\n'Cookie': '_ga=GA1.2.886947753.1522289626; WMONID=sRG64EHzxAF; JSESSIONID=aeEYqiOaIN5u1jVvsSp3Ru9n0imSEBf7AkwuFBwWXNlzDQrZM1lI2itgzZmg6Nfs.ar228_servlet_engine12'\n}\nreq=urllib.request.Request(URL, headers=headers)\nf = urllib.request.urlopen(req)\nresultXML = f.read()\nxmlsoup = BeautifulSoup(resultXML, 'html.parser')\nitems = xmlsoup.find_all('dissertation')\nfor item in items:\n print(item.dissertationtitle)\n file.write('-------------------------------\\n')\n file.write(' 제목 : '+ item.dissertationtitle.get_text(strip=1)+'\\n')\n file.write(' 내용 : '+ 
item.abstract.get_text(strip=1)+'\\n')\n file.write(' 링크 : '+ item.deeplink.get_text(strip=1)+'\\n')#본문의 링크를 가져오기 위해\n file.write('-------------------------------\\n')\nfile.close()\n'''\nDefault_URL = 'http://openapi.ndsl.kr/itemsearch.do'\nKey_URL = '?keyValue=03323430'\nTarget_URL = '&target=ARTI'#ARTI=all/NART=학위제외/JAKO=국내/JAFO=해외/CFKO=국내회의/CFFO=해외회의/DIKO=국내학위\nSearch_URL = '&searchField=BI'#BI=all/TI=제목/SO=저널명/KW=키워드\nCount_URL = '&displayCount=100'\nStart_URL = '&startPosition=1'\nSort_URL = '&sortby='#default=정확도/title=논문명/jtitle=저널명/pubYear=발행일\nType_URL = '&returnType=xml'#xml/json\nRes_URL = '&responseGroup=advance'#simple=URLx/advance=URL\nQuery_URL = '&query='+QUERY#검색질의어(UTF-8)\n'''\n","sub_path":"Crawler_KISTI.py","file_name":"Crawler_KISTI.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"512248827","text":"# 1. Ref: https://www.jiuzhang.com/solution/two-sum-iv-input-is-a-bst/#tag-highlight-lang-python\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def findTarget(self, root: TreeNode, n: int) -> bool:\n if not root:\n return\n self.res = None\n node_set = set()\n self.inorder(root, n, node_set)\n return bool(self.res)\n \n def inorder(self, root, n, node_set):\n if not root:\n return\n self.inorder(root.left, n, node_set)\n if root.val in node_set:\n self.res = [n-root.val, root.val]\n else:\n node_set.add(n-root.val)\n self.inorder(root.right, n, node_set)","sub_path":"653_two_sum_iv.py","file_name":"653_two_sum_iv.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"424498824","text":"\n# -*- coding: utf-8 -*- \n\nimport sys\nimport __builtin__\nimport os\n\n\n# Globals\n#\n__builtin__.__dict__['cout'] = sys.stdout.write\n__builtin__.__dict__['flushout'] = sys.stdout.flush\n__builtin__.__dict__['cerr'] = sys.stderr.write\n__builtin__.__dict__['dbg'] = id\n\n\n# Platform adaptations\n#\nimport time\nimport platform\nsystem = platform.system()\nif system == \"Linux\":\n# __builtin__.__dict__['_'] = lambda x: x.encode('utf8')\n __builtin__.__dict__['_'] = lambda x: x\n __builtin__.__dict__['timer'] = time.time\nelif system == \"Windows\":\n __builtin__.__dict__['_'] = lambda x: x.encode('cp850')\n __builtin__.__dict__['timer'] = time.clock\nelse:\n# print \"Unknown platform %s\\n\" % system\n __builtin__.__dict__['_'] = lambda x: x\n __builtin__.__dict__['timer'] = time.clock\n\n\n# Trace function\n#\ntracetime = timer()\n\n# Disable trace\n#\nx_trace = False\n\ndef enable_trace():\n global x_trace\n x_trace = True\n\ndef disable_trace():\n global x_trace\n x_trace = False\n\ndef trace(arg=\"\"):\n global x_trace\n if not x_trace:\n return\n import inspect\n caller = inspect.stack()[1]\n global tracetime\n t = timer()\n dt = max(0, round((t-tracetime)*1000))\n tracetime = t\n cout(\"%08dms %s() %s:%d: %s\\n\" % \\\n (dt, caller[3], os.path.basename(caller[1]),\n caller[2], arg))\n__builtin__.__dict__['trace'] = trace\n__builtin__.__dict__['enable_trace'] = enable_trace\n__builtin__.__dict__['disable_trace'] = disable_trace\n\n\n# Print error message and exit with error code 1\n#\ndef die(s):\n sys.stderr.write(\"\\nFatal: \"+s+'\\n')\n sys.exit(1)\n\n__builtin__.__dict__['die'] = 
die\n","sub_path":"python/premain.py","file_name":"premain.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"486395530","text":"import discord\nfrom redbot.core import commands\nimport random\nimport re\nfrom functools import reduce\n\nBaseCog = getattr(commands, \"Cog\", object)\n\n\nclass Zalgo(BaseCog):\n def __init__(self, bot):\n self.bot = bot\n\n async def april_fools(message):\n # Prevent acting on DM's\n if (\n random.random() <= 0.999\n or (message.guild is None)\n or message.guild.name.lower() != \"cortex\"\n ):\n return\n\n clean_message = message.clean_content.lower()\n # MM: Added so list instead of string\n message_split = clean_message.split(\" \")\n # BLACKLIST CHANNELS\n blacklist = [\n \"news\",\n \"rpg\",\n \"events\",\n \"recommends\",\n \"politisophy\",\n \"eyebleach\",\n \"weeb-lyfe\",\n \"out-of-context\",\n \"jokes\",\n \"anime-club\",\n ]\n\n message_channel = message.channel.name.lower()\n\n if (\n # DO NOT RESPOND TO SELF MESSAGES\n (bot.user.id == message.author.id or message.content.startswith(\".\"))\n or (message.channel.name is None)\n or (\n reduce(\n lambda acc, n: acc or (n == message_channel), blacklist, False\n )\n )\n or (\"thank\" in clean_message)\n or (\"http\" in clean_message)\n ):\n return\n\n ctx = await bot.get_context(message)\n\n new_msg = self.uwuify(message.content)\n\n await ctx.message.delete()\n await ctx.send(new_msg)\n\n self.bot.add_listener(april_fools, \"on_message\")\n\n @commands.command()\n async def zalgo(self, ctx):\n \"\"\"Zalgo the text\"\"\"\n # first pull out the .zalgo part of the message\n raw_msg = \" \".join(ctx.message.clean_content.split(\" \")[1:])\n if raw_msg == \"\":\n raw_msg = \"HE COMES\"\n\n # random intensity\n intensity = random.randint(50, 150)\n\n # zalgo characters to fuck with\n zalgo_chrs = [chr(x) for x in range(0x0300, 0x036F + 1)]\n zalgo_chrs += [\"\\u0488\", \"\\u0489\"]\n\n msg_array = list(raw_msg)\n for i in range(intensity):\n index = random.randint(0, len(msg_array) - 1)\n msg_array.insert(index, random.choice(zalgo_chrs))\n\n zalgo_msg = \"\".join(msg_array)\n\n await ctx.message.delete()\n await ctx.send(zalgo_msg)\n\n def uwuify(self, msg):\n replacements = {\n \"r\": \"w\",\n \"R\": \"W\",\n \"l\": \"w\",\n \"L\": \"W\",\n \"this\": \"dis\",\n \"This\": \"Dis\",\n \"they\": \"dey\",\n \"They\": \"Dey\",\n \"there\": \"dere\",\n \"There\": \"Dere\",\n \"the\": \"da\",\n \"The\": \"Da\",\n }\n\n new_msg = msg\n for regex, replacement in replacements.items():\n new_msg, _ = re.subn(regex, replacement, new_msg)\n\n new_msg += \" *uwu*\"\n\n return new_msg\n\n @commands.command()\n async def uwu(self, ctx):\n \"\"\"uwu the text\"\"\"\n # first pull out the .zalgo part of the message\n raw_msg = \" \".join(ctx.message.content.split(\" \")[1:])\n if raw_msg == \"\":\n raw_msg = \"uwu\"\n\n new_msg = self.uwuify(raw_msg)\n\n await ctx.message.delete()\n await ctx.send(new_msg)\n\n @commands.command()\n async def spoilerify(self, ctx, *msg):\n new_msg = []\n do_it = False\n for word in msg:\n if do_it:\n new_msg.append(f\"||{word}||\")\n else:\n new_msg.append(word)\n do_it = not do_it\n new_msg = \" \".join(new_msg)\n\n await ctx.message.delete()\n await ctx.send(new_msg)\n","sub_path":"zalgo/zalgo.py","file_name":"zalgo.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"650451830","text":"\"\"\"\nA 
script having a set of simple functions to check graph properties.\n\n\"\"\"\n\n\nclass UndirectedGraph:\n def __init__(self, V, E):\n self.V = V\n self.E = E\n\n def __str__(self):\n graph_str = ''\n for start in self.E.keys():\n for stop in self.E[start]:\n graph_str += start + ' ' + stop + '\\n'\n return graph_str\n \n \ndef check_vertex_cover(G, S):\n \"\"\"\n Returns True if set S is a vertex cover of the edge set E,\n False otherwise.\n S is implemented as a set\n E is implemented as an adjacency list mapping a vertex to a list\n of edges\n \"\"\"\n\n for start in G.E.keys():\n if start in S:\n # vertex start is already in the cover\n # so we needn't check its corresponding edges\n continue\n for end in G.E[start]:\n if not (end in S):\n return False\n return True\n","sub_path":"GraphUtil.py","file_name":"GraphUtil.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"455997934","text":"import logging\n\n\nclass Browser:\n \"A class to fetch reports and dispatch to actors.\"\n def __init__(self, actors, subreddit, db, cursor):\n self.actors = actors\n self.subreddit = subreddit\n self.db = db\n self.cursor = cursor\n\n def check_command(self, command, mod, post):\n \"Check if any actor matches this report.\"\n for actor in self.actors:\n actor.parse(command, mod, post)\n\n def reports(self):\n \"\"\"Generator for mod reports in a subreddit.\n\n Yields tuple of report, mod name, and target.\n\n \"\"\"\n try:\n for post in self.subreddit.mod.reports(limit=None):\n for mod_report in post.mod_reports:\n yield (str(mod_report[0]), mod_report[1], post)\n except Exception as e:\n logging.error(\"Error fetching reports: {err}\".format(err=e))\n\n def run(self):\n \"Fetch reports and dispatch to actors.\"\n for command, mod, post in self.reports():\n self.check_command(command, mod, post)\n for actor in self.actors:\n actor.after()\n","sub_path":"bernard/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"488297317","text":"from dao import log_dao\nfrom handlers import installationHandler, logHandler\nfrom datetime import datetime\nimport time, config\n\n\ndef set_status_for_customers(customers):\n for customer in customers:\n set_status_for_customer(customer)\n\n\ndef set_status_for_customer(customer):\n # assume false\n customer['connected'] = False\n installations = installationHandler.get_by_customer(customer.get('id'))\n if installations:\n one_connected = set_status_for_installations(installations)\n customer['connected'] = one_connected\n customer['warnings'] = logHandler.get_warnings_customer(customer)\n customer['alarms'] = logHandler.get_alarms_customer(customer)\n\n\ndef set_status_for_installations(installations):\n one_connected = False\n for installation in installations:\n if set_status_for_installation(installation):\n one_connected = True\n return one_connected\n\n\ndef set_status_for_installation(installation):\n # assume false\n connected = installationHandler.get_connection_status_for(installation.get('serial_number'))\n installation['connected'] = connected\n installation['warnings'] = logHandler.get_warnings_installation(installation)\n installation['alarms'] = logHandler.get_alarms_installation(installation)\n return connected\n\n\ndef set_status_for_controllers(controllers):\n d2 = datetime.now()\n d2_ts = time.mktime(d2.timetuple())\n for 
controller in controllers:\n set_status_for_controller(controller, d2_ts)\n\n\ndef set_status_for_controller(controller, d2_ts=None):\n set_status_for_entity(controller, d2_ts)\n controller['warnings'] = logHandler.get_warnings_controller(controller)\n controller['alarms'] = logHandler.get_alarms_controller(controller)\n\n\ndef set_status_for_tags(tag_list):\n d2 = datetime.now()\n d2_ts = time.mktime(d2.timetuple())\n for tag in tag_list:\n set_status_for_entity(tag, d2_ts)\n return tag_list\n\n\n\ndef set_status_for_entity(entity, d2_ts=None):\n if not d2_ts:\n d2 = datetime.now()\n d2_ts = time.mktime(d2.timetuple())\n # assume false\n entity['connected'] = False\n d1 = entity.get('time')\n if d1:\n\n # convert to unix timestamp\n d1_ts = time.mktime(d1.timetuple())\n\n # they are now in seconds\n diff = int(d2_ts-d1_ts)\n\n # Two times the actual synch intervall\n if diff < config.RUN_SYNC_FREQUENCY * 3:\n entity['connected'] = True\n","sub_path":"portal/backend/handlers/statusHandler.py","file_name":"statusHandler.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"598483232","text":"from django.shortcuts import render, redirect, get_object_or_404, HttpResponse\nfrom django.contrib.auth.views import login\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.hashers import check_password\nfrom django.utils.text import slugify\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.views.decorators.cache import cache_control\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.utils import timezone\n\nfrom .models import Kategori, Kegiatan, Kasus, Anggota\nfrom perek.models import PersonilPerek\nfrom perek_tugas.models import BuktiLaporan\nfrom .forms import KategoriForm, KegiatanForm, KasusForm, SandiForm, ProfilForm, AnggotaForm, PersonilForm\n\n# import datetime\nimport json\n\n# data_waktu = datetime.datetime.now()\ndata_waktu = timezone.now()\n\n\ndef bantu_kegiatan(request):\n data_raw = [{'title': o.kegiatan_nama, 'description': o.kegiatan_deskripsi[:20]} for o in Kegiatan.objects.all()]\n data_json = json.dumps(data_raw)\n\n return HttpResponse(data_json, content_type='application/json')\n\n\ndef bantu_username(request):\n data_raw = [{'title': o.username} for o in User.objects.all()]\n data_json = json.dumps(data_raw)\n\n return HttpResponse(data_json, content_type='application/json')\n\n\ndef cari_kegiatan(request, slug):\n unslug = slug.replace('-', ' ')\n\n cari = Kegiatan.objects.select_related().filter(kegiatan_nama__icontains=unslug)\n data_kategori = Kategori.objects.all()\n return render(request, 'logapp/kegiatan_cari.html', {'waktu': data_waktu, 'kegiatan': cari,\n 'kategori': data_kategori})\n\n\ndef custom_login(request):\n if request.user.is_authenticated():\n return redirect('halaman_utama')\n else:\n return login(request)\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef kegiatan(request, slug=None):\n data_kategori = Kategori.objects.all()\n\n if slug is None:\n data_kegiatan = Kegiatan.objects.select_related().order_by('-kegiatan_waktu_aktif')\n else:\n data_kegiatan = Kegiatan.objects.select_related().filter(kegiatan_kategori__kategori_slug__contains=slug) \\\n .order_by('-kegiatan_waktu_aktif')\n\n paginator = Paginator(data_kegiatan, 14, 1)\n page = request.GET.get('halaman')\n\n try:\n keg = paginator.page(page)\n\n except 
PageNotAnInteger:\n keg = paginator.page(1)\n\n except EmptyPage:\n keg = paginator.page(paginator.num_pages)\n\n maks = len(paginator.page_range)\n\n start_number = keg.number - 3 if keg.number >= 4 else 0\n end_number = keg.number + 2 if keg.number <= maks else maks\n page_range = paginator.page_range[start_number:end_number]\n\n return render(request, 'logapp/kegiatan.html', {'kategori': data_kategori, 'kegiatan': keg,\n 'waktu': data_waktu, 'page_range': page_range})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required\ndef kegiatan_tambah(request):\n if request.method == 'POST':\n # Slug operation\n max_length = Kegiatan._meta.get_field('kegiatan_slug').max_length\n slug_ori = slugify(request.POST['kegiatan_nama'])[:max_length]\n\n kgtn = Kegiatan(kegiatan_slug=slug_ori, kegiatan_waktu=data_waktu)\n\n form_kegiatan = KegiatanForm(request.POST, instance=kgtn)\n if form_kegiatan.is_valid():\n form_kegiatan.save()\n return redirect('halaman_kegiatan')\n\n else:\n form_kegiatan = KegiatanForm()\n form_kegiatan.fields['kegiatan_owner'].queryset = User.objects.filter(username=request.user.username)\n\n return render(request, 'logapp/kegiatan_tambah.html', {'FormKegiatan': form_kegiatan, 'waktu': data_waktu})\n\n\n@login_required\ndef kegiatan_edit(request, slug):\n kgtn_edit = get_object_or_404(Kegiatan, kegiatan_slug=slug)\n\n if request.method == 'POST':\n # Slug operation\n max_length = Kegiatan._meta.get_field('kegiatan_slug').max_length\n slug_ori = slugify(request.POST['kegiatan_nama'])[:max_length]\n\n kgtn_edit.kegiatan_slug = slug_ori\n kgtn_edit.kegiatan_waktu_aktif = data_waktu\n\n form_kegiatan = KegiatanForm(request.POST, instance=kgtn_edit)\n if form_kegiatan.is_valid():\n if request.POST.get('kegiatan_ada_stkk') != 'on':\n kgtn_edit.kegiatan_kode_stkk = ''\n form_kegiatan.save()\n return redirect('halaman_kegiatan')\n\n else:\n form_kegiatan = KegiatanForm(instance=kgtn_edit)\n\n return render(request, 'logapp/kegiatan_tambah.html', {'FormKegiatan': form_kegiatan, 'waktu': data_waktu})\n\n\n@login_required\ndef kegiatan_hapus(request, slug):\n hapus_kegiatan = get_object_or_404(Kegiatan, kegiatan_slug=slug)\n kegiatan_punya = hapus_kegiatan.kegiatan_owner\n kategori_bersangkutan = hapus_kegiatan.kegiatan_kategori.kategori_slug\n\n if request.user == kegiatan_punya or request.user.is_superuser is True:\n hapus_kegiatan.delete()\n\n return redirect('saring_kegiatan', slug=kategori_bersangkutan)\n\n else:\n return redirect('halaman_logout')\n\n\n@login_required\ndef kategori_pindah(request, nama_kegiatan, slug):\n kegiatan_pindah = get_object_or_404(Kegiatan, kegiatan_slug=nama_kegiatan)\n kegiatan_punya = kegiatan_pindah.kegiatan_owner\n kategori_tujuan = get_object_or_404(Kategori, kategori_slug=slug)\n\n if request.user == kegiatan_punya or request.user.is_superuser is True:\n kegiatan_pindah.kegiatan_kategori = kategori_tujuan\n kegiatan_pindah.save()\n return redirect('saring_kegiatan', slug=kategori_tujuan.kategori_slug)\n\n else:\n return redirect('halaman_logout')\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef kategori_lihat(request):\n pilih_kategori = Kategori.objects.all().order_by('kategori_nama')\n pilih_kategori_kegiatan = Kegiatan.objects.select_related()\n\n return render(request, 'logapp/kategori.html', {'kegiatan': pilih_kategori_kegiatan, 'kategori': pilih_kategori,\n 'waktu': data_waktu})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required\ndef 
kategori_tambah(request):\n if request.method == 'POST':\n # Slug operation\n max_length = Kategori._meta.get_field('kategori_slug').max_length\n slug_ori = slugify(request.POST['kategori_nama'])[:max_length]\n\n slug = Kategori(kategori_slug=slug_ori)\n form_kategori = KategoriForm(request.POST, instance=slug)\n\n if form_kategori.is_valid():\n form_kategori.save()\n return redirect('lihat_kategori')\n else:\n form_kategori = KategoriForm()\n\n return render(request, 'logapp/kategori_tambah.html', {'FormKategori': form_kategori, 'waktu': data_waktu})\n\n\n@login_required\ndef kategori_edit(request, slug):\n kategori = get_object_or_404(Kategori, kategori_slug=slug)\n\n if request.method == 'POST':\n # Slug operation\n max_length = Kategori._meta.get_field('kategori_slug').max_length\n slug_ori = slugify(request.POST['kategori_nama'])[:max_length]\n\n kategori.kategori_slug = slug_ori\n form_kategori = KategoriForm(request.POST, instance=kategori)\n\n if form_kategori.is_valid():\n form_kategori.save()\n return redirect('lihat_kategori')\n\n else:\n form_kategori = KategoriForm(instance=kategori)\n\n return render(request, 'logapp/kategori_tambah.html', {'FormKategori': form_kategori, 'waktu': data_waktu})\n\n\n@login_required\ndef kategori_hapus(request, slug):\n kategori = get_object_or_404(Kategori, kategori_slug=slug)\n kategori.delete()\n\n return redirect('lihat_kategori')\n\n\ndef kasus_lihat(request, slug):\n data_kasus = Kasus.objects.select_related().filter(kasus_kegiatan__kegiatan_slug__contains=slug) \\\n .order_by('-kasus_dibuat')\n\n data_kegiatan = get_object_or_404(Kegiatan, kegiatan_slug=slug)\n member = User.objects.filter(anggota__nama_kegiatan__id=data_kegiatan.pk)\n\n kasus_total = data_kasus.count()\n if kasus_total == 0:\n kasus_total = 1\n kasus_selesai = data_kasus.filter(kasus_status=1).count()\n persentase = (kasus_selesai / kasus_total) * 100\n\n paginator = Paginator(data_kasus, 15, 1)\n page = request.GET.get('halaman')\n\n try:\n kas = paginator.page(page)\n\n except PageNotAnInteger:\n kas = paginator.page(1)\n\n except EmptyPage:\n kas = paginator.page(paginator.num_pages)\n\n maks = len(paginator.page_range)\n\n start_number = kas.number - 3 if kas.number >= 4 else 0\n end_number = kas.number + 2 if kas.number <= maks else maks\n page_range = paginator.page_range[start_number:end_number]\n\n return render(request, 'logapp/kasus.html', {'waktu': data_waktu, 'kasus': kas, 'kegiatan': data_kegiatan,\n 'page_range': page_range, 'persentase': persentase, 'member': member})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required\ndef kasus_tambah(request, slug):\n keg = get_object_or_404(Kegiatan, kegiatan_slug=slug)\n member = User.objects.filter(anggota__nama_kegiatan__id=keg.pk)\n\n if request.method == 'POST':\n form_kasus = KasusForm(request.POST)\n\n if form_kasus.is_valid():\n tambahan = form_kasus.save(commit=False)\n tambahan.kasus_kegiatan = keg\n tambahan.kasus_anggota = request.user\n keg.kegiatan_waktu_aktif = timezone.now()\n\n if keg.kegiatan_owner == request.user or request.user in member:\n keg.save()\n tambahan.save()\n else:\n return redirect('halaman_logout')\n\n return redirect('halaman_kasus', slug=slug)\n else:\n form_kasus = KasusForm(instance=keg)\n\n if request.user == keg.kegiatan_owner or request.user in member:\n return render(request, 'logapp/kasus_tambah.html', {'waktu': data_waktu, 'FormKasus': form_kasus, 'keg': keg})\n else:\n return render(request, 'logapp/halaman_error.html', {'waktu': 
data_waktu})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required\ndef kasus_ubah(request, pk):\n kas = get_object_or_404(Kasus, pk=pk)\n keg = kas.kasus_kegiatan\n slug = kas.kasus_kegiatan.kegiatan_slug\n member = User.objects.filter(anggota__nama_kegiatan__id=keg.pk)\n\n if request.method == 'POST':\n form_kasus = KasusForm(request.POST, instance=kas)\n\n if form_kasus.is_valid():\n tambahan = form_kasus.save(commit=False)\n tambahan.kasus_kegiatan = keg\n keg.kegiatan_waktu_aktif = timezone.now()\n\n if keg.kegiatan_owner == request.user or request.user in member:\n keg.save()\n tambahan.save()\n\n return redirect('halaman_kasus', slug=slug)\n else:\n form_kasus = KasusForm(instance=kas)\n\n if request.user == keg.kegiatan_owner or request.user in member:\n return render(request, 'logapp/kasus_tambah.html', {'waktu': data_waktu, 'FormKasus': form_kasus, 'keg': keg})\n else:\n return render(request, 'logapp/halaman_error.html', {'waktu': data_waktu})\n\n\n@login_required\ndef kasus_hapus(request, pk):\n kas = get_object_or_404(Kasus, pk=pk)\n slug = kas.kasus_kegiatan.kegiatan_slug\n keg = kas.kasus_kegiatan\n member = User.objects.filter(anggota__nama_kegiatan__id=keg.pk)\n\n if request.user == keg.kegiatan_owner or request.user == kas.kasus_anggota and request.user in member:\n kas.delete()\n else:\n return redirect('halaman_logout')\n\n return redirect('halaman_kasus', slug=slug)\n\n\n@login_required\ndef lihat_setel(request):\n kegiatan_owner = Kegiatan.objects.filter(kegiatan_owner=request.user)\n data_bukti_laporan = BuktiLaporan.objects.select_related().order_by('-laporan_tanggal').filter(\n laporan_jenis__in=['IJZ', 'SRF', 'MP', 'BUK', 'MKL', 'DKM', 'STG', 'PAK', 'TJ', 'PGN'],\n laporan_kepemilikan=request.user)\n\n paginator_bukti = Paginator(data_bukti_laporan, 5, 2)\n\n halaman_bukti = request.GET.get('halaman_bukti')\n\n paginator = Paginator(kegiatan_owner, 15, 1)\n page = request.GET.get('halaman')\n\n try:\n keg = paginator.page(page)\n\n except PageNotAnInteger:\n keg = paginator.page(1)\n\n except EmptyPage:\n keg = paginator.page(paginator.num_pages)\n\n try:\n bukti_laporan = paginator_bukti.page(halaman_bukti)\n\n except PageNotAnInteger:\n bukti_laporan = paginator_bukti.page(1)\n\n except EmptyPage:\n bukti_laporan = paginator_bukti.page(paginator_bukti.num_pages)\n\n maks = len(paginator.page_range)\n\n start_number = keg.number - 3 if keg.number >= 4 else 0\n end_number = keg.number + 2 if keg.number <= maks else maks\n page_range = paginator.page_range[start_number:end_number]\n\n data = {\n 'waktu': data_waktu,\n 'kegiatan': keg,\n 'page_range': page_range,\n 'bukti_laporan': bukti_laporan\n }\n\n return render(request, 'logapp/setel.html', data)\n\n\n@login_required\ndef ganti_sandi(request):\n if request.method == 'POST':\n form = SandiForm(request.POST)\n\n if form.is_valid():\n u = User.objects.get(username=request.user.username)\n p = u.password\n\n if check_password(request.POST['SandiLama'], p):\n sandi_baru = request.POST['SandiBaru']\n sandi_cek = request.POST['CekSandiBaru']\n\n if sandi_baru == sandi_cek:\n u.set_password(sandi_baru)\n u.save()\n\n messages.success(request, 'Selamat, sandi yang baru berhasil disimpan. 
Silahkan login kembali '\n 'dengan sandi baru')\n else:\n messages.info(request, 'Konfirmasi sandi baru gagal!')\n else:\n messages.info(request, 'Sandi lama salah!')\n\n return redirect('ganti_sandi')\n\n else:\n form = SandiForm()\n\n return render(request, 'registration/ganti_sandi.html', {'waktu': data_waktu, 'form': form})\n\n\n@login_required\ndef ganti_profil(request):\n data_profil = User.objects.get(username=request.user.username)\n cek = PersonilPerek.objects.filter(nama=request.user).count()\n\n if cek != 0:\n data_personil_perek = PersonilPerek.objects.get(nama=request.user)\n else:\n data_personil_perek = PersonilPerek(nama=request.user)\n\n if request.method == 'POST':\n form = ProfilForm(request.POST)\n\n # data_personil_perek.nama = request.user\n form_personil = PersonilForm(request.POST, instance=data_personil_perek)\n\n if form.is_valid() and form_personil.is_valid():\n data_profil.first_name = request.POST['NamaDepan']\n data_profil.last_name = request.POST['NamaBelakang']\n data_profil.email = request.POST['Email']\n data_profil.save()\n form_personil.save()\n\n '''if cek == 0:\n if form_personil.is_valid():\n form_personil.save()'''\n\n messages.success(request, 'Data profil berhasil diubah.')\n\n return redirect('halaman_setel')\n else:\n data = {\n 'NamaDepan': data_profil.first_name,\n 'NamaBelakang': data_profil.last_name,\n 'Email': data_profil.email,\n }\n form = ProfilForm(initial=data)\n\n if cek != 0:\n form_personil = PersonilForm(instance=data_personil_perek)\n else:\n form_personil = PersonilForm()\n\n return render(request, 'registration/ganti_profil.html', {'waktu': data_waktu, 'form': form,\n 'formPersonil': form_personil})\n\n\n@login_required\ndef anggota_tambah(request, slug):\n data_kegiatan = Kegiatan.objects.get(kegiatan_slug=slug)\n data_kegiatan_anggota = data_kegiatan.anggota_set.all().count()\n member = User.objects.filter(anggota__nama_kegiatan__id=data_kegiatan.pk)\n\n if data_kegiatan_anggota > 0:\n data_anggota = Anggota.objects.get(nama_kegiatan=data_kegiatan)\n else:\n data_anggota = Anggota(nama_kegiatan=data_kegiatan)\n\n if request.method == 'POST':\n form = AnggotaForm(request.POST, instance=data_anggota)\n if request.POST.get('anggota_kegiatan') is None:\n data_mau_hapus = Anggota.objects.filter(nama_kegiatan__pk=data_kegiatan.pk)\n\n if data_mau_hapus.count() != 0:\n data_mau_hapus.delete()\n return redirect('halaman_setel')\n else:\n messages.info(request, 'Isikan nama anggota terlebih dahulu.')\n else:\n if form.is_valid():\n form.save()\n\n return redirect('halaman_setel')\n\n else:\n form = AnggotaForm(instance=data_anggota)\n form.fields['anggota_kegiatan'].queryset = User.objects.exclude(username=request.user.username)\n\n if request.user == data_kegiatan.kegiatan_owner or request.user in member:\n return render(request, 'logapp/anggota_tambah.html', {'waktu': data_waktu, 'form': form})\n else:\n return render(request, 'logapp/halaman_error.html', {'waktu': data_waktu})\n","sub_path":"logapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"538162879","text":"# -*- coding: utf-8 -*-\nfrom alipay import AliPay\n\n\nclass MyAlipay(AliPay):\n def api_alipay_trade_wap_pay(\n self, subject, out_trade_no, total_amount,\n return_url=None, notify_url=None, **kwargs\n ):\n biz_content = {\n \"subject\": subject,\n \"out_trade_no\": out_trade_no,\n \"total_amount\": total_amount,\n \"product_code\": 
\"QUICK_WAP_PAY\"\n }\n biz_content.update(kwargs)\n data = self.build_body(\n \"alipay.trade.wap.pay\",\n biz_content,\n return_url=return_url,\n notify_url=notify_url\n )\n\n return self.sign_data(data)\n\n def api_alipay_trade_page_pay(self, subject, out_trade_no, total_amount,\n return_url=None, notify_url=None, **kwargs):\n biz_content = {\n \"subject\": subject,\n \"out_trade_no\": out_trade_no,\n \"total_amount\": total_amount,\n \"product_code\": \"FAST_INSTANT_TRADE_PAY\"\n }\n\n biz_content.update(kwargs)\n data = self.build_body(\n \"alipay.trade.page.pay\",\n biz_content,\n return_url=return_url,\n notify_url=notify_url\n )\n return self.sign_data(data)\n\n def sign_data(self, data):\n data.pop(\"sign\", None)\n # 排序后的字符串\n unsigned_items = self._ordered_data(data)\n unsigned_string = \"&\".join(\"{}={}\".format(k, v) for k, v in unsigned_items if v)\n # unsigned_string = \"&\".join(\"{}={}\".format(k, v) for k, v in unsigned_items)\n sign = self._sign(unsigned_string)\n data['sign'] = sign\n\n return data\n # ordered_items = self._ordered_data(data)\n # quoted_string = \"&\".join(\"{}={}\".format(k, quote_plus(v)) for k, v in ordered_items)\n #\n # # 获得最终的订单信息字符串\n # signed_string = quoted_string + \"&sign=\" + quote_plus(sign)\n # return signed_string\n\n def build_body(\n self, method, biz_content, return_url=None, notify_url=None, append_auth_token=False\n ):\n data = super(MyAlipay, self).build_body(method, biz_content, return_url, notify_url, append_auth_token)\n if not return_url:\n data.pop('return_url', None)\n\n return data\n\n\n # def _my_verify(self, raw_content, signature, charset):\n # # 开始计算签名\n # key = self.alipay_public_key\n # signer = PKCS1_v1_5.new(key)\n # if self._sign_type == \"RSA\":\n # digest = SHA.new()\n # else:\n # digest = SHA256.new()\n # digest.update(raw_content.encode(charset))\n # if signer.verify(digest, decodebytes(signature.encode(charset))):\n # return True\n # return False\n #\n # def my_verify(self, data, signature, charset):\n # if \"sign_type\" in data:\n # sign_type = data.pop(\"sign_type\")\n # if sign_type != self._sign_type:\n # raise AliPayException(None, \"Unknown sign type: {}\".format(sign_type))\n # # 排序后的字符串\n # unsigned_items = self._ordered_data(data)\n # message = u\"&\".join(u\"{}={}\".format(k, v) for k, v in unsigned_items)\n # return self._my_verify(message, signature, charset)\n\n\n","sub_path":"my_addons/anxe_customer/controllers/my_alipay.py","file_name":"my_alipay.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"303486253","text":"\"\"\" Read and preprocess video data.\n Video processing occurs on a single video at a time. Video are read and\n preprocessed in parallel across multiple threads. 
The resulting videos\n are concatenated together to form a single batch for training or \n evaluation.\n -- Provide processed video data for a network:\n inputs: Construct batches of evaluation examples of videos.\n distorted_inputs: Construct batches of training examples of videos.\n batch_inputs: Construct batches of training or evaluation examples of \n videos.\n -- Data processing:\n parse_example_proto: Parses an Example proto containing a training example\n of a video.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef decode_jpeg(image_buffer, scope=None):\n \"\"\"Decode a JPEG string into one 3-D float image Tensor.\n Args:\n image_buffer: scalar string Tensor.\n scope: Optional scope for op_scope.\n Returns:\n 3-D float Tensor with values ranging from [0, 1).\n \"\"\"\n with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):\n # Decode the string as an RGB JPEG.\n # Note that the resulting image contains an unknown height and width\n # that is set dynamically by decode_jpeg. In other words, the height\n # and width of image is unknown at compile-time.\n image = tf.image.decode_jpeg(image_buffer, channels=3)\n\n # After this point, all image pixels reside in [0,1)\n # until the very end, when they're rescaled to (-1, 1). The various\n # adjust_* ops all require this range for dtype float.\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n # Crop the central region of the image with an area containing 87.5% of\n # the original image.\n image = tf.image.central_crop(image, central_fraction=0.875)\n\n # Resize the image to the original height and width.\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [FLAGS.image_height, FLAGS.image_width],\n align_corners=False)\n image = tf.squeeze(image, [0])\n return image\n\n\ndef decode_video(video_buffer):\n \"\"\"Decode list of string Tensor into list of 3-D float image Tensor.\n Args:\n video_buffer: tensor, shape [num_steps].\n Returns:\n list of 3-D float Tensor with values ranging from [0, 1).\n \"\"\"\n # Decode the images of one video\n return tf.map_fn(decode_jpeg, video_buffer, dtype=tf.float32)\n\n\ndef inputs(dataset, config, num_preprocess_threads=4):\n \"\"\" Generate batches of videos for evaluation.\n Use this function as the inputs for evaluating a network.\n Note that some (minimal) video preprocessing occurs during evaluation\n including central cropping and resizing of the video to fit the network.\n Args:\n dataset: instance of Dataset class specifying the dataset.\n config: class, the configuration setting\n num_preprocess_threads: integer, total number of preprocessing threads\n defaults to 4.\n Returns:\n videos: 2-D string Tensor of [batch_size, num_steps] a batch of \n video, each video is a dictionary containing strings providing \n JPEG encoding of all the images of a video clip\n labels: 1-D integer Tensor of [batch_size].\n filenames: 1-D integer Tensor of [batch_size].\n \"\"\"\n # Force all input processing onto CPU in order to reserve the GPU for\n # the forward inference and back-propagation.\n with tf.device('/cpu:0'):\n videos, labels, filenames = batch_inputs(\n dataset, config, train=False,\n num_preprocess_threads=num_preprocess_threads)\n return videos, labels, filenames\n\n\ndef distorted_inputs(dataset, config, num_preprocess_threads=4):\n \"\"\" Generate batches of distorted versions of videos.\n Use this function as the 
inputs for training a network.\n Distorting videos provides a useful technique for augmenting the data\n set during training in order to make the network invariant to aspects\n of the video that do not effect the label.\n Args:\n dataset: instance of Dataset class specifying the dataset.\n batch_size: integer, number of examples in batch\n num_preprocess_threads: integer, total number of preprocessing threads\n defaults to 4.\n Returns:\n videos: 2-D string Tensor of [batch_size, num_steps] a batch of \n video, each video is a dictionary containing strings providing \n JPEG encoding of all the images of a video clip\n labels: 1-D integer one host Tensor of [batch_size].\n filenames: 1-D integer Tensor of [batch_size].\n \"\"\"\n # Force all input processing onto CPU in order to reserve the GPU for\n # the forward inference and back-propagation.\n videos, labels_one_hot, filenames = batch_inputs(\n dataset, config, train=True,\n num_preprocess_threads=num_preprocess_threads)\n return videos, labels_one_hot, filenames\n\n\ndef video_preprocessing(image_features):\n \"\"\" Transfer dictionary to tensor type\n Args:\n image_features: dictionary contains, Tensor tf.string containing the \n contents of all the JPEG file of a video.\n Returns:\n resutl: 4-D float Tensor containing an appropriately list of scaled image\n [num_steps, encoded JPEG string]\n Raises:\n ValueError: if user does not provide bounding box\n \"\"\"\n # convert the image_features dictionary to array\n images = []\n tmp_dict = {}\n for key, value in image_features.items():\n tmp_dict[int(key[-3:])] = image_features[key]\n image_features.clear()\n for index in range(len(tmp_dict)):\n images.append(tmp_dict[index])\n\n # transfer the images list into a tensor\n for idx, image in enumerate(images):\n images[idx] = tf.expand_dims(image, 0)\n result = tf.concat(images, 0)\n return result\n\n\ndef parse_example_proto(example_serialized, num_steps):\n \"\"\" Parses an Example proto containing a training example of a video clip.\n The output of the convert_to_records.py video preprocessing script is a \n dataset containing serialized Example protocol buffers. 
Each Example proto \n contains the following fields:\n image/height: 200\n image/width: 100\n image/colorspace: 'RGB'\n image/channels: 3\n image/class/label: 2\n image/class/text: 'walking'\n image/format: 'JPEG'\n image/filename: '00001.JPEG'\n raw/image/001: \n ...\n raw/image/n: \n Args:\n example_serialized: scalar Tensor tf.string containing a serialized\n Example protocol buffer.\n Returns:\n image_features: A dictionary containing strings providing JPEG\n encoding of all the images of a video clip.\n label: Tensor tf.int32 containing the label.\n text: Tensor tf.string containing the human-readable label.\n filename: the filename of the image\n \"\"\"\n # Dense features in Example proto.\n feature_map = {\n 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,\n default_value=-1),\n 'image/class/text': tf.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n 'image/filename': tf.FixedLenFeature([], dtype=tf.string,\n default_value='')\n }\n features = tf.parse_single_example(example_serialized, feature_map)\n label = tf.cast(features['image/class/label'], dtype=tf.int32)\n # subtract 1 from the label value, because the previous label values range\n # from (1..n)\n label = tf.subtract(label, tf.constant(1))\n\n # images data in the Example proto\n image_map = {}\n for index in range(num_steps):\n image_map['raw/image/%03d' % index] = tf.FixedLenFeature(\n [],\n dtype=tf.string,\n default_value='')\n image_features = tf.parse_single_example(example_serialized, image_map)\n\n return (image_features,\n label,\n features['image/class/text'],\n features['image/filename'])\n\n\ndef batch_inputs(dataset, config, train, num_preprocess_threads=4):\n \"\"\"Construct batches of training or evaluation examples from the video\n dataset.\n Args:\n dataset: instance of Dataset class specifying the dataset. 
See\n dataset.py for details.\n config: class, configuration\n train: boolean, shuffle the dataset or not\n num_preprocess_threads: integer, total number of preprocessing threads\n Returns:\n videos: 2-D string Tensor of [batch_size, num_steps] a batch of\n video, each video is a dictionary containing strings providing\n JPEG encoding of all the images of a video clip\n labels: 1-D integer Tensor of [batch_size].\n filenames: an array contains all the filenames\n Raises:\n ValueError: if data is not found\n \"\"\"\n with tf.name_scope('batch_processing'):\n batch_size = config['batch_size']\n data_files = dataset.data_files()\n if data_files is None:\n raise ValueError('No data files found for this dataset')\n\n # Create filename_queue\n if train:\n filename_queue = tf.train.string_input_producer(data_files,\n shuffle=True,\n capacity=16)\n else:\n filename_queue = tf.train.string_input_producer(data_files,\n shuffle=False,\n capacity=1)\n\n if num_preprocess_threads % 4:\n raise ValueError('Please make num_preprocess_threads a multiple '\n 'of 4 (%d % 4 != 0).', num_preprocess_threads)\n\n reader = dataset.reader()\n _, example_serialized = reader.read(filename_queue)\n\n videos_and_labels_and_filenames = []\n\n # Parse a serialized Example proto to extract the video and metadata.\n image_features, label_index, text, filename = parse_example_proto(\n example_serialized, config['num_steps'])\n video = video_preprocessing(image_features)\n videos_and_labels_and_filenames.append([video,\n label_index,\n filename])\n\n videos, label_index_batch, filename_batch = tf.train.batch_join(\n videos_and_labels_and_filenames,\n batch_size=batch_size,\n capacity=2 * num_preprocess_threads * batch_size)\n\n # Convert the label to one hot vector\n labels = tf.reshape(label_index_batch, [batch_size])\n labels_one_hot = tf.one_hot(labels, dataset.num_classes(), 1, 0)\n\n return (videos,\n labels_one_hot,\n tf.reshape(filename_batch, [batch_size]))","sub_path":"data/video_processing.py","file_name":"video_processing.py","file_ext":"py","file_size_in_byte":11007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"346058118","text":"import torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nfrom trainers import ImageClassifierTrainer\nfrom utils import join_path\nfrom models import SmallClassifier\nfrom datasets import CIFAR10\n\n\ndef main():\n #########################\n # (0) hard code configs #\n #########################\n DATA_BASE_DIR = join_path('datasets', 'cifar10')\n DATA_SET_NAME = 'CIFAR10'\n BATCH_SIZE = 64\n NUM_WORKERS = 8\n TRAIN_STEPS = 1000\n VAL_EVERY = 100\n LOG_EVERY = 50\n NAME = 'SmallCNN'\n CUDA = 3\n RUN_ID = 'example'\n PLOT_EVERY = 500\n NUM_CLASSES = 10\n DEBUG = False\n\n #######################\n # (1) Define datasets #\n #######################\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize(\n mean=(0.4914, 0.4822, 0.4465),\n std=(0.2470, 0.2435, 0.2616)\n )])\n\n train_set = CIFAR10(root=DATA_BASE_DIR, train=True,\n download=True, transform=transform)\n train_loader = DataLoader(train_set, batch_size=BATCH_SIZE,\n shuffle=True, num_workers=NUM_WORKERS)\n\n test_set = CIFAR10(root=DATA_BASE_DIR, train=False,\n download=True, transform=transform)\n test_loader = DataLoader(test_set, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=NUM_WORKERS)\n\n ####################\n # (2) Define model #\n ####################\n model = 
SmallClassifier(num_classes=NUM_CLASSES)\n\n ###############################################################\n # (3) Define loss function inside the Trainer's step function #\n ###############################################################\n\n ########################\n # (4) Define optimizer #\n ########################\n optimizer = torch.optim.SGD(params=model.parameters(), lr=0.001)\n\n ####################\n # (5) Init trainer #\n ####################\n trainer = ImageClassifierTrainer(model=model,\n name=NAME,\n optimizer=optimizer,\n dataset_name=DATA_SET_NAME,\n train_loader=train_loader,\n train_steps=TRAIN_STEPS,\n val_every=VAL_EVERY,\n val_loader=test_loader, # use test set for validation\n log_every=LOG_EVERY,\n run_id=RUN_ID,\n cuda=CUDA,\n plot_every=PLOT_EVERY,\n debug=DEBUG)\n\n ######################\n # (6) Begin Training #\n ######################\n trainer.train()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"exp1_CNNImageClassifier.py","file_name":"exp1_CNNImageClassifier.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"205854565","text":"# 정은\n\nimport sys\nsys.stdin = open('input.txt','r')\n\nkeys = ['0001101', '0011001', '0010011', '0111101', '0100011',\n '0110001', '0101111', '0111011', '0110111', '0001011']\n\nT = int(input())\nfor t in range(1, T + 1):\n N, M = map(int, input().split())\n G = []\n for i in range(N):\n G.append(input())\n\n r = []\n for i in G:\n j = len(i)\n while j > 0:\n if int(i) == 0: break\n if i[j - 7:j] in keys:\n r.append(keys.index(i[j - 7:j]))\n j -= 7\n continue\n j -= 1\n else:\n break\n\n sum1 = result = ans = 0\n\n for i in range(len(r)):\n if i % 2 == 1:\n sum1 += r[i]\n else:\n result += r[i]\n result += sum1 * 3\n\n if result % 10 == 0:\n for i in r:\n ans += i\n else:\n ans = 0\n print(f\"#{t} {ans}\")","sub_path":"Algorithm/19.03/20/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"244576197","text":"import random\nimport string\nfrom random import randbytes\n\nimport base91\n\nTEXT = \"The quick brown\\r\\nfox\\tjumps\\nover\\rthe lazy\\n\\rdog!\"\nPANGRAM = \"Thequickbrownfoxjumpsoverthelazydog!\"\nDATA = [88, 136, 162, 112, 31, 156, 195, 75, 208, 5, 61, 106, 20, 163, 227, 172, 240, 150, 163, 100, 63, 170, 82,\n 175, 58, 17, 203, 5, 3]\n\n\ndef test_static_alphabet():\n assert len(base91.BASE91_ALPHABET) == base91.BASE91_LEN\n\n\ndef test_static_decode():\n data = base91.decode(TEXT)\n assert list(data) == DATA\n\n\ndef test_static_encode():\n text = base91.encode(bytes(DATA))\n assert text == PANGRAM\n\n\ndef test_refurbish_small():\n for n in range(33):\n original_data = randbytes(n)\n text = base91.encode(original_data)\n refurbish_data = base91.decode(text)\n assert len(original_data) == len(refurbish_data)\n assert original_data == refurbish_data, text\n\n\ndef test_refurbish_large():\n original_data = randbytes(65536)\n text = base91.encode(original_data)\n refurbish_data = base91.decode(text)\n assert len(original_data) == len(refurbish_data)\n assert original_data == refurbish_data, text\n\n\ndef test_stress_full_decode():\n text = \"\"\n text_size = random.randint(0, 65536)\n while text_size > len(text):\n text += chr(random.randint(0, 0x10FFFF))\n assert len(text) == text_size\n data = base91.decode(text)\n assert len(data) <= text_size\n\n\ndef test_stress_ascii_decode():\n text = 
\"\"\n text_size = random.randint(0, 65536)\n while text_size > len(text):\n text += random.choice(string.printable)\n assert len(text) == text_size\n data = base91.decode(text)\n assert len(data) <= text_size\n","sub_path":"test_base91.py","file_name":"test_base91.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"223423827","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api\n\n\nclass EvaluationLineBtp(models.Model):\n _name = 'btp_price_survey.abstract_evaluation.line'\n _inherit = 'btp_price_survey.weekly_planning'\n\n stage_id = fields.Many2one('btp_price_survey.stage', string='Étape')\n execution_id = fields.Many2one('btp_price_survey.execution', string='Suivi de chantier', ondelete=\"cascade\")\n forecast_id = fields.Many2one('btp_price_survey.forecast', string='Prévision', ondelete=\"cascade\")\n analytical_account = fields.Many2one(related='stage_id.analytical_account', string='N° compte analytique')\n unit = fields.Many2one(related='stage_id.unit', string='Unité')\n unit_price = fields.Float('Prix unitaire')\n weekly_quantity = fields.Float('Qté hebdo', compute='_compute_weekly_quantity', store=True)\n weekly_amount = fields.Float('Mnt hebdo', compute='_compute_weekly_amount', store=True)\n global_quantity = fields.Float('Qté globale', compute='_compute_global_quantity', store=True)\n global_amount = fields.Float('Mnt global', compute='_compute_global_amount', store=True)\n\n @api.one\n @api.depends('monday_quantity', 'tuesday_quantity', 'wednesday_quantity', 'thursday_quantity', 'friday_quantity',\n 'saturday_quantity', 'sunday_quantity')\n def _compute_weekly_quantity(self):\n self.weekly_quantity = self.monday_quantity + self.tuesday_quantity + self.wednesday_quantity + self.thursday_quantity + self.friday_quantity + self.saturday_quantity + self.sunday_quantity\n\n @api.one\n @api.depends('weekly_quantity', 'unit_price')\n def _compute_weekly_amount(self):\n self.weekly_amount = self.unit_price * self.weekly_quantity\n\n @api.one\n @api.depends('stage_id', 'weekly_quantity')\n def _compute_global_quantity(self):\n evaluation_obj = None\n if self.forecast_id:\n evaluation_obj = self.env['btp_price_survey.forecast'].search(\n [('construction_site_id', '=', self.forecast_id.construction_site_id.id), ('state', '=', 'close')],\n limit=1,\n order='id desc')\n if self.execution_id:\n evaluation_obj = self.env['btp_price_survey.execution'].search(\n [('construction_site_id', '=', self.execution_id.construction_site_id.id), ('state', '=', 'close')],\n limit=1,\n order='id desc')\n if not evaluation_obj:\n self.global_quantity = self.weekly_quantity\n else:\n for line in evaluation_obj.line_ids:\n if line.stage_id == self.stage_id:\n self.global_quantity = line.global_quantity + self.weekly_quantity\n\n @api.onchange('stage_id')\n def _onchange_unit_price(self):\n if self.stage_id:\n self.unit_price = self.stage_id.price_total\n\n @api.one\n @api.depends('global_quantity', 'unit_price')\n def _compute_global_amount(self):\n self.global_amount = self.unit_price * self.global_quantity\n","sub_path":"Btp/addons/btp_price_survey/models/evaluation_line.py","file_name":"evaluation_line.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"460125137","text":"import pandas as pd\n# import geopandas as gpd\n\n\nclass TrackConverter():\n\n \"\"\"Handles the envirocar Tracks\"\"\"\n\n 
def __init__(self):\n print(\"Initializing TrackConverter class\")\n # self.track = track\n # self.crs = track.crs\n\n \"\"\" Returns a geoDataFrame object with the movingpandas plain format\"\"\"\n\n def to_movingpandas(self, track):\n\n # gdf = self.track.copy()\n gdf = track\n gdf = gdf.reindex(columns=(['geometry'] + list([a for a in sorted(\n gdf.columns) if a != 'geometry'])), copy=True)\n gdf['time'] = gdf['time'].astype('datetime64[ns]')\n gdf.set_index('time', inplace=True)\n gdf.index.rename('t', inplace=True)\n return (gdf)\n\n \"\"\" Returns a dataFrame object with the scikitmobility plain format\"\"\"\n\n def to_scikitmobility(self):\n gdf = self.track.copy()\n gdf['lat'] = gdf.geometry.x\n gdf['lng'] = gdf.geometry.y\n gdf.rename(columns=({\"time\": \"datetime\", 'sensor.id': 'uid',\n 'track.id': 'tid'}), inplace=True)\n gdf['datetime'] = gdf['datetime'].astype('datetime64[ns]')\n gdf['tid'] = gdf['tid'].astype(str)\n gdf['uid'] = gdf['uid'].astype(str)\n columns = ['uid', 'tid', 'lat', 'lng', 'datetime']\n gdf = gdf.reindex(columns=(columns + list([a for a in sorted(\n gdf.columns) if a not in columns])), copy=True)\n df = pd.DataFrame(gdf)\n return(df)\n","sub_path":"envirocar/trajectories/track_converter.py","file_name":"track_converter.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"130575822","text":"import conf\r\nimport json,time\r\nimport requests\r\nfrom boltiot import Sms,Bolt\r\nselling_price = 600000.00\r\napi_key = \"3504af71-041a-45c4-b79f-27aa81c2e572\"\r\ndevice_id = \"BOLT3851580\"\r\nsms = Sms(conf.SID, conf.AUTH_TOKEN, conf.TO_NUMBER, conf.FROM_NUMBER)\r\nmybolt = Bolt(api_key, device_id)\r\n\r\ndef price_check():\r\n url = \"https://min-api.cryptocompare.com/data/price\"\r\n querystring = {\"fsym\":\"BTC\",\"tsyms\":\"INR\"}\r\n response = requests.request(\"GET\", url, params=querystring)\r\n response = json.loads(response.text)\r\n current_price = response['INR']\r\n return current_price\r\n\r\nwhile True:\r\n market_price = price_check()\r\n print ('market price is: ',market_price)\r\n print ('Selling price is: ',selling_price)\r\n time.sleep(10)\r\n if market_price > selling_price:\r\n print(\"Making request to Twilio to send a SMS\")\r\n response = sms.send_sms(\"The Current market value of bitcoin is\"+str(market_price)+ \" it is higher than fixed Selling price so time to Sell now\")\r\n print(\"Response received from Twilio is: \" + str(response))\r\n print(\"Status of SMS at Twilio is :\" + str(response.status))\r\n mybolt.digitalWrite(\"0\",\"HIGH\")\r\n time.sleep(5)\r\n mybolt.digitalWrite(\"0\",\"LOW\")\r\n print(\"Process completed.\")\r\n ","sub_path":"Bitcoin price alert using python/cryptoAlert.py","file_name":"cryptoAlert.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"54989957","text":"from create_singly_list_node import ListNode, listNodeToString, stringToListNode\n\n\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"\n https://leetcode.com/problems/remove-nth-node-from-end-of-list/\n // Time Complexity : O(n)\n // Space Complexity : O(1)\n // Did this code successfully run on Leetcode : Yes\n // Three line explanation of solution in plain english :\n - Make dummy node for edge case, when we want to delete\n - Make the gap required unless count is not greater than\n - Move both slow and fast 
pointer together\n        \"\"\"\n        # edge case\n        if not head:\n            return head\n\n        # for the edge case when n = 1\n        dummy = ListNode(-1)\n        dummy.next = head\n\n        slow = fast = dummy\n        # traverse until count (the gap)\n        # is <= the required n\n        count = 0\n        while count <= n:\n            fast = fast.next\n            count += 1\n        # move both slow and fast simultaneously\n        while fast:\n            slow = slow.next\n            fast = fast.next\n        slow.next = slow.next.next\n        return dummy.next\n\n    def removeNthFromEndBruteForce(self, head: ListNode, n: int) -> ListNode:\n        # edge case\n        if not head:\n            return head\n\n        dummy = ListNode(-1)\n        dummy.next = head\n        length = 1\n        cur = dummy\n        while cur and cur.next:\n            length += 1\n            cur = cur.next\n        cur = dummy\n        count = 1\n        while count < length - n:\n            cur = cur.next\n            count += 1\n        cur.next = cur.next.next\n        return dummy.next\n\n\nif __name__ == '__main__':\n    h = Solution()\n    head = stringToListNode([3, 2, 0, -4])\n    new_head = h.removeNthFromEnd(head, 2)  # call on the instance; n=2 is an arbitrary demo value\n    print(listNodeToString(new_head))\n","sub_path":"19_remove_nth_node_from_end_of_list.py","file_name":"19_remove_nth_node_from_end_of_list.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"76445390","text":"import tensorflow as tf\r\nimport numpy as np\r\nfrom random import sample\r\ndef split_data_set(x,y,ratio=[0.7,0.3]):\r\n    data_len=len(x)\r\n    lens=[int(data_len*rati) for rati in ratio]\r\n    trainx,trainy=x[:lens[0]],y[:lens[0]]\r\n    testx,testy=x[lens[0]:],y[lens[0]:]\r\n    return (trainx,trainy), (testx,testy)\r\n\r\ndef batch_gen(x, y, batch_size):\r\n    while True:\r\n        for i in range(0, len(x), batch_size):\r\n            if (i+1)*batch_size < len(x):\r\n                yield x[i : (i+1)*batch_size ].T, y[i : (i+1)*batch_size ].T\r\n\r\ndef rand_batch_gen(x, y, batch_size):\r\n    while True:\r\n        sample_idx = sample(list(np.arange(len(x))), batch_size)  # sample batch_size indices, not a hardcoded 7\r\n        yield x[sample_idx].T, y[sample_idx].T\r\n\r\ndef decode(sequence, lookup, separator=''):\r\n    return separator.join([ lookup[element] for element in sequence if element ])\r\n","sub_path":"seq-research/data_utills.py","file_name":"data_utills.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"498262920","text":"from datetime import datetime\nfrom flask import Flask, request, redirect, render_template, session, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:build-a-blog@localhost:8889/build-a-blog'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\n\n\n\n# create db\nclass Blog(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    title = db.Column(db.String(120), unique=True)\n    body = db.Column(db.Text)\n    date = db.Column(db.DateTime)\n\n    def __init__(self, title):\n        self.title = title\n        self.date = datetime.utcnow()\n\n    def __repr__(self):\n        return '' % self.title\n\n\n@app.route('/')\ndef index():\n    return redirect('/blog')\n\n\n@app.route('/posts', methods=['GET'])\ndef get_posts():\n    return Blog.query.all()\n\n\n# order post\n@app.route('/get_ordered_posts', methods=['GET'])\ndef get_ordered_posts():\n    return Blog.query.order_by(\"date desc\").all()\n\n\n# homepage\n@app.route('/blog', methods=['GET'])\ndef blog():\n    id = request.args.get('id', None)\n\n    if id:\n        post = Blog.query.filter_by(id=id).first()\n        return render_template('posts.html', post=post)\n\n    posts = get_ordered_posts()\n    return 
render_template('blog.html', posts=posts)\n\n\n# Add new post\n@app.route('/newpost', methods=['GET'])\ndef newpost():\n title = request.args.get('title', '')\n body = request.args.get('body', '')\n title_error = request.args.get('title_error', '')\n body_error = request.args.get('body_error', '')\n\n return render_template('newpost.html', title=title, body=body, title_error=title_error, body_error=body_error)\n\n\n# Route after adding new post\n@app.route('/post', methods=['POST'])\ndef post():\n title = request.form.get('title', '')\n body = request.form.get('body', '')\n title_error = request.form.get('title_error', '')\n body_error = request.form.get('body_error', '')\n\n # validation for empty values\n if not title or not body:\n if title == '':\n title_error = \"Please provide a title\"\n if body == '':\n body_error = \"Please write your blog\"\n return redirect(f'/newpost?title={title}&body={body}&title_error={title_error}&body_error={body_error}')\n\n # Add new post\n new_post = Blog(title)\n new_post.body = body\n db.session.add(new_post)\n db.session.commit()\n\n # Load the new post in an individual page\n id = new_post.id\n posts = Blog.query.filter_by(id=id).all()\n return render_template('blog.html', posts=posts)\n\n\nif __name__ == '__main__':\n app.run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"17316570","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'member'\n\nurlpatterns = [\n url(regex=r'^login$', view=views.login, name='login'),\n url(regex=r'^logout$', view=views.logout, name='logout'),\n url(regex=r'^registration$', view=views.registration, name='registration'),\n url(regex=r'^model/registration$', view=views.model_registration, name='model_registration'),\n url(regex=r'^company/registration$', view=views.company_registration, name='company_registration'),\n url(regex=r'^model/mypage$', view=views.model_mypage, name='model_mypage'),\n # url(regex=r'^company/mypage$', view=views.company_mypage, name='company_mypage'),\n]\n","sub_path":"ssProject/member/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"164356278","text":"from tkinter import *\r\nfrom tkinter.ttk import *\r\nfrom tkinter.messagebox import *\r\nfrom tkinter.filedialog import (askopenfilename,\r\n askopenfilenames,\r\n askdirectory,\r\n asksaveasfilename)\r\n# from WebFlash import *\r\nfrom WebFlash.WebBase import *\r\n\r\nclass MainPage(object):\r\n def __init__(self,master,driver):\r\n self.root = master\r\n self.root.geometry('%dx%d' % (400, 180)) # 设置窗口大小\r\n self.driver = driver\r\n self.updatetype = StringVar()\r\n self.filepath = StringVar()\r\n self.createPage()\r\n self.flag = False\r\n\r\n def createPage(self):\r\n self.page = Frame(self.root)\r\n self.page.pack()\r\n Label(self.page).grid(row=0, stick=W)\r\n Label(self.page,text=\"Update Type:\").grid(row=1,stick=W,pady=10)\r\n self.selecttype = Combobox(self.page,values=[\"BIOS\",\"BMC\"])\r\n self.selecttype.grid(row=1,column=1,stick=E)\r\n Button(self.page,text=\"select image\",command=self.selectimage).grid(row=2,stick=W,pady=10)\r\n Entry(self.page,textvariable=self.filepath,width=25).grid(row=2,column=1,stick=E)\r\n Button(self.page,text=\"Start update\",command=self.startupdate).grid(row=3,stick=W,pady=10)\r\n Button(self.page,text=\"Sign 
out\",command=self.signout).grid(row=3,column=1,stick=E)\r\n\r\n def signout(self):\r\n self.page.quit()\r\n self.driver.close()\r\n #self.page.destroy()\r\n # LoginPage(self.root,self.driver)\r\n\r\n def selectimage(self):\r\n if self.selecttype.get() == \"BIOS\":\r\n filetype=[(\"BIOS\",\"*.BIN\"),(\"BIOS\",\"*.ROM\")]\r\n if self.selecttype.get() == \"BMC\":\r\n filetype = [(\"BMC\",\"*.ima\")]\r\n if len(self.selecttype.get()) == 0:\r\n showinfo(\"Warning\",\"Please select update type\")\r\n self.filepath.set(askopenfilename(title=\"选择文件\", filetypes=filetype))\r\n self.flag = True\r\n\r\n def veiw(self):\r\n top = Toplevel(self.root)\r\n top.geometry(\"300x100+520+260\")\r\n self.canvas = Canvas(top, width=170, height=26, bg=\"white\")\r\n # 创建一个矩形外边框(距离左边,距离顶部,矩形宽度,矩形高度),线型宽度,颜色\r\n self.out_line = self.canvas.create_rectangle(2, 2, 180, 27, width=1, outline=\"black\")\r\n self.canvas.grid(row=0, column=1, ipadx=5)\r\n\r\n def startupdate(self):\r\n if self.flag:\r\n if WebUpdate(self.driver).ready(self.selecttype.get(),self.filepath.get()):\r\n self.veiw()\r\n else:\r\n showinfo(\"Warning\",\"Please try again\")\r\n else:\r\n showwarning(message=\"Please select image file\")\r\n\r\n\r\n# root = Tk()\r\n# MainPage(root)\r\n# root.mainloop()\r\n\r\n\r\n","sub_path":"WebFlash/MainPage.py","file_name":"MainPage.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"315751258","text":"\"\"\"Base grammar, Ref, Anything and Nothing.\"\"\"\n\nimport copy\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, List, Optional, Union, Type, Tuple, Any\n\nfrom sqlfluff.core.errors import SQLParseError\nfrom sqlfluff.core.string_helpers import curtail_string\n\nfrom sqlfluff.core.parser.segments import BaseSegment, BracketedSegment, allow_ephemeral\nfrom sqlfluff.core.parser.helpers import trim_non_code_segments, iter_indices\nfrom sqlfluff.core.parser.match_result import MatchResult\nfrom sqlfluff.core.parser.match_logging import (\n parse_match_logging,\n LateBoundJoinSegmentsCurtailed,\n)\nfrom sqlfluff.core.parser.match_wrapper import match_wrapper\nfrom sqlfluff.core.parser.matchable import Matchable\nfrom sqlfluff.core.parser.context import ParseContext\nfrom sqlfluff.core.parser.parsers import BaseParser\n\n# Either a Matchable (a grammar or parser) or a Segment CLASS\nMatchableType = Union[Matchable, Type[BaseSegment]]\n\nif TYPE_CHECKING:\n from sqlfluff.core.dialects.base import Dialect # pragma: no cover\n\n\n@dataclass\nclass BracketInfo:\n \"\"\"BracketInfo tuple for keeping track of brackets during matching.\n\n This is used in BaseGrammar._bracket_sensitive_look_ahead_match but\n defined here for type checking.\n \"\"\"\n\n bracket: BaseSegment\n segments: Tuple[BaseSegment, ...]\n\n def to_segment(self, end_bracket):\n \"\"\"Turn the contained segments into a bracketed segment.\"\"\"\n return BracketedSegment(\n segments=self.segments,\n start_bracket=(self.bracket,),\n end_bracket=end_bracket,\n )\n\n\ndef cached_method_for_parse_context(func):\n \"\"\"A decorator to cache the output of this method for a given parse context.\n\n This cache automatically invalidates if the uuid\n of the parse context changes. 
The value is store\n in the __dict__ attribute of the class against a\n key unique to that function.\n \"\"\"\n cache_key = \"__cache_\" + func.__name__\n\n def wrapped_method(self, parse_context: ParseContext, **kwargs):\n \"\"\"Cache the output of the method against a given parse context.\n\n Note: kwargs are not taken into account in the caching, but\n for the current use case of dependency loop debugging that's\n ok.\n \"\"\"\n cache_tuple: Tuple = self.__dict__.get(cache_key, (None, None))\n # Do we currently have a cached value?\n if cache_tuple[0] == parse_context.uuid:\n return cache_tuple[1]\n # Generate a new value, cache it and return\n result = func(self, parse_context=parse_context, **kwargs)\n self.__dict__[cache_key] = (parse_context.uuid, result)\n return result\n\n return wrapped_method\n\n\nclass BaseGrammar(Matchable):\n \"\"\"Grammars are a way of composing match statements.\n\n Any grammar must implement the `match` function. Segments can also be\n passed to most grammars. Segments implement `match` as a classmethod. Grammars\n implement it as an instance method.\n\n \"\"\"\n\n is_meta = False\n # Are we allowed to refer to keywords as strings instead of only passing\n # grammars or segments?\n allow_keyword_string_refs = True\n equality_kwargs: Tuple[str, ...] = (\"optional\", \"allow_gaps\")\n\n @staticmethod\n def _resolve_ref(elem):\n \"\"\"Resolve potential string references to things we can match against.\"\"\"\n initialisers = [\n # t: instance / f: class, ref, func\n (True, str, Ref.keyword),\n (True, BaseGrammar, lambda x: x),\n (True, BaseParser, lambda x: x),\n (False, BaseSegment, lambda x: x),\n ]\n # Get-out clause for None\n if elem is None:\n return None\n\n for instance, init_type, init_func in initialisers:\n if (instance and isinstance(elem, init_type)) or (\n not instance and issubclass(elem, init_type)\n ):\n return init_func(elem)\n raise TypeError(\n \"Grammar element [{!r}] was found of unexpected type [{}] was \"\n \"found.\".format(elem, type(elem)) # pragma: no cover\n )\n\n def __init__(\n self,\n *args: Union[MatchableType, str],\n allow_gaps=True,\n optional=False,\n ephemeral_name=None,\n ):\n \"\"\"Deal with kwargs common to all grammars.\n\n Args:\n *args: Any number of elements which because the subjects\n of this grammar. Optionally these elements may also be\n string references to elements rather than the Matchable\n elements themselves.\n allow_gaps (:obj:`bool`, optional): Does this instance of the\n grammar allow gaps between the elements it matches? This\n may be exhibited slightly differently in each grammar. See\n that grammar for details. Defaults `True`.\n optional (:obj:`bool`, optional): In the context of a sequence,\n is this grammar *optional*, i.e. can it be skipped if no\n match is found. Outside of a Sequence, this option does nothing.\n Defaults `False`.\n ephemeral_name (:obj:`str`, optional): If specified this allows\n the grammar to match anything, and create an EphemeralSegment\n with the given name in its place. The content of this grammar\n is passed to the segment, and will become the parse grammar\n for it. 
If used widely this is an excellent way of breaking\n up the parse process and also signposting the name of a given\n chunk of code that might be parsed separately.\n \"\"\"\n # We provide a common interface for any grammar that allows positional elements.\n # If *any* for the elements are a string and not a grammar, then this is a\n # shortcut to the Ref.keyword grammar by default.\n if self.allow_keyword_string_refs:\n self._elements = []\n for elem in args:\n self._elements.append(self._resolve_ref(elem))\n else:\n self._elements = list(args)\n\n # Now we deal with the standard kwargs\n self.allow_gaps = allow_gaps\n self.optional = optional\n # ephemeral_name is a flag to indicate whether we need to make an\n # EphemeralSegment class. This is effectively syntactic sugar\n # to allow us to avoid specifying a EphemeralSegment directly in a dialect.\n # If this is the case, the actual segment construction happens in the\n # match_wrapper.\n self.ephemeral_name = ephemeral_name\n\n def is_optional(self):\n \"\"\"Return whether this segment is optional.\n\n The optional attribute is set in the __init__ method.\n \"\"\"\n return self.optional\n\n @match_wrapper()\n @allow_ephemeral\n def match(self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext):\n \"\"\"Match a list of segments against this segment.\n\n Matching can be done from either the raw or the segments.\n This raw function can be overridden, or a grammar defined\n on the underlying class.\n \"\"\"\n raise NotImplementedError(\n f\"{self.__class__.__name__} has no match function implemented\"\n ) # pragma: no cover\n\n @cached_method_for_parse_context\n def simple(self, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:\n \"\"\"Does this matcher support a lowercase hash matching route?\"\"\"\n return None\n\n @classmethod\n def _longest_trimmed_match(\n cls,\n segments: Tuple[BaseSegment, ...],\n matchers: List[MatchableType],\n parse_context: ParseContext,\n trim_noncode=True,\n terminators: List[MatchableType] = None,\n ) -> Tuple[MatchResult, Optional[MatchableType]]:\n \"\"\"Return longest match from a selection of matchers.\n\n Prioritise the first match, and if multiple match at the same point the longest.\n If two matches of the same length match at the same time, then it's the first in\n the iterable of matchers.\n\n Returns:\n `tuple` of (match_object, matcher).\n\n \"\"\"\n terminated = False\n\n # Have we been passed an empty list?\n if len(segments) == 0: # pragma: no cover\n return MatchResult.from_empty(), None\n\n # If gaps are allowed, trim the ends.\n if trim_noncode:\n pre_nc, segments, post_nc = trim_non_code_segments(segments)\n\n best_match_length = 0\n # iterate at this position across all the matchers\n for matcher in matchers:\n # MyPy seems to require a type hint here. Not quite sure why.\n res_match: MatchResult = matcher.match(\n segments, parse_context=parse_context\n )\n if res_match.is_complete():\n # Just return it! 
(WITH THE RIGHT OTHER STUFF)\n if trim_noncode:\n return (\n MatchResult.from_matched(\n pre_nc + res_match.matched_segments + post_nc\n ),\n matcher,\n )\n else:\n return res_match, matcher\n elif res_match:\n # We've got an incomplete match, if it's the best so far keep it.\n if res_match.trimmed_matched_length > best_match_length:\n best_match = res_match, matcher\n best_match_length = res_match.trimmed_matched_length\n\n if terminators:\n\n _, segs, _ = trim_non_code_segments(\n best_match[0].unmatched_segments\n )\n for terminator in terminators:\n terminator_match: MatchResult = terminator.match(\n segs, parse_context=parse_context\n )\n\n if terminator_match.matched_segments:\n terminated = True\n break\n\n if terminated:\n break\n\n # We could stash segments here, but given we might have some successful\n # matches here, we shouldn't, because they'll be mutated in the wrong way.\n # Eventually there might be a performance gain from doing that sensibly\n # here.\n\n # If we get here, then there wasn't a complete match. If we\n # has a best_match, return that.\n if best_match_length > 0:\n if trim_noncode:\n return (\n MatchResult(\n pre_nc + best_match[0].matched_segments,\n best_match[0].unmatched_segments + post_nc,\n ),\n best_match[1],\n )\n else:\n return best_match\n # If no match at all, return nothing\n return MatchResult.from_unmatched(segments), None\n\n @classmethod\n def _look_ahead_match(\n cls,\n segments: Tuple[BaseSegment, ...],\n matchers: List[MatchableType],\n parse_context: ParseContext,\n ) -> Tuple[Tuple[BaseSegment, ...], MatchResult, Optional[MatchableType]]:\n \"\"\"Look ahead for matches beyond the first element of the segments list.\n\n This function also contains the performance improved hash-matching approach to\n searching for matches, which should significantly improve performance.\n\n Prioritise the first match, and if multiple match at the same point the longest.\n If two matches of the same length match at the same time, then it's the first in\n the iterable of matchers.\n\n Returns:\n `tuple` of (unmatched_segments, match_object, matcher).\n\n \"\"\"\n parse_match_logging(\n cls.__name__,\n \"_look_ahead_match\",\n \"IN\",\n parse_context=parse_context,\n v_level=4,\n ls=len(segments),\n seg=LateBoundJoinSegmentsCurtailed(segments),\n )\n\n # Have we been passed an empty tuple?\n if not segments: # pragma: no cover TODO?\n return ((), MatchResult.from_empty(), None)\n\n # Here we enable a performance optimisation. 
Most of the time in this cycle\n # happens in loops looking for simple matchers which we should\n # be able to find a shortcut for.\n # First: Assess the matchers passed in, if any are\n # \"simple\", then we effectively use a hash lookup across the\n # content of segments to quickly evaluate if the segment is present.\n # Matchers which aren't \"simple\" still take a slower route.\n _matchers = [\n (matcher, matcher.simple(parse_context=parse_context))\n for matcher in matchers\n ]\n simple_matchers = [matcher for matcher in _matchers if matcher[1]]\n non_simple_matchers = [matcher[0] for matcher in _matchers if not matcher[1]]\n best_simple_match = None\n if simple_matchers:\n # If they're all simple we can use a hash match to identify the first one.\n # Build a buffer of all the upper case raw segments ahead of us.\n str_buff = []\n # For existing compound segments, we should assume that within\n # that segment, things are internally consistent, that means\n # rather than enumerating all the individual segments of a longer\n # one we just dump out the whole segment, but splitting off the\n # first element separated by whitespace. This is a) faster and\n # also b) prevents some really horrible bugs with bracket matching.\n # See https://github.com/sqlfluff/sqlfluff/issues/433\n\n def _trim_elem(seg):\n s = seg.raw_upper.split(maxsplit=1)\n return s[0] if s else \"\"\n\n str_buff = [_trim_elem(seg) for seg in segments]\n match_queue = []\n\n for matcher, simple in simple_matchers:\n # Simple will be a tuple of options\n assert simple\n for simple_option in simple:\n # NOTE: We use iter_indices to make sure we capture\n # all instances of potential matches if there are many.\n # This is important for bracket counting.\n for buff_pos in iter_indices(str_buff, simple_option):\n match_queue.append((matcher, buff_pos, simple_option))\n\n # Sort the match queue. First to process AT THE END.\n # That means we pop from the end.\n match_queue = sorted(match_queue, key=lambda x: x[1])\n\n parse_match_logging(\n cls.__name__,\n \"_look_ahead_match\",\n \"SI\",\n parse_context=parse_context,\n v_level=4,\n mq=match_queue,\n sb=str_buff,\n )\n\n while match_queue:\n # We've managed to match. We can shortcut home.\n # NB: We may still need to deal with whitespace.\n queued_matcher, queued_buff_pos, queued_option = match_queue.pop()\n # Here we do the actual transform to the new segment.\n match = queued_matcher.match(segments[queued_buff_pos:], parse_context)\n if not match:\n # We've had something match in simple matching, but then later\n # excluded. Log but then move on to the next item on the list.\n parse_match_logging(\n cls.__name__,\n \"_look_ahead_match\",\n \"NM\",\n parse_context=parse_context,\n v_level=4,\n _so=queued_option,\n )\n continue\n # Ok we have a match. Because we sorted the list, we'll take it!\n best_simple_match = (segments[:queued_buff_pos], match, queued_matcher)\n\n if not non_simple_matchers:\n # There are no other matchers, we can just shortcut now.\n\n parse_match_logging(\n cls.__name__,\n \"_look_ahead_match\",\n \"SC\",\n parse_context=parse_context,\n v_level=4,\n bsm=None\n if not best_simple_match\n else (\n len(best_simple_match[0]),\n len(best_simple_match[1]),\n best_simple_match[2],\n ),\n )\n\n if best_simple_match:\n return best_simple_match\n else:\n return ((), MatchResult.from_unmatched(segments), None)\n\n # Make some buffers\n seg_buff = segments\n pre_seg_buff: Tuple[BaseSegment, ...] 
= ()\n\n # Loop\n while True:\n # Do we have anything left to match on?\n if seg_buff:\n # Great, carry on.\n pass\n else:\n # We've got to the end without a match, return empty\n return ((), MatchResult.from_unmatched(segments), None)\n\n # We only check the NON-simple ones here for brevity.\n mat, m = cls._longest_trimmed_match(\n seg_buff,\n non_simple_matchers,\n parse_context=parse_context,\n trim_noncode=False,\n )\n\n if mat and not best_simple_match:\n return (pre_seg_buff, mat, m)\n elif mat:\n # Given we have mat - we should always have these two.\n assert m\n assert best_simple_match\n # It will be earlier than the simple one if we've even checked,\n # but there's a chance that this might be *longer*, or just FIRST.\n pre_lengths = (len(pre_seg_buff), len(best_simple_match[0]))\n mat_lengths = (len(mat), len(best_simple_match[1]))\n mat_indexes = (matchers.index(m), matchers.index(best_simple_match[2]))\n if (\n (pre_lengths[0] < pre_lengths[1])\n or (\n pre_lengths[0] == pre_lengths[1]\n and mat_lengths[0] > mat_lengths[1]\n )\n or (\n pre_lengths[0] == pre_lengths[1]\n and mat_lengths[0] == mat_lengths[1]\n and mat_indexes[0] < mat_indexes[1]\n )\n ):\n return (pre_seg_buff, mat, m)\n else:\n # TODO: Make a test case to cover this.\n return best_simple_match # pragma: no cover\n else:\n # If there aren't any matches, then advance the buffer and try again.\n # Two improvements:\n # 1) if we get as far as the first simple match, then return that.\n # 2) be eager in consuming non-code segments if allowed\n if best_simple_match and len(pre_seg_buff) >= len(best_simple_match[0]):\n return best_simple_match\n\n pre_seg_buff += (seg_buff[0],)\n seg_buff = seg_buff[1:]\n\n @classmethod\n def _bracket_sensitive_look_ahead_match(\n cls,\n segments: Tuple[BaseSegment, ...],\n matchers: List[MatchableType],\n parse_context: ParseContext,\n start_bracket: Optional[Matchable] = None,\n end_bracket: Optional[Matchable] = None,\n bracket_pairs_set: str = \"bracket_pairs\",\n ) -> Tuple[Tuple[BaseSegment, ...], MatchResult, Optional[MatchableType]]:\n \"\"\"Same as `_look_ahead_match` but with bracket counting.\n\n NB: Given we depend on `_look_ahead_match` we can also utilise\n the same performance optimisations which are implemented there.\n\n bracket_pairs_set: Allows specific segments to override the available\n bracket pairs. See the definition of \"angle_bracket_pairs\" in the\n BigQuery dialect for additional context on why this exists.\n\n Returns:\n `tuple` of (unmatched_segments, match_object, matcher).\n\n \"\"\"\n # Have we been passed an empty tuple?\n if not segments:\n return ((), MatchResult.from_unmatched(segments), None)\n\n # Get hold of the bracket matchers from the dialect, and append them\n # to the list of matchers. We get them from the relevant set on the\n # dialect. We use zip twice to \"unzip\" them. 
We ignore the first\n # argument because that's just the name.\n _, start_bracket_refs, end_bracket_refs, persists = zip(\n *parse_context.dialect.sets(bracket_pairs_set)\n )\n # These are matchables, probably StringParsers.\n start_brackets = [\n parse_context.dialect.ref(seg_ref) for seg_ref in start_bracket_refs\n ]\n end_brackets = [\n parse_context.dialect.ref(seg_ref) for seg_ref in end_bracket_refs\n ]\n # Add any bracket-like things passed as arguments\n if start_bracket:\n start_brackets += [start_bracket]\n if end_bracket:\n end_brackets += [end_bracket]\n bracket_matchers = start_brackets + end_brackets\n\n # Make some buffers\n seg_buff: Tuple[BaseSegment, ...] = segments\n pre_seg_buff: Tuple[BaseSegment, ...] = ()\n bracket_stack: List[BracketInfo] = []\n\n # Iterate\n while True:\n # Do we have anything left to match on?\n if seg_buff:\n # Yes we have buffer left to work with.\n # Are we already in a bracket stack?\n if bracket_stack:\n # Yes, we're just looking for the closing bracket, or\n # another opening bracket.\n pre, match, matcher = cls._look_ahead_match(\n seg_buff,\n bracket_matchers,\n parse_context=parse_context,\n )\n\n if match:\n # NB: We can only consider this as a nested bracket if the start\n # and end tokens are not the same. If a matcher is both a start\n # and end token we cannot deepen the bracket stack. In general,\n # quoted strings are a typical example where the start and end\n # tokens are the same. Currently, though, quoted strings are\n # handled elsewhere in the parser, and there are no cases where\n # *this* code has to handle identical start and end brackets.\n # For now, consider this a small, speculative investment in a\n # possible future requirement.\n if matcher in start_brackets and matcher not in end_brackets:\n # Add any segments leading up to this to the previous\n # bracket.\n bracket_stack[-1].segments += pre\n # Add a bracket to the stack and add the matches from the\n # segment.\n bracket_stack.append(\n BracketInfo(\n bracket=match.matched_segments[0],\n segments=match.matched_segments,\n )\n )\n seg_buff = match.unmatched_segments\n continue\n elif matcher in end_brackets:\n # Found an end bracket. Does its type match that of\n # the innermost start bracket? E.g. \")\" matches \"(\",\n # \"]\" matches \"[\".\n # For the start bracket we don't have the matcher\n # but we can work out the type, so we use that for\n # the lookup.\n start_index = [\n bracket.type for bracket in start_brackets\n ].index(bracket_stack[-1].bracket.get_type())\n # For the end index, we can just look for the matcher\n end_index = end_brackets.index(matcher)\n bracket_types_match = start_index == end_index\n if bracket_types_match:\n # Yes, the types match. So we've found a\n # matching end bracket. Pop the stack, construct\n # a bracketed segment and carry\n # on.\n\n # Complete the bracketed info\n bracket_stack[-1].segments += (\n pre + match.matched_segments\n )\n # Construct a bracketed segment (as a tuple) if allowed.\n persist_bracket = persists[end_brackets.index(matcher)]\n if persist_bracket:\n new_segments: Tuple[BaseSegment, ...] 
= (\n bracket_stack[-1].to_segment(\n end_bracket=match.matched_segments\n ),\n )\n else:\n new_segments = bracket_stack[-1].segments\n # Remove the bracket set from the stack\n bracket_stack.pop()\n # If we're still in a bracket, add the new segments to\n # that bracket, otherwise add them to the buffer\n if bracket_stack:\n bracket_stack[-1].segments += new_segments\n else:\n pre_seg_buff += new_segments\n seg_buff = match.unmatched_segments\n continue\n else:\n # The types don't match. Error.\n raise SQLParseError(\n f\"Found unexpected end bracket!, \"\n f\"was expecting \"\n f\"{end_brackets[start_index]}, \"\n f\"but got {matcher}\",\n segment=match.matched_segments[0],\n )\n\n else: # pragma: no cover\n raise RuntimeError(\"I don't know how we get here?!\")\n else: # pragma: no cover\n # No match, we're in a bracket stack. Error.\n raise SQLParseError(\n \"Couldn't find closing bracket for opening bracket.\",\n segment=bracket_stack[-1].bracket,\n )\n else:\n # No, we're open to more opening brackets or the thing(s)\n # that we're otherwise looking for.\n pre, match, matcher = cls._look_ahead_match(\n seg_buff,\n matchers + bracket_matchers,\n parse_context=parse_context,\n )\n\n if match:\n if matcher in matchers:\n # It's one of the things we were looking for!\n # Return.\n return (pre_seg_buff + pre, match, matcher)\n elif matcher in start_brackets:\n # We've found the start of a bracket segment.\n # NB: It might not *actually* be the bracket itself,\n # but could be some non-code element preceding it.\n # That's actually ok.\n\n # Add the bracket to the stack.\n bracket_stack.append(\n BracketInfo(\n bracket=match.matched_segments[0],\n segments=match.matched_segments,\n )\n )\n # The matched element has already been added to the bracket.\n # Add anything before it to the pre segment buffer.\n # Reset the working buffer.\n pre_seg_buff += pre\n seg_buff = match.unmatched_segments\n continue\n elif matcher in end_brackets:\n # We've found an unexpected end bracket! This is likely\n # because we're matching a section which should have ended.\n # If we had a match, it would have matched by now, so this\n # means no match.\n parse_match_logging(\n cls.__name__,\n \"_bracket_sensitive_look_ahead_match\",\n \"UEXB\",\n parse_context=parse_context,\n v_level=3,\n got=matcher,\n )\n # From here we'll drop out to the happy unmatched exit.\n else: # pragma: no cover\n # This shouldn't happen!?\n raise NotImplementedError(\n \"This shouldn't happen. Panic in \"\n \"_bracket_sensitive_look_ahead_match.\"\n )\n # Not in a bracket stack, but no match.\n # From here we'll drop out to the happy unmatched exit.\n else:\n # No we're at the end:\n # Now check have we closed all our brackets?\n if bracket_stack: # pragma: no cover\n # No we haven't.\n raise SQLParseError(\n \"Couldn't find closing bracket for opened brackets: \"\n f\"`{bracket_stack}`.\",\n segment=bracket_stack[-1].bracket,\n )\n\n # This is the happy unmatched path. This occurs when:\n # - We reached the end with no open brackets.\n # - No match while outside a bracket stack.\n # - We found an unexpected end bracket before matching something\n # interesting. 
We return with the mutated segments so we can reuse any\n # bracket matching.\n return ((), MatchResult.from_unmatched(pre_seg_buff + seg_buff), None)\n\n def __str__(self): # pragma: no cover TODO?\n return repr(self)\n\n def __repr__(self):\n return \"<{}: [{}]>\".format(\n self.__class__.__name__,\n curtail_string(\n \", \".join(curtail_string(repr(elem), 40) for elem in self._elements),\n 100,\n ),\n )\n\n def __eq__(self, other):\n \"\"\"Two grammars are equal if their elements and types are equal.\n\n NOTE: We use the equality_kwargs tuple on the class to define\n other kwargs which should also be checked so that things like\n \"optional\" is also taken into account in considering equality.\n \"\"\"\n return (\n type(self) is type(other)\n and self._elements == other._elements\n and all(\n getattr(self, k, None) == getattr(other, k, None)\n for k in self.equality_kwargs\n )\n )\n\n def copy(\n self,\n insert: Optional[list] = None,\n at: Optional[int] = None,\n before: Optional[Any] = None,\n remove: Optional[list] = None,\n **kwargs,\n ):\n \"\"\"Create a copy of this grammar, optionally with differences.\n\n This is mainly used in dialect inheritance.\n\n\n Args:\n insert (:obj:`list`, optional): Matchable elements to\n insert. This is inserted pre-expansion so can include\n unexpanded elements as normal.\n at (:obj:`int`, optional): The position in the elements\n to insert the item. Defaults to `None` which means\n insert at the end of the elements.\n before (optional): An alternative to _at_ to determine the\n position of an insertion. Using this inserts the elements\n immediately before the position of this element.\n Note that this is not an _index_ but an element to look\n for (i.e. a Segment or Grammar which will be compared\n with other elements for equality).\n remove (:obj:`list`, optional): A list of individual\n elements to remove from a grammar. Removal is done\n *after* insertion so that order is preserved.\n Elements are searched for individually.\n\n \"\"\"\n # Copy only the *grammar* elements. The rest comes through\n # as is because they should just be classes rather than\n # instances.\n new_elems = [\n elem.copy() if isinstance(elem, BaseGrammar) else elem\n for elem in self._elements\n ]\n if insert:\n if at is not None and before is not None: # pragma: no cover\n raise ValueError(\n \"Cannot specify `at` and `before` in BaseGrammar.copy().\"\n )\n if before is not None:\n try:\n idx = new_elems.index(before)\n except ValueError: # pragma: no cover\n raise ValueError(\n \"Could not insert {} in copy of {}. {} not Found.\".format(\n insert, self, before\n )\n )\n new_elems = new_elems[:idx] + insert + new_elems[idx:]\n elif at is None:\n new_elems = new_elems + insert\n else:\n new_elems = new_elems[:at] + insert + new_elems[at:]\n if remove:\n for elem in remove:\n try:\n new_elems.remove(elem)\n except ValueError: # pragma: no cover\n raise ValueError(\n \"Could not remove {} from copy of {}. 
Not Found.\".format(\n elem, self\n )\n )\n new_seg = copy.copy(self)\n new_seg._elements = new_elems\n return new_seg\n\n\nclass Ref(BaseGrammar):\n \"\"\"A kind of meta-grammar that references other grammars by name at runtime.\"\"\"\n\n # We can't allow keyword refs here, because it doesn't make sense\n # and it also causes infinite recursion.\n allow_keyword_string_refs = False\n\n def __init__(self, *args: str, **kwargs):\n # Any patterns to _prevent_ a match.\n self.exclude = kwargs.pop(\"exclude\", None)\n super().__init__(*args, **kwargs)\n\n @cached_method_for_parse_context\n def simple(\n self, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None\n ) -> Optional[List[str]]:\n \"\"\"Does this matcher support a uppercase hash matching route?\n\n A ref is simple, if the thing it references is simple.\n \"\"\"\n ref = self._get_ref()\n if crumbs and ref in crumbs: # pragma: no cover\n loop = \" -> \".join(crumbs)\n raise RecursionError(f\"Self referential grammar detected: {loop}\")\n return self._get_elem(dialect=parse_context.dialect).simple(\n parse_context=parse_context,\n crumbs=(crumbs or ()) + (ref,),\n )\n\n def _get_ref(self) -> str:\n \"\"\"Get the name of the thing we're referencing.\"\"\"\n # Unusually for a grammar we expect _elements to be a list of strings.\n # Notable ONE string for now.\n if len(self._elements) == 1:\n # We're good on length. Get the name of the reference\n ref = self._elements[0]\n if not isinstance(ref, str): # pragma: no cover\n raise ValueError(\n \"Ref Grammar expects elements to be strings. \"\n f\"Found {ref!r} instead.\"\n )\n return self._elements[0]\n else: # pragma: no cover\n raise ValueError(\n \"Ref grammar can only deal with precisely one element for now. Instead \"\n \"found {!r}\".format(self._elements)\n )\n\n def _get_elem(self, dialect: \"Dialect\") -> Union[Type[BaseSegment], Matchable]:\n \"\"\"Get the actual object we're referencing.\"\"\"\n if dialect:\n # Use the dialect to retrieve the grammar it refers to.\n return dialect.ref(self._get_ref())\n else: # pragma: no cover\n raise ReferenceError(\"No Dialect has been provided to Ref grammar!\")\n\n def __repr__(self):\n return \"\".format(\n \", \".join(self._elements), \" [opt]\" if self.is_optional() else \"\"\n )\n\n @match_wrapper(v_level=4) # Log less for Ref\n @allow_ephemeral\n def match(\n self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext\n ) -> \"MatchResult\":\n \"\"\"Match a list of segments against this segment.\n\n Matching can be done from either the raw or the segments.\n This raw function can be overridden, or a grammar defined\n on the underlying class.\n\n The match element of Ref, also implements the caching\n using the parse_context `denylist` methods.\n \"\"\"\n elem = self._get_elem(dialect=parse_context.dialect)\n\n # First if we have an *exclude* option, we should check that\n # which would prevent the rest of this grammar from matching.\n if self.exclude:\n with parse_context.deeper_match() as ctx:\n if self.exclude.match(segments, parse_context=ctx):\n return MatchResult.from_unmatched(segments)\n\n # First check against the efficiency Cache.\n # We rely on segments not being mutated within a given\n # match cycle and so the ids should continue to refer to unchanged\n # objects.\n seg_tuple = (id(seg) for seg in segments)\n self_name = self._get_ref()\n if parse_context.denylist.check(self_name, seg_tuple): # pragma: no cover TODO?\n # This has been tried before.\n parse_match_logging(\n self.__class__.__name__,\n 
\"match\",\n \"SKIP\",\n parse_context=parse_context,\n v_level=3,\n self_name=self_name,\n )\n return MatchResult.from_unmatched(segments)\n\n # Match against that. NB We're not incrementing the match_depth here.\n # References shouldn't really count as a depth of match.\n with parse_context.matching_segment(self._get_ref()) as ctx:\n resp = elem.match(segments=segments, parse_context=ctx)\n if not resp:\n parse_context.denylist.mark(self_name, seg_tuple)\n return resp\n\n @classmethod\n def keyword(cls, keyword, **kwargs):\n \"\"\"Generate a reference to a keyword by name.\n\n This function is entirely syntactic sugar, and designed\n for more readable dialects.\n\n Ref.keyword('select') == Ref('SelectKeywordSegment')\n\n \"\"\"\n name = keyword.capitalize() + \"KeywordSegment\"\n return cls(name, **kwargs)\n\n\nclass Anything(BaseGrammar):\n \"\"\"Matches anything.\"\"\"\n\n def match(\n self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext\n ) -> \"MatchResult\":\n \"\"\"Matches... Anything.\n\n Most useful in match grammars, where a later parse grammar\n will work out what's inside.\n \"\"\"\n return MatchResult.from_matched(segments)\n\n\nclass Nothing(BaseGrammar):\n \"\"\"Matches nothing.\n\n Useful for placeholders which might be overwritten by other\n dialects.\n \"\"\"\n\n def match(\n self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext\n ) -> \"MatchResult\":\n \"\"\"Matches... nothing.\n\n Useful for placeholders which might be overwritten by other\n dialects.\n \"\"\"\n return MatchResult.from_unmatched(segments)\n","sub_path":"src/sqlfluff/core/parser/grammar/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":41083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"28971666","text":"'''\r\nCreated on 2017年6月21日\r\n\r\n@author: iBook\r\n'''\r\n#!/usr/bin/python3\r\n# -*- coding: UTF-8 -*-\r\nimport MySQLdb\r\n\r\n\r\n# 打开数据库连接\r\ndb = MySQLdb.connect(\"localhost\",\"root\",\"1234\",\"test\" )\r\n# 使用cursor()方法获取操作游标\r\ncursor = db.cursor()\r\n# 使用execute方法执行SQL语句\r\ncursor.execute(\"SELECT VERSION()\")\r\n# 使用 fetchone() 方法获取一条数据库。\r\ndata = cursor.fetchone()\r\nprint(\"Database version : %s \" % data)\r\n# 关闭数据库连接\r\ndb.close()\r\n","sub_path":"PythonDemo/com/weduoo/db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"65765204","text":"from gcloud import storage\r\nimport os\r\nimport json\r\n\r\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"./dbtest-301709-b8daa273ad42.json\"\r\n\r\nos.environ.setdefault(\"GCLOUD_PROJECT\", \"dbtest\") # 프로젝트 id를 넣어줌\r\n\r\nclient = storage.Client()\r\n\r\nbucket = client.get_bucket('store_video2') # 버켓 이름 넣어줌\r\n\r\n\r\n# 프레임 파일 / 초기 파일 지우기\r\n\r\nwith open('upload/emotion/emotions.json') as json_file:\r\n json_data = json.load(json_file)\r\n\r\nfor key in json_data.keys():\r\n if( key == 'total' or key == 'time'):\r\n continue \r\n print(key)\r\n frame = bucket.blob(key+\".jpg\") # 어떤 파일을 지울건지\r\n try:\r\n frame.delete()\r\n except Exception:\r\n print(\"can not detect such file\")\r\n\r\n# 동영상 초기 파일 지우기\r\npre_video = bucket.blob('test.mp4') # 어떤 파일을 지울건지\r\ntry:\r\n pre_video.delete()\r\nexcept Exception:\r\n print(\"no such file\")\r\n\r\npre_result_video = bucket.blob('result.mp4') # 어떤 파일을 지울건지\r\ntry:\r\n pre_result_video.delete()\r\nexcept Exception:\r\n print(\"no such file\")\r\n\r\n\r\n\r\n\r\n# 해야할 
것 (TODO)\r\n\r\n# Create a Google Cloud project\r\n# Create a bucket in that project -> select fine-grained access control\r\n# Create a service account for the bucket\r\n\r\n# pip install gcloud\r\n# pip install --upgrade google-cloud-storage\r\n\r\n\r\n# export GOOGLE_APPLICATION_CREDENTIALS=\"C:\\Users\\CHOISAYWHY\\jsons\\dbtest-301709-b8daa273ad42.json\" \r\n# ^ use the path where you saved your own json key file \r\n# then run\r\n# pip install -U httplib2==0.15.0\r\n\r\n# https://github.com/googleapis/google-api-python-client/issues/803\r\n","sub_path":"GoogleStorageDefault.py","file_name":"GoogleStorageDefault.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"625385303","text":"# Copyright 2012-2013 OpenStack, LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"Network action implementations\"\"\"\n\nimport logging\n\nfrom cliff import command\n\nfrom openstackclient.network import common\n\n\nclass CreateNetwork(common.CreateCommand):\n    \"\"\"Create a network\"\"\"\n\n    resource = 'network'\n\n    def get_parser(self, prog_name):\n        parser = super(CreateNetwork, self).get_parser(prog_name)\n        parser.add_argument(\n            '--admin-state-down',\n            dest='admin_state', action='store_false',\n            default=True, help='Set Admin State Up to false')\n        parser.add_argument(\n            '--shared',\n            action='store_true',\n            default=False, help='Set the network as shared')\n        parser.add_argument(\n            'name', metavar='NAME',\n            help='Name of network to create')\n        return parser\n\n    def get_body(self, parsed_args):\n        return {self.resource: {\n            'name': str(parsed_args.name),\n            'admin_state_up': str(parsed_args.admin_state),\n            'shared': str(parsed_args.shared) } }\n\n\nclass DeleteNetwork(common.DeleteCommand):\n    \"\"\"Delete a network\"\"\"\n\n    resource = 'network'\n\n\nclass ListNetwork(common.ListCommand):\n    \"\"\"List networks\"\"\"\n\n    resource = \"network\"\n    list_columns = ['id', 'name', 'subnets']\n\n    def get_parser(self, prog_name):\n        parser = super(ListNetwork, self).get_parser(prog_name)\n        parser.add_argument(\n            '--external',\n            action='store_true',\n            default=False,\n            help='List external networks',\n        )\n        parser.add_argument(\n            '--dhcp',\n            dest='dhcp_agent',\n            help='ID of the DHCP agent')\n        return parser\n\n    def take_action(self, parsed_args):\n        self.log.debug('take_action(%s)' % parsed_args)\n        if parsed_args.external:\n            self.report_filter = {'router:external': True}\n        elif parsed_args.dhcp_agent:\n            self.func = 'networks_on_dhcp_agent'\n            self.resources = 'networks_on_dhcp_agent'\n            self.report_filter = {'dhcp_agent': parsed_args.dhcp_agent}\n        return super(ListNetwork, self).take_action(parsed_args)\n\n\nclass SetNetwork(common.SetCommand):\n    \"\"\"Set network values\"\"\"\n\n    resource = 'network'\n\n\nclass ShowNetwork(common.ShowCommand):\n    \"\"\"Show network details\"\"\"\n\n    resource = 'network'\n\n\nclass AddGatewayNetwork(command.Command, common.BaseCommand):\n    \"\"\"Add a gateway to a network\"\"\"\n\n    log = logging.getLogger(__name__ + '.AddGatewayNetwork')\n    resource = 'network'\n    resources = 'networks'\n\n    def 
get_parser(self, prog_name):\n        parser = super(AddGatewayNetwork, self).get_parser(prog_name)\n        parser.add_argument(\n            '--segmentation-type',\n            help=('L2 segmentation strategy on the external side of '\n                  'the gateway (e.g.: VLAN, FLAT)'))\n        parser.add_argument(\n            '--segmentation-id',\n            help=('Identifier for the L2 segment on the external side '\n                  'of the gateway'))\n        parser.add_argument(\n            'network',\n            metavar='',\n            help='Name or identifier of the internal network'\n        )\n        parser.add_argument(\n            'gateway',\n            metavar='',\n            help='Name or identifier of the gateway'\n        )\n        return parser\n\n    def take_action(self, parsed_args):\n        self.log.debug('take_action(%s)' % parsed_args)\n        client = self.app.client_manager.network\n        network_id = self.find_resource(parsed_args.network)\n        gateway_id = self.find('network_gateway', 'network_gateways',\n                               parsed_args.gateway)\n        body = {'network_id': network_id,\n                'segmentation_type': parsed_args.segmentation_type,\n                'segmentation_id': parsed_args.segmentation_id}\n        client.connect_network_gateway(gateway_id, body)\n        print ('Connected network to gateway %s' % gateway_id)\n\n\nclass RemoveGatewayNetwork(command.Command, common.BaseCommand):\n    \"\"\"Remove a gateway from a network\"\"\"\n\n    log = logging.getLogger(__name__ + '.RemoveGatewayNetwork')\n    resource = 'network'\n    resources = 'networks'\n\n    def get_parser(self, prog_name):\n        parser = super(RemoveGatewayNetwork, self).get_parser(prog_name)\n        parser.add_argument(\n            '--segmentation-type',\n            help=('L2 segmentation strategy on the external side of '\n                  'the gateway (e.g.: VLAN, FLAT)'))\n        parser.add_argument(\n            '--segmentation-id',\n            help=('Identifier for the L2 segment on the external side '\n                  'of the gateway'))\n        parser.add_argument(\n            'network',\n            metavar='',\n            help='Name or identifier of the internal network'\n        )\n        parser.add_argument(\n            'gateway',\n            metavar='',\n            help='Name or identifier of the gateway'\n        )\n        return parser\n\n    def take_action(self, parsed_args):\n        self.log.debug('take_action(%s)' % parsed_args)\n        client = self.app.client_manager.network\n        network_id = self.find_resource(parsed_args.network)\n        gateway_id = self.find('network_gateway', 'network_gateways',\n                               parsed_args.gateway)\n        body = {'network_id': network_id,\n                'segmentation_type': parsed_args.segmentation_type,\n                'segmentation_id': parsed_args.segmentation_id}\n        client.disconnect_network_gateway(gateway_id, body)\n        print ('Disconnected network from gateway %s' % gateway_id)\n","sub_path":"openstackclient/network/v2_0/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"641357977","text":"# -*- coding: utf-8 -*-\n\nimport socket\nimport platform\n\n\nclass Network():\n    def network(self):\n        net = {\n            'hostname': socket.gethostname(),\n        }\n        return net\n\n\nclass Linux(Network):\n    pass\n\n\nplatforms = {\n    'Linux': Linux()\n    }\n\n\nclass BaseInfo(object):\n    \"\"\" The base info class \"\"\"\n\n    def __init__(self):\n        \"\"\" Try to detect os type \"\"\"\n        self.os = platform.system()\n        self.hostname = socket.gethostname()\n        try:\n            self.info = platforms[self.os]\n        except KeyError:\n            raise Exception('Platform \"{}\" is not supported'.format(self.os))\n","sub_path":"aboutme/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"272671027","text":"# -*- coding: utf-8 -*-\nimport sys, os 
\nsys.path.append(os.path.dirname(__file__))\n\nfrom PyQt4 import QtCore, QtGui, uic\n\nGUI, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__),\n 'ui',\n 'merge_dialog.ui'), \n resource_suffix=''\n)\n\nclass Merge_Dialog(QtGui.QDialog, GUI):\n def __init__(self, iface):\n super(Merge_Dialog, self).__init__()\n self.setupUi(self)\n \n def loadCombo(self,itens):\n self.comboBox_branchAtivo.addItems(itens)\n self.comboBox_branch2Merge.addItems(itens) \n\n def getSelectedBranches(self):\n selectedBranches = {\n \"head\":self.comboBox_branchAtivo.currentText(),\n \"mergeHead\":self.comboBox_branch2Merge.currentText()\n }\n return selectedBranches\n \n\n\n \n","sub_path":"geogig_interface/merge_dialog.py","file_name":"merge_dialog.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"489187707","text":"\n# Copyright (C) 2021 Data Mining Group\n# \n# This file is part of POIE\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__version_info__ = (0,9,0)\n\n__version__ = \".\".join(map(str, __version_info__))\n\npypoieVersion = __version__ # the poie version is for informational purposes only; doesn't affect behavior\n\ndefaultPFAVersion = \"0.8.1\" # the PFA version determines how poie will interpret PFA documents (can be overridden)\n # must always be in the form [1-9][0-9]*\\.[1-9][0-9]*\\.[1-9][0-9]*\n","sub_path":"py-poie/poie/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"183685370","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth import get_user_model\nfrom django.core import signing\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext_lazy as _\n\nfrom business_logic import LogicErrors, LogicException\n\nfrom pca.utils.config import get_setting\n\nUser = get_user_model()\n\n\n_REGISTRATION_SALT = get_setting('REGISTRATION_SALT', 'registration')\n_ACCOUNT_ACTIVATION_DAYS = get_setting('ACCOUNT_ACTIVATION_DAYS', 30)\n_EMAIL_SUBJECT_TEMPLATE = ''\n_EMAIL_BODY_TEMPLATE = ''\n_DEFAULT_FROM_EMAIL = get_setting('GET_OWNER_EMAIL')\n\n\nclass ActivationErrors(LogicErrors):\n ALREADY_ACTIVATED = LogicException(\n _(\"The account you tried to activate has already been activated.\"))\n BAD_USERNAME = LogicException(_(\"The account you attempted to activate is invalid.\"))\n TOKEN_EXPIRED = LogicException(_(\"This account has expired.\"))\n INVALID_TOKEN = LogicException(_(\"The activation token you provided is invalid: {token}\"))\n\n\ndef send_activation_email(user, site, request_scheme):\n \"\"\"\n Send the activation email. 
The activation key is the username,\n signed using TimestampSigner.\n\n \"\"\"\n activation_token = _get_activation_token(user)\n context = {\n 'user': user,\n 'scheme': request_scheme,\n 'activation_token': activation_token,\n 'expiration_days': _ACCOUNT_ACTIVATION_DAYS,\n 'site': site,\n }\n subject = render_to_string(\n template_name=_EMAIL_SUBJECT_TEMPLATE,\n context=context,\n )\n # Force subject to a single line to avoid header-injection\n # issues.\n subject = ''.join(subject.splitlines())\n message = render_to_string(\n template_name=_EMAIL_BODY_TEMPLATE,\n context=context,\n )\n user.email_user(subject, message, _DEFAULT_FROM_EMAIL)\n\n\ndef _get_activation_token(user):\n \"\"\"Generate the activation key which will be emailed to the user.\"\"\"\n return signing.dumps(\n obj=user.get_username(),\n salt=_REGISTRATION_SALT\n )\n\n\ndef activate(activation_token, user_activated):\n username = validate_activation_token(activation_token)\n user = get_user_to_activate(username)\n user.is_active = True\n user.save()\n user_activated.send(user)\n return user\n\n\ndef validate_activation_token(activation_token):\n \"\"\"\n Verify that the activation token is valid and within the\n permitted activation time window, returning the username if\n valid or raising ``ActivationError`` if not.\n \"\"\"\n try:\n username = signing.loads(\n activation_token,\n salt=_REGISTRATION_SALT,\n max_age=_ACCOUNT_ACTIVATION_DAYS * 86400\n )\n return username\n except signing.SignatureExpired:\n raise ActivationErrors.TOKEN_EXPIRED\n except signing.BadSignature:\n raise ActivationErrors.BAD_USERNAME\n\n\ndef get_user_to_activate(username):\n \"\"\"\n Given the verified username, look up and return the corresponding user\n account if it exists, or raising one of ``ActivationErrors`` if it doesn't.\n \"\"\"\n try:\n user = User.objects.get(**{User.USERNAME_FIELD: username})\n if user.is_active:\n raise ActivationErrors.ALREADY_ACTIVATED\n return user\n except User.DoesNotExist:\n raise ActivationErrors.BAD_USERNAME\n","sub_path":"pca/users/services/activation.py","file_name":"activation.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"621905486","text":"class Solution(object):\n def addBinary(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n addOne, maxLen = 0, max(len(a), len(b))\n res = []\n i = 1\n while i<=maxLen or addOne:\n aDigit = int(a[-i]) if i<=len(a) else 0\n bDigit = int(b[-i]) if i<=len(b) else 0\n res.append( str((aDigit+bDigit+addOne)%2) )\n addOne = (aDigit+bDigit+addOne)//2\n i += 1\n list.reverse(res)\n return ''.join(res)\n","sub_path":"Leetcode/string/67_AddBinary/iterative/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"638217667","text":"### Simulating Randomness\nimport random\n\nrandom.choice(['Head', 'Tails'])\n\nroll_a_die = random.choice([1, 2, 3, 4, 5, 6])\nprint(roll_a_die)\n\nrandom.choice(range(1, 7))\n\nrandom.choice(random.choice([range(1, 7), range(1, 9), range(1, 11)]))\n\n\nrolls = [random.choice([1, 2, 3, 4, 5, 6]) for i in range(100)]\nplt.hist(rolls, bins=np.linspace(0.5, 6.5, 7))\n\nmore_rolls = [random.choice([1, 2, 3, 4, 5, 6]) for i in range(10000)]\nplt.hist(more_rolls, bins=np.linspace(0.5, 6.5, 7))\n\n### Generate a random number from the standard uniform dist\nf = np.random.random()\nprint(f)\n\nf_arr = 
\n","sub_path":"Leetcode/string/67_AddBinary/iterative/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
{"seq_id":"638217667","text":"### Simulating Randomness\nimport random\n\n# The original script used np and plt without importing them; imports added:\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nrandom.choice(['Head', 'Tails'])\n\nroll_a_die = random.choice([1, 2, 3, 4, 5, 6])\nprint(roll_a_die)\n\nrandom.choice(range(1, 7))\n\nrandom.choice(random.choice([range(1, 7), range(1, 9), range(1, 11)]))\n\n\nrolls = [random.choice([1, 2, 3, 4, 5, 6]) for i in range(100)]\nplt.hist(rolls, bins=np.linspace(0.5, 6.5, 7))\n\nmore_rolls = [random.choice([1, 2, 3, 4, 5, 6]) for i in range(10000)]\nplt.hist(more_rolls, bins=np.linspace(0.5, 6.5, 7))\n\n### Generate a random number from the standard uniform dist\nf = np.random.random()\nprint(f)\n\nf_arr = np.random.random(50)\nprint (f_arr)\n\nf_mat = np.random.random((5, 3))\nprint(f_mat)\n\n### Generate random number from standard normal dist\ng = np.random.normal(0, 1) # where 0 = mean and 1 = standard deviation\nprint(g)\ng_arr = np.random.normal(0, 1, 50)\nprint(g_arr)\n\n# Random array of ints\nrarr = np.random.randint(1, 7, (10,3))\nprint(rarr)\nprint(rarr.shape)","sub_path":"HarvardX-PH526x-Py4Research/random_playground.py","file_name":"random_playground.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
{"seq_id":"625751218","text":"import sympy as smp\nfrom sympy import *\nimport matplotlib.pyplot as plt\nfrom scipy import *\nimport numpy as np\nfrom sympy.plotting import plot\nfrom scipy.integrate import *\nimport scipy as sp\nimport matplotlib as mpl\n\nmpl = \"qt4gg\"\n\n#symbolic part of the code\n\ndef Full():\n\n#define constants, variables and functions\n\n    R=Symbol('R')\n    r=Symbol('r')\n    M=Function('M')\n    Rho=Function('Rho')\n    theta = Symbol('theta')\n    #following values set for NFW profile in Milky Way\n    alpha=1\n    beta=3\n    gamma=1\n    Rho_0= 1 #2.8*10**37 #units: kg/kpc\n    Rs = 2#16.1 #in kpc\n    h = 2 #height of milky way disk, currently in kpc\n    G = 1 #6.67191*10**-11 # m**3/kg*s**2 +_ 0.000009*10**-11\n\n    #density profile\n    #Rho =Rho_0/ ((1+(r/Rs)**2)**(3/2))\n    Rho = Rho_0/ ( (r/Rs)**gamma * (1+(r/Rs)**alpha)**((beta-gamma)/alpha) )\n\n    print(\"This is the Rho\", Rho)\n    #print(Rho)\n\n    #Having trouble with the integral using partial fractions\n    #supposed to integrate Rho times the cylindrical Jacobian over R to get the\n    #mass as a function of the Radius, assuming a thick disk\n    #try adding in the central bulge. Assume that the bulge is a sphere of same density profile\n    M = smp.integrate(2*smp.pi*r**2*h*Rho,(r,0,R)) + 2*smp.pi*smp.integrate(r**2*Rho,(r,0,R))*smp.integrate(smp.sin(theta),(theta,0,2*smp.pi))\n    print(\"this is the Mass\", M)\n    #print(M)\n\n    #send it through the symbolic integrator\n    #set the limits of the Radius here\n    Rmin=2\n    Rmax=60\n\n    #this shows the function we wish to plot\n    #RH = integrate(M,(R))\n    #print \"Our Mass-Radius Equation is:\"\n    #print(RH)\n    #define the velocity as a function of R\n    #V is only a first order Newtonian approx\n    V = smp.sqrt(G*M/R)\n    print (\"Our Velocity Equation is:\", V)\n    #print(V)\n#now we have to prepare the antiderivative of the equation for plotting\n#using the sympy plotting package.\n    plot(V,(R,Rmin,Rmax),nb_of_points = 500,xlabel = (\"Radius\"), ylabel = (\"Velocity\"),title = (\"Velocity Profile for Galactic Mass Distributions in Spiral Galaxies\"))\n#this returns a plot of Velocity V and Radius. This is good for the Star and Gas\n#distributions\n\n#time to use numpy to do the evaluation numerically\n#need to get this working, the integrator is not working. I want to put\n#in a list and get an integrated list back.
 That way I can plot the function\n#plus the integral\n\n\n#Goal: do the same thing as the symbolic program but numerically\n# take a given density profile and integrate it to get the\n# mass, then take that mass and solve for the velocity, plot the velocity as a function\n# of radius, show that the symbolic and numeric parts coincide\n\n# define the numerical function\n    #define R as a linear space\n    R_min = 2\n    R_max = 60\n    R_step = 10000\n    R = np.linspace(R_min,R_max,R_step)\n\n\n    #define the density profile\n    Rho =Rho_0/ ( (R/Rs)**gamma * (1+(R/Rs)**alpha)**((beta-gamma)/alpha) )\n    #Something's wrong with this one\n\n    #Rho_0/ ( (R/Rs)**gamma * (1+(R/Rs)**alpha)**((beta-gamma)/alpha) ) Something's wrong with this one\n\n    #Rho_0/ ((1+(R/Rs)**2)**(3/2)) works well\n\n    #Get Rho as a list\n    #Rho = Rho(R,params)\n\n    J = 2*np.pi*R #The Jacobian for the integral\n    U = J*Rho #the integrand\n    #print(U)\n\n    #Get the mass as a list\n    #requires an integral\n    M = cumtrapz(U, x=R, dx = R_step, initial = 0)\n    #M = M(Rho,R,params)\n    #print M\n\n    #velocity given by assuming that the gravitational potential is constant\n    # and this is a first order Newtonian approx\n    V = np.sqrt((G*M)/R)\n\n    #plotting the result\n    #get plots to appear together\n    plt.figure()\n    plt.plot(R,V,label = \"Velocity Profile Curve\")\n    #plt.plot(R,U, label = 'RU curve ')\n    #plt.plot(R,M, label = 'RM curve ')\n    plt.legend()\n    plt.title(\"Velocity Profile for Galactic Mass Distributions in Spiral Galaxies\")\n    plt.xlabel(\"Radius\")\n    plt.ylabel(\"Velocity as a Function of R\")\n    plt.show()\n\nif __name__ == '__main__':  # was '__Full__', which never matches\n    Full()\n\n#Numerical()\n","sub_path":"Complete_11_30.py","file_name":"Complete_11_30.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
{"seq_id":"564320426","text":"# Test export module\nimport sys\n\nsys.path.insert(0, '..')\nimport copy\nimport os\nimport shutil\nimport numpy as np\nimport flopy\n\npth = os.path.join('..', 'examples', 'data', 'mf2005_test')\nnamfiles = [namfile for namfile in os.listdir(pth) if namfile.endswith('.nam')]\n# skip = [\"MNW2-Fig28.nam\", \"testsfr2.nam\", \"testsfr2_tab.nam\"]\nskip = []\n\n\ntpth = os.path.join('temp', 't007')\n# make the directory if it does not exist\nif not os.path.isdir(tpth):\n    os.makedirs(tpth)\n\nnpth = os.path.join('temp', 't007', 'netcdf')\n# delete the directory if it exists\nif os.path.isdir(npth):\n    shutil.rmtree(npth)\n# make the directory\nos.makedirs(npth)\n\nspth = os.path.join('temp', 't007', 'shapefile')\n# make the directory if it does not exist\nif not os.path.isdir(spth):\n    os.makedirs(spth)\n\n\ndef export_netcdf(namfile):\n    if namfile in skip:\n        return\n    print(namfile)\n    m = flopy.modflow.Modflow.load(namfile, model_ws=pth, verbose=False)\n    if m.sr.lenuni == 0:\n        m.sr.lenuni = 1\n        # print('skipping...lenuni==0 (undefined)')\n        # return\n    # if sum(m.dis.laycbd) != 0:\n    if m.dis.botm.shape[0] != m.nlay:\n        print('skipping...botm.shape[0] != nlay')\n        return\n    assert m, 'Could not load namefile {}'.format(namfile)\n    assert isinstance(m, flopy.modflow.Modflow)\n\n    # Do not fail if netCDF4 not installed\n    try:\n        import netCDF4\n        import pyproj\n    except:\n        return\n\n    fnc = m.export(os.path.join(npth, m.name + '.nc'))\n    fnc.write()\n    fnc_name = os.path.join(npth, m.name + '.nc')\n    try:\n        fnc = m.export(fnc_name)\n        fnc.write()\n    except Exception as e:\n        raise Exception(\n            'ncdf export fail for namfile {0}:\\n{1} '.format(namfile, str(e)))\n    try:\n        nc = 
netCDF4.Dataset(fnc_name, 'r')\n except Exception as e:\n raise Exception('ncdf import fail for nc file {0}'.format(fnc_name))\n return\n\n\ndef export_shapefile(namfile):\n try:\n import shapefile as shp\n except:\n return\n\n print(namfile)\n m = flopy.modflow.Modflow.load(namfile, model_ws=pth, verbose=False)\n\n assert m, 'Could not load namefile {}'.format(namfile)\n assert isinstance(m, flopy.modflow.Modflow)\n fnc_name = os.path.join(spth, m.name + '.shp')\n try:\n fnc = m.export(fnc_name)\n #fnc2 = m.export(fnc_name, package_names=None)\n #fnc3 = m.export(fnc_name, package_names=['DIS'])\n\n\n except Exception as e:\n raise Exception(\n 'shapefile export fail for namfile {0}:\\n{1} '.format(namfile,\n str(e)))\n try:\n s = shp.Reader(fnc_name)\n except Exception as e:\n raise Exception(\n ' shapefile import fail for {0}:{1}'.format(fnc_name, str(e)))\n assert s.numRecords == m.nrow * m.ncol, \"wrong number of records in \" + \\\n \"shapefile {0}:{1:d}\".format(\n fnc_name, s.numRecords)\n return\n\ndef test_freyberg_export():\n namfile = 'freyberg.nam'\n model_ws = '../examples/data/freyberg_multilayer_transient/'\n m = flopy.modflow.Modflow.load(namfile, model_ws=model_ws, verbose=False,\n load_only=['DIS', 'BAS6', 'NWT', 'OC',\n 'RCH',\n 'WEL',\n 'DRN',\n 'UPW'])\n m.drn.stress_period_data.export(os.path.join(spth, namfile[:-4]+'.shp'), sparse=True)\n\ndef test_export_output():\n import os\n import numpy as np\n import flopy\n\n # Do not fail if netCDF4 not installed\n try:\n import netCDF4\n import pyproj\n except:\n return\n\n model_ws = os.path.join(\"..\", \"examples\", \"data\", \"freyberg\")\n ml = flopy.modflow.Modflow.load(\"freyberg.nam\", model_ws=model_ws)\n hds_pth = os.path.join(model_ws, \"freyberg.githds\")\n hds = flopy.utils.HeadFile(hds_pth)\n\n out_pth = os.path.join(npth, \"freyberg.out.nc\")\n nc = flopy.export.utils.output_helper(out_pth, ml,\n {\"freyberg.githds\": hds})\n var = nc.nc.variables.get(\"head\")\n arr = var[:]\n ibound_mask = ml.bas6.ibound.array == 0\n arr_mask = arr.mask[0]\n assert np.array_equal(ibound_mask, arr_mask)\n\n\ndef test_mbase_sr():\n import numpy as np\n import flopy\n\n ml = flopy.modflow.Modflow(modelname=\"test\", xul=1000.0,\n rotation=12.5, start_datetime=\"1/1/2016\")\n try:\n print(ml.sr.xcentergrid)\n except:\n pass\n else:\n raise Exception(\"should have failed\")\n\n dis = flopy.modflow.ModflowDis(ml, nrow=10, ncol=5, delr=np.arange(5),\n xul=500)\n print(ml.sr)\n assert ml.sr.xul == 500\n assert ml.sr.yll == -10\n ml.model_ws = tpth\n\n ml.write_input()\n ml1 = flopy.modflow.Modflow.load(\"test.nam\", model_ws=ml.model_ws)\n assert ml1.sr == ml.sr\n assert ml1.start_datetime == ml.start_datetime\n\n\ndef test_free_format_flag():\n import flopy\n Lx = 100.\n Ly = 100.\n nlay = 1\n nrow = 51\n ncol = 51\n delr = Lx / ncol\n delc = Ly / nrow\n top = 0\n botm = [-1]\n ms = flopy.modflow.Modflow(rotation=20.)\n dis = flopy.modflow.ModflowDis(ms, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc, top=top, botm=botm)\n bas = flopy.modflow.ModflowBas(ms, ifrefm=True)\n assert ms.free_format_input == bas.ifrefm\n ms.free_format_input = False\n assert ms.free_format_input == bas.ifrefm\n ms.free_format_input = True\n bas.ifrefm = False\n assert ms.free_format_input == bas.ifrefm\n bas.ifrefm = True\n assert ms.free_format_input == bas.ifrefm\n\n ms.model_ws = tpth\n ms.write_input()\n ms1 = flopy.modflow.Modflow.load(ms.namefile, model_ws=ms.model_ws)\n assert ms1.free_format_input == ms.free_format_input\n assert 
ms1.free_format_input == ms1.bas6.ifrefm\n ms1.free_format_input = False\n assert ms1.free_format_input == ms1.bas6.ifrefm\n bas.ifrefm = False\n assert ms1.free_format_input == ms1.bas6.ifrefm\n bas.ifrefm = True\n assert ms1.free_format_input == ms1.bas6.ifrefm\n\n\ndef test_sr():\n import flopy\n Lx = 100.\n Ly = 100.\n nlay = 1\n nrow = 51\n ncol = 51\n delr = Lx / ncol\n delc = Ly / nrow\n top = 0\n botm = [-1]\n ms = flopy.modflow.Modflow(rotation=20.)\n dis = flopy.modflow.ModflowDis(ms, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr, delc=delc, top=top, botm=botm)\n bas = flopy.modflow.ModflowBas(ms, ifrefm=True)\n\n # test instantiation of an empty sr object\n sr = flopy.utils.reference.SpatialReference()\n\n # test instantiation of SR with xul, yul and no grid\n sr = flopy.utils.reference.SpatialReference(xul=1, yul=1)\n\n xul, yul = 321., 123.\n sr = flopy.utils.SpatialReference(delr=ms.dis.delr.array,\n delc=ms.dis.delc.array, lenuni=3,\n xul=xul, yul=yul, rotation=20)\n\n #txt = 'yul does not approximately equal 100 - ' + \\\n # '(xul, yul) = ({}, {})'.format( ms.sr.yul, ms.sr.yul)\n assert abs(ms.sr.yul - Ly) < 1e-3#, txt\n ms.sr.xul = 111\n assert ms.sr.xul == 111\n\n # test that transform for arbitrary coordinates\n # is working in same as transform for model grid\n x, y = ms.sr.xcenter, ms.sr.ycenter[0]\n xt, yt = sr.transform(x, y)\n assert np.sum(xt - sr.xcentergrid[0]) < 1e-3\n x, y = ms.sr.xcenter[0], ms.sr.ycenter\n xt, yt = sr.transform(x, y)\n assert np.sum(yt - sr.ycentergrid[:, 0]) < 1e-3\n\n # test inverse transform\n x0, y0 = 9.99, 2.49\n x1, y1 = sr.transform(x0, y0)\n x2, y2 = sr.transform(x1, y1, inverse=True)\n assert np.abs(x2-x0) < 1e-6\n assert np.abs(y2-y0) < 1e6\n\n # test input using ul vs ll\n xll, yll = sr.xll, sr.yll\n sr2 = flopy.utils.SpatialReference(delr=ms.dis.delr.array,\n delc=ms.dis.delc.array, lenuni=3,\n xll=xll, yll=yll, rotation=20)\n assert sr2.xul == sr.xul\n assert sr2.yul == sr.yul\n assert np.array_equal(sr.xcentergrid, sr2.xcentergrid)\n assert np.array_equal(sr.ycentergrid, sr2.ycentergrid)\n\n ms.sr.lenuni = 1\n assert ms.sr.lenuni == 1\n\n ms.sr.units = \"feet\"\n assert ms.sr.units == \"feet\"\n\n ms.sr = sr\n assert ms.sr == sr\n assert ms.sr.lenuni != ms.dis.lenuni\n\n try:\n ms.sr.units = \"junk\"\n except:\n pass\n else:\n raise Exception(\"should have failed\")\n\n ms.start_datetime = \"1-1-2016\"\n assert ms.start_datetime == \"1-1-2016\"\n assert ms.dis.start_datetime == \"1-1-2016\"\n\n ms.model_ws = tpth\n ms.write_input()\n ms1 = flopy.modflow.Modflow.load(ms.namefile, model_ws=ms.model_ws)\n assert ms1.sr == ms.sr\n assert ms1.dis.sr == ms.dis.sr\n assert ms1.start_datetime == ms.start_datetime\n assert ms1.sr.units == ms.sr.units\n assert ms1.dis.lenuni == ms1.sr.lenuni\n #assert ms1.sr.lenuni != sr.lenuni\n ms1.sr = sr\n assert ms1.sr == ms.sr\n\n\ndef test_sr_scaling():\n nlay, nrow, ncol = 1, 10, 5\n delr, delc = 250, 500\n xll, yll = 286.80, 29.03\n\n print(np.__version__)\n # test scaling of length units\n ms2 = flopy.modflow.Modflow()\n dis = flopy.modflow.ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc)\n ms2.sr = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=3,\n xll=xll, yll=yll, rotation=0)\n ms2.sr.epsg = 26715\n ms2.dis.export(os.path.join(spth, 'dis2.shp'))\n ms3 = flopy.modflow.Modflow()\n dis = flopy.modflow.ModflowDis(ms3, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc)\n ms3.sr = 
flopy.utils.SpatialReference(delr=ms3.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n length_multiplier=2.,\n xll=xll, yll=yll, rotation=0)\n ms3.dis.export(os.path.join(spth, 'dis3.shp'), epsg=26715)\n\n # check that the origin(s) are maintained\n assert np.array_equal(ms3.sr.get_vertices(nrow - 1, 0)[1],\n [ms3.sr.xll, ms3.sr.yll])\n\n assert np.allclose(ms3.sr.get_vertices(nrow - 1, 0)[1],\n ms2.sr.get_vertices(nrow - 1, 0)[1])\n\n # check that the upper left corner is computed correctly\n # in this case, length_multiplier overrides the given units\n def check_size(sr):\n xur, yur = sr.get_vertices(0, ncol - 1)[3]\n assert np.abs(xur - (xll + sr.length_multiplier * delr * ncol)) < 1e-4\n assert np.abs(yur - (yll + sr.length_multiplier * delc * nrow)) < 1e-4\n check_size(ms3.sr)\n\n # run the same tests but with units specified instead of a length multiplier\n ms2 = flopy.modflow.Modflow()\n dis = flopy.modflow.ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr, delc=delc,\n lenuni=1 # feet; should have no effect on SR\n # (model not supplied to SR)\n )\n ms2.sr = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array,\n lenuni=2, # meters\n epsg=26715, # meters, listed on spatialreference.org\n xll=xll, yll=yll, rotation=0)\n assert ms2.sr.model_length_units == 'meters'\n assert ms2.sr.length_multiplier == 1.\n ms2.sr.lenuni = 1 # feet; test dynamic setting\n assert ms2.sr.model_length_units == 'feet'\n check_size(ms2.sr)\n assert ms2.sr.length_multiplier == .3048\n ms2.sr.lenuni = 3 # centimeters\n assert ms2.sr.model_length_units == 'centimeters'\n check_size(ms2.sr)\n assert ms2.sr.length_multiplier == 0.01\n ms2.sr.lenuni = 2 # meters\n check_size(ms2.sr)\n ms2.sr.units = 'meters'\n ms2.sr.proj4_str = '+proj=utm +zone=16 +datum=NAD83 +units=us-ft +no_defs'\n assert ms2.sr.proj4_str == '+proj=utm +zone=16 +datum=NAD83 +units=us-ft +no_defs'\n assert ms2.sr.units == 'feet'\n assert ms2.sr.length_multiplier == 1/.3048\n check_size(ms2.sr)\n ms2.sr.epsg = 6610 # meters, not listed on spatialreference.org but understood by pyproj\n assert ms2.sr.units == 'meters'\n assert ms2.sr.proj4_str is not None\n check_size(ms2.sr)\n\ndef test_dynamic_xll_yll():\n nlay, nrow, ncol = 1, 10, 5\n delr, delc = 250, 500\n xll, yll = 286.80, 29.03\n # test scaling of length units\n ms2 = flopy.modflow.Modflow()\n dis = flopy.modflow.ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc)\n sr1 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xll=xll, yll=yll, rotation=30)\n xul, yul = sr1.xul, sr1.yul\n sr1.length_multiplier = 1.0 / 3.281\n assert sr1.xll == xll\n assert sr1.yll == yll\n sr2 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xul=xul, yul=yul, rotation=30)\n sr2.length_multiplier = 1.0 / 3.281\n assert sr2.xul == xul\n assert sr2.yul == yul\n\n # test resetting of attributes\n sr3 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xll=xll, yll=yll, rotation=30)\n # check that xul, yul and xll, yll are being recomputed\n sr3.xll += 10.\n sr3.yll += 21.\n assert np.abs(sr3.xul - (xul + 10.)) < 1e-6\n assert np.abs(sr3.yul - (yul + 21.)) < 1e-6\n sr4 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xul=xul, yul=yul, rotation=30)\n assert sr4.origin_loc == 'ul'\n sr4.xul += 10.\n sr4.yul += 21.\n assert np.abs(sr4.xll - (xll + 10.)) < 1e-6\n assert 
np.abs(sr4.yll - (yll + 21.)) < 1e-6\n sr4.rotation = 0.\n assert np.abs(sr4.xul - (xul + 10.)) < 1e-6 # these shouldn't move because ul has priority\n assert np.abs(sr4.yul - (yul + 21.)) < 1e-6\n assert np.abs(sr4.xll - sr4.xul) < 1e-6\n assert np.abs(sr4.yll - (sr4.yul - sr4.yedge[0])) < 1e-6\n sr4.xll = 0.\n sr4.yll = 10.\n assert sr4.origin_loc == 'll'\n assert sr4.xul == 0.\n assert sr4.yul == sr4.yedge[0] + 10.\n sr4.xul = xul\n sr4.yul = yul\n assert sr4.origin_loc == 'ul'\n sr4.rotation = 30.\n assert np.abs(sr4.xll - xll) < 1e-6\n assert np.abs(sr4.yll - yll) < 1e-6\n\n sr5 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xll=xll, yll=yll,\n rotation=0, epsg=26915)\n sr5.lenuni = 1\n assert sr5.length_multiplier == .3048\n assert sr5.yul == sr5.yll + sr5.yedge[0] * sr5.length_multiplier\n sr5.lenuni = 2\n assert sr5.length_multiplier == 1.\n assert sr5.yul == sr5.yll + sr5.yedge[0]\n sr5.proj4_str = '+proj=utm +zone=16 +datum=NAD83 +units=us-ft +no_defs'\n assert sr5.units == 'feet'\n assert sr5.length_multiplier == 1/.3048\n\ndef test_namfile_readwrite():\n nlay, nrow, ncol = 1, 30, 5\n delr, delc = 250, 500\n xll, yll = 272300, 5086000\n fm = flopy.modflow\n m = fm.Modflow(modelname='junk', model_ws=os.path.join('temp', 't007'))\n dis = fm.ModflowDis(m, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr,\n delc=delc)\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array, lenuni=3,\n length_multiplier=.3048,\n xll=xll, yll=yll, rotation=30)\n\n # test reading and writing of SR information to namfile\n m.write_input()\n m2 = fm.Modflow.load('junk.nam', model_ws=os.path.join('temp', 't007'))\n assert abs(m2.sr.xll - xll) < 1e-2\n assert abs(m2.sr.yll - yll) < 1e-2\n assert m2.sr.rotation == 30\n assert abs(m2.sr.length_multiplier - .3048) < 1e-10\n\n model_ws = os.path.join(\"..\", \"examples\", \"data\", \"freyberg_multilayer_transient\")\n ml = flopy.modflow.Modflow.load(\"freyberg.nam\", model_ws=model_ws, verbose=False,\n check=False, exe_name=\"mfnwt\")\n assert ml.sr.xul == 619653\n assert ml.sr.yul == 3353277\n assert ml.sr.rotation == 15.\n\ndef test_read_usgs_model_reference():\n nlay, nrow, ncol = 1, 30, 5\n delr, delc = 250, 500\n #xll, yll = 272300, 5086000\n model_ws = os.path.join('temp', 't007')\n shutil.copy('../examples/data/usgs.model.reference', model_ws)\n fm = flopy.modflow\n m = fm.Modflow(modelname='junk', model_ws=model_ws)\n # feet and days\n dis = fm.ModflowDis(m, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr,\n delc=delc, lenuni=1, itmuni=4)\n m.write_input()\n\n # test reading of SR information from usgs.model.reference\n m2 = fm.Modflow.load('junk.nam', model_ws=os.path.join('temp', 't007'))\n from flopy.utils.reference import SpatialReference\n d = SpatialReference.read_usgs_model_reference_file(os.path.join('temp', 't007', 'usgs.model.reference'))\n assert m2.sr.xul == d['xul']\n assert m2.sr.yul == d['yul']\n assert m2.sr.rotation == d['rotation']\n assert m2.sr.lenuni == d['lenuni']\n assert m2.sr.epsg == d['epsg']\n # have to delete this, otherwise it will mess up other tests\n if os.path.exists(os.path.join(tpth, 'usgs.model.reference')):\n os.remove(os.path.join(tpth, 'usgs.model.reference'))\n\n\ndef test_rotation():\n m = flopy.modflow.Modflow(rotation=20.)\n dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=40, ncol=20,\n delr=250.,\n delc=250., top=10, botm=0)\n xul, yul = 500000, 2934000\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n 
xul=xul, yul=yul, rotation=45.)\n xll, yll = m.sr.xll, m.sr.yll\n assert np.abs(m.dis.sr.xgrid[0, 0] - xul) < 1e-4\n assert np.abs(m.dis.sr.ygrid[0, 0] - yul) < 1e-4\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n xul=xul, yul=yul, rotation=-45.)\n assert m.dis.sr.xgrid[0, 0] == xul\n assert m.dis.sr.ygrid[0, 0] == yul\n xll2, yll2 = m.sr.xll, m.sr.yll\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n xll=xll2, yll=yll2, rotation=-45.)\n assert m.dis.sr.xgrid[0, 0] == xul\n assert m.dis.sr.ygrid[0, 0] == yul\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n xll=xll, yll=yll, rotation=45.)\n assert m.dis.sr.xgrid[0, 0] == xul\n assert m.dis.sr.ygrid[0, 0] == yul\n\n\ndef test_sr_with_Map():\n import matplotlib.pyplot as plt\n m = flopy.modflow.Modflow(rotation=20.)\n dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=40, ncol=20,\n delr=250.,\n delc=250., top=10, botm=0)\n # transformation assigned by arguments\n xul, yul, rotation = 500000., 2934000., 45.\n modelmap = flopy.plot.ModelMap(model=m, xul=xul, yul=yul,\n rotation=rotation)\n lc = modelmap.plot_grid()\n xll, yll = modelmap.sr.xll, modelmap.sr.yll\n plt.close()\n\n def check_vertices():\n xllp, yllp = lc._paths[0].vertices[0]\n xulp, yulp = lc._paths[0].vertices[1]\n assert np.abs(xllp - xll) < 1e-6\n assert np.abs(yllp - yll) < 1e-6\n assert np.abs(xulp - xul) < 1e-6\n assert np.abs(yulp - yul) < 1e-6\n\n check_vertices()\n\n modelmap = flopy.plot.ModelMap(model=m, xll=xll, yll=yll,\n rotation=rotation)\n lc = modelmap.plot_grid()\n check_vertices()\n plt.close()\n\n # transformation in m.sr\n sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n xll=xll, yll=yll, rotation=rotation)\n m.sr = copy.deepcopy(sr)\n modelmap = flopy.plot.ModelMap(model=m)\n lc = modelmap.plot_grid()\n check_vertices()\n plt.close()\n\n # transformation assign from sr instance\n m.sr._reset()\n m.sr.set_spatialreference()\n modelmap = flopy.plot.ModelMap(model=m, sr=sr)\n lc = modelmap.plot_grid()\n check_vertices()\n plt.close()\n\n # test plotting of line with specification of xul, yul in Dis/Model Map\n mf = flopy.modflow.Modflow()\n\n # Model domain and grid definition\n dis = flopy.modflow.ModflowDis(mf, nlay=1, nrow=10, ncol=20, delr=1., delc=1., xul=100, yul=210)\n #fig, ax = plt.subplots()\n verts = [[101., 201.], [119., 209.]]\n modelxsect = flopy.plot.ModelCrossSection(model=mf, line={'line': verts},\n xul=mf.dis.sr.xul, yul=mf.dis.sr.yul)\n linecollection = modelxsect.plot_grid()\n plt.close()\n\n\ndef test_netcdf_classmethods():\n import os\n import flopy\n\n # Do not fail if netCDF4 not installed\n try:\n import netCDF4\n import pyproj\n except:\n return\n\n nam_file = \"freyberg.nam\"\n model_ws = os.path.join('..', 'examples', 'data',\n 'freyberg_multilayer_transient')\n ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,\n verbose=True, load_only=[])\n\n f = ml.export(os.path.join(npth, \"freyberg.nc\"))\n v1_set = set(f.nc.variables.keys())\n fnc = os.path.join(npth, \"freyberg.new.nc\")\n new_f = flopy.export.NetCdf.zeros_like(f, output_filename=fnc)\n v2_set = set(new_f.nc.variables.keys())\n diff = v1_set.symmetric_difference(v2_set)\n assert len(diff) == 0, str(diff)\n\n# def test_netcdf_overloads():\n# import os\n# import flopy\n# nam_file = \"freyberg.nam\"\n# model_ws = os.path.join('..', 'examples', 'data', 'freyberg_multilayer_transient')\n# ml = 
flopy.modflow.Modflow.load(nam_file,model_ws=model_ws,check=False,\n# verbose=False,load_only=[])\n#\n# f = ml.export(os.path.join(\"temp\",\"freyberg.nc\"))\n# fzero = flopy.export.NetCdf.zeros_like(f)\n# assert fzero.nc.variables[\"model_top\"][:].sum() == 0\n# print(f.nc.variables[\"model_top\"][0,:])\n# fplus1 = f + 1\n# assert fplus1.nc.variables[\"model_top\"][0,0] == f.nc.variables[\"model_top\"][0,0] + 1\n# assert (f + fplus1).nc.variables[\"model_top\"][0,0] ==\\\n# f.nc.variables[\"model_top\"][0,0] + \\\n# fplus1.nc.variables[\"model_top\"][0,0]\n#\n# fminus1 = f - 1\n# assert fminus1.nc.variables[\"model_top\"][0,0] == f.nc.variables[\"model_top\"][0,0] - 1\n# assert (f - fminus1).nc.variables[\"model_top\"][0,0]==\\\n# f.nc.variables[\"model_top\"][0,0] - \\\n# fminus1.nc.variables[\"model_top\"][0,0]\n#\n# ftimes2 = f * 2\n# assert ftimes2.nc.variables[\"model_top\"][0,0] == f.nc.variables[\"model_top\"][0,0] * 2\n# assert (f * ftimes2).nc.variables[\"model_top\"][0,0] ==\\\n# f.nc.variables[\"model_top\"][0,0] * \\\n# ftimes2.nc.variables[\"model_top\"][0,0]\n#\n# fdiv2 = f / 2\n# assert fdiv2.nc.variables[\"model_top\"][0,0] == f.nc.variables[\"model_top\"][0,0] / 2\n# assert (f / fdiv2).nc.variables[\"model_top\"][0,0] == \\\n# f.nc.variables[\"model_top\"][0,0] / \\\n# fdiv2.nc.variables[\"model_top\"][0,0]\n#\n# assert f.nc.variables[\"ibound\"][0,0,0] == 1\n\n\ndef test_shapefile_ibound():\n import os\n import flopy\n try:\n import shapefile\n except:\n return\n\n shape_name = os.path.join(spth, \"test.shp\")\n nam_file = \"freyberg.nam\"\n model_ws = os.path.join('..', 'examples', 'data',\n 'freyberg_multilayer_transient')\n ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,\n verbose=True, load_only=[])\n ml.export(shape_name)\n shp = shapefile.Reader(shape_name)\n field_names = [item[0] for item in shp.fields][1:]\n ib_idx = field_names.index(\"ibound_001\")\n txt = \"should be int instead of {0}\".format(type(shp.record(0)[ib_idx]))\n assert type(shp.record(0)[ib_idx]) == int, txt\n\n\ndef test_shapefile():\n for namfile in namfiles:\n yield export_shapefile, namfile\n return\n\ndef test_netcdf():\n for namfile in namfiles:\n yield export_netcdf, namfile\n\n return\n\n\ndef build_netcdf():\n for namfile in namfiles:\n export_netcdf(namfile)\n return\n\n\ndef build_sfr_netcdf():\n namfile = 'testsfr2.nam'\n export_netcdf(namfile)\n return\n\n\nif __name__ == '__main__':\n #test_shapefile()\n # test_shapefile_ibound()\n # test_netcdf_overloads()\n #test_netcdf_classmethods()\n # build_netcdf()\n # build_sfr_netcdf()\n #test_sr()\n #test_mbase_sr()\n #test_rotation()\n test_sr_with_Map()\n #test_sr_scaling()\n #test_read_usgs_model_reference()\n #test_dynamic_xll_yll()\n #test_namfile_readwrite()\n # test_free_format_flag()\n # test_export_output()\n #for namfile in namfiles:\n # for namfile in [\"fhb.nam\"]:\n # export_netcdf(namfile)\n #test_freyberg_export()\n pass\n","sub_path":"autotest/t007_test.py","file_name":"t007_test.py","file_ext":"py","file_size_in_byte":26204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"543692453","text":"import flopy as fp\nimport numpy as np\nimport geopandas as gp\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nfrom flopy.utils.gridgen import Gridgen \nfrom flopy.utils.gridintersect import GridIntersect\nfrom flopy.utils import Raster\nimport shapely\nfrom shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, 
MultiPolygon\nfrom shapely.strtree import STRtree \n\n#1\ndef gp2cellids (grid, gp, idomain, idomain_active=True, type = \"polygon\",layer=0,areas=3):\n    \n    \"\"\"\n    This function extracts the cellids of the intersection between a geopandas object and a grid\n    grid : modelgrid with flopy.discretisation !\n    gp : geopandas object with one entity only\n    idomain : array, the idomain array to update\n    idomain_active : bool, if True the idomain is updated (cells intersected by the gp will be marked active), prevents some issues\n    type : str, feature type (polygon or line)\n    layer : int, the layer on which the gp lies\n    areas : factor that determines whether a cell counts as intersected, based on the total area intersected\n    (a value of 3, for example, means only cells which have at least 1/3 of their area intersected by the polygon will be taken into account)\n    \"\"\"\n    \n    ix = GridIntersect(grid)\n    if type == \"polygon\":\n        result = ix.intersect(gp.geometry[0])\n        result = result[result.areas>(np.nanmax(result.areas)/areas)] # only keep cells with at least 1/areas of their area intersected\n        \n    \n    if type == \"boundary\" :\n        result = ix.intersect(gp.geometry[0].boundary)\n        \n    if type == \"line\" :\n        result = ix.intersect(gp.geometry[0])\n    \n    result = result[result.areas!=0] # fix bug with some null areas\n    \n    lst=[]\n    for irow, icol in result.cellids:\n        lst.append(((layer,irow,icol)))\n        if idomain_active:\n            idomain[irow*grid.ncol+icol] = 1\n    return lst\n\n#2\ndef cellidBD(idomain, layer=0): \n    \n    \"\"\"\n    extract the cellids at the boundary of the domain at a given layer\n    idomain : 3D array, idomain array which determines whether a cell is active (1 active, 0 inactive)\n    layer : int, layer from which the boundary cells are extracted\n    \"\"\"\n    lst_cellBD=[]\n\n    for irow in range(idomain.shape[1]):\n        for icol in range(idomain.shape[2]):\n            if idomain[layer][irow,icol]==1:\n                #check neighbours\n                if np.sum(idomain[layer][irow-1:irow+2,icol-1:icol+2]==1) < 8:\n                    lst_cellBD.append((layer,irow,icol))\n    return lst_cellBD\n\n\n#3 get functions\ndef get_heads(model_name,workspace,obj=False):\n    \"\"\"\n    Function that returns the heads from the headfile\n    model_name : str, the name of the current model\n    workspace : str, the path to the workspace (where output files are stored)\n    obj : bool, whether to return the head object rather than the computed heads for the last stress period\n    \"\"\"\n    headfile = '{}.hds'.format(model_name)\n    fname = os.path.join(workspace,headfile) \n    hdobj = fp.utils.HeadFile(fname, precision='double') \n    head = hdobj.get_data()\n    \n    if obj:\n        return hdobj\n    else:\n        return head\n\ndef get_spdis(model_name,workspace):\n    \"\"\"\n    Function that returns the specific discharge from the cbc file\n    \"\"\"\n    spdfile = '{}.cbc'.format(model_name)\n    fname = os.path.join(workspace,spdfile) \n    spdobj = fp.utils.CellBudgetFile(fname, precision='double') \n    spd = spdobj.get_data(text=\"SPDIS\")\n    return spd\n\ndef get_cbc(model_name,workspace):\n    cbcfile = '{}.cbc'.format(model_name)\n    fname = os.path.join(workspace,cbcfile) \n    cbcobj = fp.utils.CellBudgetFile(fname, precision='double') \n    return cbcobj\n\ndef get_budgetobj(model_name,workspace):\n    \"\"\"\n    Function that returns the budget file as an object\n    \"\"\"\n    lstBudgetfile = \"{}.lst\".format(model_name)\n    fname = os.path.join(workspace,lstBudgetfile)\n    Budgetobj = fp.utils.Mf6ListBudget(fname)\n    return Budgetobj\n\n\n#4\ndef inter_lst (lst1,lst2,typ = \"intersection\"):\n    \n    \"\"\"\n    Return the intersection/unique values of lst1 compared to lst2\n    lst1 and lst2 : list\n    typ : type of comparison (intersection or unique)\n    \"\"\"\n    \n    if typ == \"intersection\":\n        return [i for i in lst1 if i in lst2]\n    if typ == \"unique\":\n        return [i for i in lst1 if i not in lst2]
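\n\n# Illustrative usage (not part of the original module):\n# inter_lst([1,2,3],[2,3,4])                 # -> [2, 3]\n# inter_lst([1,2,3],[2,3,4],typ=\"unique\")    # -> [1]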
\n\n#5 \ndef import_riv(grid,gp,lst_domain):\n    \n    \"\"\"\n    This function extracts info about a river (geopandas object, LINESTRING): cellids + length in each cell, in the right order.\n    \n    grid : from the gwf model, gwf.modelgrid for ex. or flopy.discretisation\n    gp : a geopandas object containing a single LineString (which can have multiple segments however)\n    lst_domain : list of all active cells\n\n    Returns a dataframe containing these data; post-processing is necessary to remove cells that are already counted as BC in the model\n    \"\"\"\n    \n    nlay = np.max(np.array(lst_domain)[:,0])+1 #nlay\n    \n    ix = GridIntersect(grid)\n    coord_riv=[]\n    for x,y in zip(gp.geometry[0].xy[0],gp.geometry[0].xy[1]): # extract river coords\n        coord_riv.append((x,y))\n\n    verti=[]\n    df_tot_ord = pd.DataFrame() # empty DF\n    for i in range(len(coord_riv)):\n        if i < len(coord_riv)-1:\n            lsi = LineString([coord_riv[i],coord_riv[i+1]]) # create the linestring between point i and i+1\n            res = ix.intersect(lsi) # do the intersection\n            res = res[res[\"lengths\"]!=0] # remove a bug on Linux with lengths == 0\n            cellids = res.cellids # extract cellids (row,col only)\n\n            if len(cellids)>1: # if more than one cell is intersected --> we need to order them\n\n                dirx = coord_riv[i+1][0]-coord_riv[i][0] # variation of x (to know if the segment goes right or left)\n\n                for x,y in res.vertices: \n                    verti.append(x)\n                vertix = np.array(verti)[:,0] # extract the 1st vertex of each intersection in order to organize the cells\n                df = pd.DataFrame({\"cellids\":cellids,\"vertix\":vertix,\"lengths\":res.lengths}) # create a temp DF to order\n                verti=[]\n\n                #organize the cells given the direction\n                if dirx > 0:\n                    df.sort_values(by=[\"vertix\"],ascending=True,inplace=True) \n                if dirx < 0:\n                    df.sort_values(by=[\"vertix\"],ascending=False,inplace=True) \n\n                # append these data to a big DF\n                df_tot_ord = df_tot_ord.append(df,sort=True).drop([\"vertix\"],axis=1)\n\n            else : # if only one cell is intersected by the linestring\n                df_tot_ord = df_tot_ord.append(pd.DataFrame({\"cellids\":cellids,\"lengths\":res.lengths}))\n\n    df_riv = df_tot_ord.groupby([\"cellids\"],sort=False).sum() # regroup river parts within the same cells and sum the lengths\n\n    # retrieve data\n    lst_len_Riv = df_riv[\"lengths\"].values\n\n    # determine on which layer these cells are active\n    cellids_Riv=[]; # list of all the cells intersected by the river\n    cellids = df_riv.index\n    for irow,icol in cellids:\n        for layer in range(nlay):\n            cell = (layer,irow,icol)\n            if cell in lst_domain: #attribute the river to the uppermost active cell\n                break\n        if cell not in cellids_Riv:\n            cellids_Riv.append(cell)\n\n    df_riv = pd.DataFrame({\"cellids\":cellids_Riv,\"lengths\":lst_len_Riv}) \n    return df_riv\n\n    \n#6\ndef Complete_riv(riv_path,stations_csv,us,ds,lst_chd,lst_domain,grid):\n    \n    \"\"\"\n    A complete function that imports a river and returns the stress data.\n    The river path, the stations path, and the upstream and downstream heads must be provided\n    \n    riv_path : the path to the shapefile of the river (one linestring only)\n    stations_csv : path to the csv file containing the info about the stations (x,y,elevation)\n    lst_chd : a list of every constant-head cell\n    lst_domain : a list of each active cell\n    grid : grid of the model\n    
\"\"\"\n \n BC_riv = gp.read_file(riv_path) # read shp, linestring from ups to dws\n df_riv = import_riv(grid,BC_riv,lst_domain) # extract cellids intersected + lengths in each cells\n df_riv[\"xc\"],df_riv[\"yc\"] = get_cellcenters(grid,df_riv.cellids)\n df_riv[\"head\"] = np.zeros([df_riv.shape[0]]) # create a new column for the heads\n\n # us and ds heads\n df_riv.loc[0,\"head\"] = us\n df_riv.loc[df_riv.index[-1],\"head\"] = ds\n \n # ref points and assignement of heads\n riv_stations = pd.read_csv(stations_csv,sep=\";\")\n for i in riv_stations.index:\n xs = riv_stations.loc[i].x\n ys = riv_stations.loc[i].y\n elev = riv_stations.loc[i].elev\n dist = ((df_riv[\"xc\"] - xs)**2 + (df_riv[\"yc\"] - ys)**2)**0.5\n df_riv.loc[dist==np.min(dist),\"head\"] = elev\n\n # interpolation of the heads btw ups,stations and ds\n # linInt_Dfcol(df_riv,col=\"head\")\n \n # length cumulated\n lcm=0\n l_cum=[]\n for l in df_riv.lengths:\n lcm += l/2\n l_cum.append(lcm)\n lcm += l/2\n df_riv[\"l_cum\"] = l_cum\n \n # linear interp (0 as a null value)\n yp = df_riv[\"head\"][df_riv[\"head\"]!=0]\n xp = df_riv[\"l_cum\"][df_riv[\"head\"]!=0]\n df_riv[\"head\"] = np.interp(df_riv[\"l_cum\"],xp,yp)\n \n # drop cells outside domain or already chd\n for cellid in df_riv.cellids:\n if (cellid in lst_chd) | (cellid not in lst_domain): \n df_riv = df_riv.drop(df_riv[df_riv[\"cellids\"] == cellid].index)\n \n # create the stress package\n df_riv= df_riv.reset_index()\n H_riv = df_riv[\"head\"]\n riv_chd=[]; o =-1;\n for x in df_riv.cellids:\n o = o + 1\n riv_chd.append((x,H_riv[o]))\n lst_chd.append(x) # update chd list\n return riv_chd\n\n\n#7\ndef get_cellcenters (grid,cellids): \n \"\"\"\n This function return the x and y coordinates of a given cellid and a grid (dis only)\n \"\"\"\n xc=[];yc=[]\n \n for i,j,k in cellids:\n xc.append(grid.xcellcenters[j,k])\n yc.append(grid.ycellcenters[j,k])\n return xc,yc\n \n#8\ndef ra_pack(pack,ibd,iper=0,value=-1):\n \n \"\"\"\n Return an array containing position of cells from a certain package\n Can be used to plot the bc zones of a certain package (pack)\n pack : a bc package which possess a stress_period_data attribute\n ibd : 3D array on which the value will be change \n iper : int, stress period\n value : int, value of replacement in ibd\n \"\"\"\n \n ra = pack.stress_period_data.get_data(key=iper)\n for k, i, j in ra['cellid']:\n ibd[k, i, j] = value \n\n#9\ndef importControlPz (file_path,grid,sheetName=\"1990\",np_col = \"NP\",x_col=\"x\",y_col=\"y\"):\n \n \"\"\"\n For 2D models ! 
\n return an array (nrow,ncol) containing infos about pz observations in control pz\n file_path : the file path to the excel sheet\n grid : modelgrid (flopy.discretization.structuredgrid object)\n sheetName : the name of the data sheet \n np_col : the name of the column in the file containing infos about the PL\n x_col,y_col : the name of the columns containings geo infos (x and y coordinates)\n \"\"\"\n \n DB = pd.read_excel(file_path,sheet_name = sheetName) # read the file with pandas\n \n Control_pz = np.zeros([grid.nrow,grid.ncol]) #ini list\n lstIDpz=[];Pz = [];\n \n for o in np.arange(DB.shape[0]): # loop to iterate through the data and returns the intersected cellids\n xc = DB[\"x\"][o]\n yc = DB[\"y\"][o] \n cellid = grid.intersect(xc,yc)\n \n if DB[np_col][o]: # check that a head data is available\n lstIDpz.append(cellid) # list of cellids\n Pz.append(DB[np_col][o]) # list of value\n \n df = pd.DataFrame()\n df[\"cellid\"]=lstIDpz\n df[\"Pz\"] = Pz\n df = df.groupby([\"cellid\"]).mean().reset_index() # regroup pz in the same cells and apply mean\n \n #create the obs array\n for i in df.index:\n j,k = df.loc[i,\"cellid\"] #extract cellids\n Control_pz[j,k] = df.loc[i,\"Pz\"] # change pz value\n \n return Control_pz\n\n#10\ndef importWells(GDB,grid,lst_domain,fac=1/365/86400,V_col=\"V Bancaris\",layer=0):\n \n \"\"\"\n 2D only !\n extract the infos about the uptake of wells\n path : path to the shp (multi points required)\n grid : the modelgrid\n fac : the factor to apply on the Volume to get m3/s\n V_col : the column name containing info about Volume\n layer : the layer on which the wells are active\n \"\"\"\n \n\n stress_data_well=[]\n ix = GridIntersect(grid)\n\n for o in GDB.index:\n Vw = GDB[V_col][o]\n if not (np.isnan(Vw)) | (Vw == 0):\n try:\n cellidx = ix.intersect(GDB.geometry[o]).cellids[0][0]\n cellidy = ix.intersect(GDB.geometry[o]).cellids[0][1]\n cellid = (layer,cellidx,cellidy)\n if cellid in lst_domain:\n stress_data_well.append((cellid,-fac*Vw))\n except:\n pass\n return stress_data_well\n\n#11\ndef coor_convert(x,y,epsgin,epsgout):\n \n \"\"\"\n Function that converts coordinates\n x,y : coordinates from epsgin\n epsgin : actual epsg system \n epsgout : the epsg goal\n \"\"\"\n \n from pyproj import Proj, transform\n inproj = Proj(init=\"epsg:{}\".format(epsgin))\n outproj = Proj(init=\"epsg:{}\".format(epsgout))\n xp,yp = transform(inproj,outproj,x,y)\n return xp,yp\n\n#12\ndef chd2riv(riv_chd,cond,rdepth,stage_var=1):\n \n \"\"\"\n Transform a chd stress period data into a riv stress period data\n riv_chd : list, chd spd (cellid,stage)\n cond : float, conducance of the riverbed\n rdepth : float, depth of the river botom (from the stage)\n \"\"\"\n \n Riv=[]\n for cellid,stage in riv_chd:\n Riv.append((cellid,(stage-rdepth)+rdepth*stage_var,cond,stage-rdepth))\n riv_chd[:] = Riv\n\n#13\ndef nn2kij(n,nlay,nrow,ncol):\n \n \"\"\"\n from a node number to ilay,irow and icol (dis)\n \"\"\"\n \n return fp.utils.gridintersect.ModflowGridIndices.kij_from_nn0(n,nlay,nrow,ncol)\n\n#14\ndef get_Total_Budget(model_name,model_dir,kstpkper=(0,0)):\n\n \"\"\"\n Return a DF containing Budget data for the entire model by searching in the LST file. 
Budget should have been Printed in Output Control\n model_name : str, name of the model given in the gwf pack\n model_dir : str, path to workspace\n \"\"\"\n \n file = os.path.join(model_dir,\"{}.lst\".format(model_name)) \n with open(file) as f:\n doc = f.readlines()\n i=-1\n tmstp=0;sp=0;inf=0\n for ilin in doc: # iterate through lines\n i += 1 # idx line\n info=\"\"\n try:\n tmstp = int(ilin[52:58].split(\",\")[0])\n sp = int(ilin[73:-1])\n info = ilin[2:15]\n except:\n pass\n if (info == \"VOLUME BUDGET\") & (tmstp == kstpkper[0]+1) & (sp == kstpkper[1]+1): #if this line is encountered --> break\n break\n \n if i == len(doc):\n raise Exception (\"No Budget info found ! Check Output Control or stress period \")\n \n ###number of packages\n npack=0\n for o in range(1000):\n if doc[i+8+o]==\"\\n\":\n break\n npack += 1\n ###number of packages\n \n # retrieve data\n lst_val_IN =[]\n lst_val_OUT = []\n lst_nam_pak = []\n pak_type=[]\n for ipak in range(npack): # ipak --> line indice for a specific package\n ipak += 8 # packages begin 8 lines after i\n\n lst_nam_pak.append(doc[i+ipak][85:96].rstrip()) # Package name\n lst_val_IN.append(float(doc[i+ipak][63:80])) # value IN\n lst_val_OUT.append(float(doc[i+ipak+npack+5][63:80])) # Value OUT\n pak_type.append(doc[i+ipak][55:62]) # Package type\n\n Budget = pd.DataFrame({\"Pack\":lst_nam_pak,\n \"IN\":lst_val_IN,\n \"OUT\":lst_val_OUT,\n \"Type\":pak_type})\n\n return Budget\n\n#15\ndef arr2ascii(arr,filename,x0,y0,res,nodata=-9999):\n \n \"\"\"\n Create an ascii raster file from an array as a base. Left corner origin and resolution must be provided.\n arr : 2D numpy arr\n filename : the path/name for the new ascii file\n x0,y0 : left corner origin of the array\n res : Ascii resolution\n nodata : no data value\n \"\"\"\n \n ncol = arr.shape[1]\n nrow = arr.shape[0]\n with open(filename,\"w\") as file:\n file.write(\"ncols {}\\n\".format(ncol))\n file.write(\"nrows {}\\n\".format(nrow))\n file.write(\"xllcorner {}\\n\".format(x0))\n file.write(\"yllcorner {}\\n\".format(y0))\n file.write(\"cellsize {}\\n\".format(res))\n file.write(\"nodata_value {}\\n\".format(nodata))\n for irow in range(nrow):\n for icol in range(ncol):\n file.write(str(arr[irow,icol])+\" \")\n\n#16\ndef rspl_rast(rast_path,grid,band=1):\n \n \"\"\"\n Use the resample_to_grid method from flopy Raster. 
\n rast_path : path to the raster\n grid : modelgrid (gwf.modelgrid or flopy.discretisation)\n \"\"\"\n \n rast = Raster.load(rast_path)\n arr = rast.resample_to_grid(grid,band)\n return arr\n\n#17\ndef k_zones(k,z1,layer,kn,ix): \n \n \"\"\"\n Change value in a numpy 3D array location based on a certain zone (format: [(x1,y1),(x2,y2), ...])\n Design for update permeability array but can be used for any other purpose that imply modifying an array in a specific zone\n \n z1: list of tuples, zone (format: [(x1,y1),(x2,y2), ...])\n layer : list or int, layers on which to apply changes\n kn : float, the new value of k\n ix : gridintersect object --> ix = GridIntersect(grid) as grid the modelgrid\n \"\"\"\n \n poly = Polygon(z1)\n res = ix.intersect(poly)\n if type(layer) != int:\n for ilay in layer:\n for cellid in res.cellids:\n irow = cellid[0]\n icol = cellid[1]\n k[ilay,irow,icol] = kn \n \n elif type(layer) == int:\n for cellid in res.cellids:\n irow = cellid[0]\n icol = cellid[1]\n k[layer,irow,icol] = kn \n \n else :\n raise Exception (\"layer must be an int or a list of int\")\n \n#18\ndef liss_mob(arr,n,null_v = 0):\n \n \"\"\"\n Apply a moving average (with 2*n numbers) on 2D array.\n arr : 2D numpy array\n n : number of elements (in one of the four direction) to take into account for the moving average (n=2 --> average of a specific number will be calculated with the surroundings 5x5 elements)\n return a 2D array and replace null value by 0\n \"\"\"\n \n \n arr[arr==null_v]=None\n for irow in range(n,arr.shape[0]-n):\n for icol in range(n,arr.shape[1]-n):\n if not np.isnan(arr[irow,icol]):\n bloc = arr[irow-n:irow+n+1,icol-n:icol+n+1]\n arr[irow,icol] = np.nanmean(bloc)\n arr = np.nan_to_num(arr)\n return arr","sub_path":"codes_flopy/modules/Rouss.py","file_name":"Rouss.py","file_ext":"py","file_size_in_byte":18869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"530863365","text":"import numpy as np\nfrom keras.models import Input, Model\nfrom keras import backend as K\nimport tensorflow as tf\n\nfrom lights import Lights\nfrom camera import Camera\nfrom model import Geometry\nfrom warp import Warp\nfrom render import Render\n\nBATCH_SIZE = 10\nNUM_WARPS = 5\nnum_vertices = 100\nnum_faces = 150\nnum_lights = 1\n\n\nbase_model = Geometry(vertices=np.random.randn(num_vertices, 3), faces=np.random.randint(0, num_vertices, size=[num_faces, 3]))\nlights = Lights(positions=np.random.randn(num_lights, 3), intensities=np.random.randn(num_lights, 3))\ncamera = Camera(eye=np.random.randn(1, 3), center=np.random.randn(1, 3), world_up=np.random.randn(1, 3))\ntrans = Transform(batch_size=BATCH_SIZE)\n\nwarp_params = Input(shape=[NUM_WARPS, 1])\nwarped_vertices = Warp(num_warps=NUM_WARPS)([K.identity(base_model.vertices), warp_params])\nworld_coords = trans(warped_vertices)\ncolors = K.constant(np.random.randn(BATCH_SIZE, num_vertices, 3))\nrendered = Render(512, 512)([world_coords, base_model.faces],\n base_model.calculate_normals(world_coords),\n colors,\n [camera.eye, camera.center, camera.world_up],\n [lights.positions, lights.intensities])\n\n#model = Model(inputs=[warp_params], outputs=[renderer])\n\nsess = K.get_session()\nsess.run(tf.global_variables_initializer())\nsess.run([rendered], feed_dict={warp_params : np.random.randn(BATCH_SIZE, NUM_WARPS, 
1)})\n\n","sub_path":"mesh_renderer/keras_api/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
{"seq_id":"576427331","text":"\r\nimport sly \r\n\r\n\r\nclass Lexer(sly.Lexer):\r\n\ttokens = {\r\n\t\t\tNUMBER,AND,BREAK,DO,ELSE,ELSEIF,END,FALSE,FOR,FUNCTION,\r\n            IF,IN,LOCAL,NIL,NOT,OR,REPEAT,RETURN,THEN,TRUE,\r\n            UNTIL,WHILE,NAME,STRING,EQ,NE,LE,GE,LT,GT,TDOT,APPEND,\r\n            CONCAT,UMINUS,VARARG,\r\n    }\t\r\n\t\r\n\r\n\tliterals = \"+-*/%^#=(){}[];:,.><~_^|\"\r\n\r\n\r\n\tignore = ' \\t\\r'\r\n\tignore_newline = r'\\n+'\r\n \r\n\tdef ignore_newline(self, t):\r\n\t\tself.lineno += t.value.count('\\n')\r\n\r\n\t@_(r'0x[0-9a-fA-F]+',r'(\\d+\\.\\d*|\\d+)([eE][-+]?\\d+)?')\r\n\tdef NUMBER(self, t):\r\n\t\tif t.value.startswith('0x'):\r\n\t\t\tt.value = int(t.value[2:], 16)\r\n\t\telse: \r\n\t\t\t# int() would fail on floats like 3.5, so pick the right conversion\r\n\t\t\tt.value = float(t.value) if ('.' in t.value or 'e' in t.value or 'E' in t.value) else int(t.value)\r\n\t\treturn t\r\n
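\r\n\t# sly remaps a matched NAME to a keyword token through the NAME[\"kw\"]\r\n\t# entries below. Illustrative use of the finished lexer (hypothetical\r\n\t# snippet, not part of the original file):\r\n\t#   for tok in Lexer().tokenize('while x do x = x - 1 end'):\r\n\t#       print(tok.type, tok.value)\r\n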
\r\n    # IDENTIFIERS\r\n\tNAME = r'[a-zA-Z_][a-zA-Z0-9_]*'\r\n\tNAME[\"and\"]=\"AND\"\r\n\tNAME[\"break\"]=\"BREAK\"\r\n\tNAME[\"do\"]=\"DO\" \r\n\tNAME[\"else\"]=\"ELSE\"\r\n\tNAME[\"elseif\"]=\"ELSEIF\"\r\n\tNAME[\"end\"]=\"END\"\r\n\tNAME[\"false\"]=\"FALSE\"\r\n\tNAME[\"for\"]=\"FOR\"\r\n\tNAME[\"function\"]=\"FUNCTION\"\r\n\tNAME[\"if\"]=\"IF\"\r\n\tNAME[\"in\"]=\"IN\"\r\n\tNAME[\"local\"]=\"LOCAL\"\r\n\tNAME[\"nil\"]=\"NIL\"\r\n\tNAME[\"not\"]=\"NOT\"\r\n\tNAME[\"or\"]=\"OR\"\r\n\tNAME[\"repeat\"]=\"REPEAT\"\r\n\tNAME[\"return\"]=\"RETURN\"\r\n\tNAME[\"then\"]=\"THEN\"\r\n\tNAME[\"true\"]=\"TRUE\"\r\n\tNAME[\"until\"]=\"UNTIL\"\r\n\tNAME[\"while\"]=\"WHILE\"\r\n\t#NAME[\"function\"]=\"FUNCTION\"\r\n\tNAME[\"concat\"]=\"CONCAT\"\r\n\tNAME[\"vararg\"]=\"VARARG\"\r\n\r\n\t# strings\r\n\tSTRING = r'\".*\"'\r\n\t\r\n    # comparison operators\r\n\tEQ = r\"==\"\r\n\tNE = r\"~=\"\r\n\tLE = \"<=\"\r\n\tGE = \">=\"\r\n\tLT = \"<\"\r\n\tGT = \">\"\r\n\tTDOT = r'(\\.\\.\\.)'\r\n\tAPPEND = r'(\\.\\.)'\r\n\t\r\n\t\r\n\t@_(r'\\-\\-\\[\\[(.|\\n)*?\\]\\]')\r\n\tdef COMMENTCORCHETE(self , t) :\r\n\t\tself.lineno += len(t.value)\r\n\r\n\r\n\t@_(r'\\-\\-[^\\[].*')\r\n\tdef COMMENTLINEAL(self , t) :\r\n\t\tself.lineno += len(t.value)\r\n\r\n\t@_(r'\\-\\-\\[\\=+\\[(.|\\n)*?\\]\\=+\\]',)\r\n\tdef COMMENT(self , t) :\r\n\t\tcadena = t.value.replace('\\t','xx').replace('\\n','xx').replace(' ','x')\r\n\t\tcorIzquierda = 0\r\n\t\tigualIzquierda = 0 \r\n\t\tcorDerecha = 0\r\n\t\tigualDerecha = 0 \r\n\t\ti = 0\r\n\t\tj = len(cadena)-1\r\n\t\twhile corIzquierda < 2 or corDerecha < 2:\r\n\t\t\tif corIzquierda < 2:\r\n\t\t\t\tif cadena[i] == '[':\r\n\t\t\t\t\tcorIzquierda+=1\r\n\t\t\t\tif cadena[i] == '=':\r\n\t\t\t\t\tigualIzquierda += 1\r\n\t\t\t\ti+=1\r\n\t\t\t# scan the closing side independently; nesting it under the opening\r\n\t\t\t# scan (as originally written) could hang once the left side finished\r\n\t\t\tif corDerecha < 2:\r\n\t\t\t\tif cadena[j]== ']':\r\n\t\t\t\t\tcorDerecha+=1\r\n\t\t\t\tif cadena[j]=='=':\r\n\t\t\t\t\tigualDerecha+=1\r\n\t\t\t\tj-=1\r\n\t\tif igualDerecha == igualIzquierda:\r\n\t\t\tself.lineno += len(t.value)\r\n\t\telse:\r\n\t\t\tprint('*****opening/closing \"=\" counts differ %r , line %d' % (t.value, self.lineno))\r\n\t\t\tself.index +=1\r\n\r\n\r\n\t@_(r'\\-\\-\\[\\=+\\[(.|\\n)*?\\]',r'\\-\\-\\[\\=+\\[(.|\\n)*?',r'\\-\\-\\[\\[(.|\\n)*?\\]',r'\\-\\-\\[\\[(.|\\n)*?',\r\n\t\tr'\\-\\-\\[ +\\[(.|\\n)*?\\]\\]',r'\\-\\-\\[\\[(.|\\n)*?\\] +\\]',\r\n\t\tr'\\-\\-\\[(.|\\n)*?\\]\\]'\r\n\t\t)\r\n\tdef errorComentarios (self, t):\r\n\t\tprint('*****badly closed comment %r , line %d' % (t.value, self.lineno))\r\n\t\tself.index += 1\r\n\r\n\tdef error(self, t):\r\n\t\tprint('***** Line %d: illegal character %r' % (self.lineno, t.value[0]))\r\n\t\tself.index += 1\r\n \r\n","sub_path":"AnalizadorLua/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
{"seq_id":"273043194","text":"from Content.Beings import Player, Enemy\r\nfrom Content.Items.Weapons import Weapons, Swords, Wands, Staffs, Hammers, Daggers, BattleStaffs, Bows\r\nfrom Content.Items.Armor import Armor, HeadArmor, TorsoArmor, LegArmor\r\nfrom Content.Items import Items, Food, Tiers\r\nfrom Content.Towns import Towns, Shops\r\nfrom Content.Modifiers import Modifiers, WeaponModifiers\r\nfrom Content.GUIs import GUI, MainMenu, CharacterCreation, TownScreen, GUIIndexes, LevelUp, PlayerInventory\r\nfrom Content.GUIs.ShopGUIs import BlacksmithScreen, ChallengeScreen, StoreScreen\r\nfrom Content.Enemies import Enemies\r\nfrom Content.Dungeons import Dungeons, Rooms\r\nfrom math import *\r\nimport pickle\r\nimport os\r\nfrom os.path import dirname, abspath\r\nimport random\r\nd = dirname(abspath(__file__))\r\n\r\n\r\nclass 
Game(object):\r\n def __init__(self):\r\n self.guiHandler = GUI.GUIHandler(self)\r\n\r\n self.player = Player()\r\n\r\n self.level = 1\r\n\r\n self.towns = [Towns.Town(game=self, level=self.level)]\r\n self.currentTown = 0\r\n self.towns[0].newDungeon()\r\n #self.nextTown()\r\n\r\n self.g = 0\r\n\r\n self.guiHandler.start()\r\n\r\n def prevTown(self):\r\n self.currentTown -= 1\r\n\r\n def nextTown(self):\r\n if(self.towns[self.currentTown] == self.towns[-1]):\r\n self.level += random.uniform(0.3, 0.8)\r\n self.towns.append(Towns.Town(game=self, level=self.level))\r\n self.towns[-1].newDungeon()\r\n self.currentTown += 1\r\n\r\n def getCurrentTown(self):\r\n return self.towns[self.currentTown]\r\n\r\n def addXPToPlayer(self, amount):\r\n if(self.player.addXP(amount)):\r\n self.player.levelUp()\r\n self.guiHandler.swapGUI(GUIIndexes.LEVEL_UP)\r\n\r\n def save(self, event):\r\n if(event.char == 's'):\r\n gameFile = open(d + \"\\saves\\{}.player\".format(self.player.name), \"w+b\")\r\n self.g = self.guiHandler.unloadGUIs()\r\n pickle.dump(self, gameFile)\r\n self.guiHandler.loadGUIs(self.g)\r\n\r\n def load(self, newGame):\r\n self.guiHandler.currentGUI.destroy()\r\n self = newGame\r\n self.guiHandler.loadGUIs(self.g)\r\n\r\n def getSaveFiles(self):\r\n files = []\r\n for file in os.listdir(d + \"\\saves\"):\r\n if file.endswith(\".player\"):\r\n files.append(file)\r\n return d, files\r\n\r\ngame = Game()\r\n\r\n\r\n\"\"\"\r\nTESTING FUNCTIONS\r\n\"\"\"\r\ndef swordDamage():\r\n #---DAMAGE OF SWORD---\r\n s = Swords.WoodenSword()\r\n four = 0\r\n five = 0\r\n six = 0\r\n seven = 0\r\n\r\n\r\n values = [0]*100\r\n\r\n\r\n for i in range(100):\r\n damage = s.getDamage(Player())\r\n print(\"{}: {}\".format(i, damage))\r\n values[damage] += 1\r\n\r\n for l in range(0, len(values)):\r\n if(values[l] != 0):\r\n print(\"{}: {}\".format(l, values[l]))\r\n\r\n\r\ndef buffsOfSwords(weapon=Swords.StoneSword):\r\n #---BUFFS OF RANDOM SWORDS---\r\n for i in range(25):\r\n \tl = weapon()\r\n \tprint(\"Sword #: {}\\nstr: {}\\nagi: {}\\nint: {}\\n\".format(i, l.strBuff, l.agiBuff, l.intBuff))\r\n\r\n\r\ndef modifierStats():\r\n #---MODIFIERS OF RANDOM SWORDS---\r\n vals = [0]*len(WeaponModifiers.WEAPON_MODIFIERS)\r\n swords = 1000\r\n\r\n for p in range(swords):\r\n m = Swords.StoneSword()\r\n m.equipModifiers = []\r\n #CHANCES = PLAYERLEVEL//3\r\n m.generateModifiers(4)\r\n\r\n for mod in m.equipModifiers:\r\n vals[WeaponModifiers.WEAPON_MODIFIERS.index(mod)] += 1\r\n m.addModifiers()\r\n print(m.name)\r\n\r\n chanceForMod=0\r\n for l in range(len(WeaponModifiers.WEAPON_MODIFIERS)):\r\n total = sum(vals)\r\n percent = round(100*(vals[l]/total), 2)\r\n totalChance = round(100*(vals[l]/swords), 2)\r\n chanceForMod += totalChance\r\n print(\"{}: {} | {}% of modifiers, | {}% of all chances.\".format(WeaponModifiers.WEAPON_MODIFIERS[l].prefix, vals[l], percent, totalChance))\r\n print(\"Final chance for any modifier: {}\".format(round(chanceForMod,2)))\r\n\r\n\r\ndef randomModifierStats():\r\n for mod in WeaponModifiers.WEAPON_MODIFIERS:\r\n print(\"{}: Value: {} | Weight: {} | str: {} | agi: {} | int: {}\".format(mod.prefix, mod.valueBuff, mod.weightBuff, mod.strBuff, mod.agiBuff, mod.intBuff))\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"576427331","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 22 11:22:12 2018\r\n\r\n@author: 
caenglish\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport scipy.stats\r\nfrom sklearn import linear_model\r\nimport numpy as np\r\n\r\n#Load proposal data all the way back to 2012\r\ndata=pd.read_csv('CEDAallData_proposals4.csv')\r\ndata.drop(['DATE','CSD','Multi_MeasID','MEASTYPE','LTR','RECTYPE'],inplace=True,axis=1)\r\n\r\n#Load information on how many voters of each party exist in each county\r\nvoter=pd.read_csv('CAcountry_voters.txt',sep='\\t')\r\ndata_bus_tax=data[data['RECTYPENAME']=='Business Tax']\r\n\r\n#Combine the data frames so the D-R spread is placed according to the county of the vote\r\ndata_tax=pd.merge(data_bus_tax,voter, on='CNTYNAME', how='left')\r\n\r\n#Run a linear regression to see if there is a relationship between which party has more voters in a county and whether that county is more likely to vote for tax hikes at the polls\r\nx=[];y=[]\r\nfor i in range(len(data_tax['DRspread'])):\r\n    x.append([data_tax['DRspread'].iloc[i]])\r\n    y.append((data_tax['Percent_sum'].iloc[i]-0.5)*100.0)\r\n\r\n    \r\nlinear_regression = linear_model.LinearRegression()\r\nlinear_regression.fit(x,y)\r\n\r\n#Calculate correlation coefficients\r\ncorr_p=scipy.stats.pearsonr(data_tax['DRspread']/100.0, data_tax['Percent_sum'])\r\ncorr_s=scipy.stats.spearmanr(data_tax['DRspread']/100.0, data_tax['Percent_sum'])\r\nprint('Pearson CC: ',corr_p)\r\nprint('Spearman CC: ',corr_s)\r\n\r\n#Plot the relationship or lack thereof\r\nx_test=np.arange(-30,60,10).reshape(-1,1)\r\n\r\ny_predict=linear_regression.predict(x_test)\r\n\r\nplt.xlim(-30,50)\r\nplt.xlabel('Democrat Voter Advantage Margin[%]')\r\nplt.ylabel('Vote Success Margin[%]')\r\nplt.scatter(x,y)\r\nplt.plot(x_test,y_predict,linewidth=2)\r\nplt.savefig('BusinessTax_Scatter.png',dpi=500)
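\r\n\r\n#Illustrative addition (not in the original script): report the fit quality\r\n#print('R^2: ', linear_regression.score(x,y))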
Initialisation des informations en mémoire\n et lancement de la résolution des colonnes.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _start_apply()\")\n TEST.display(\"techchrccol\", 2, \"TechChRCcol : début de résolution.\")\n assert self._initOk\n mem = self._mem\n mem.memorize(\"techchrccol_encours\", True, self)\n mem.memorize(\"techchrccol_stepcol\", 0, self)\n \n #comptages\n mem.memorize(\"techchrccol_nbplccol\", 0, self)\n #étape #0\n r = self._solve_debut()\n return r\n \ndef _solve_debut(self):\n '''Résolution pour une colonne de carrés - Début, et demande de la 1ère\n observation : quelles sont les carrés de cette colonne qui ne contiennent\n pas le chiffre.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_debut()\")\n assert self._initOk\n mem = self._mem\n #se rappeler le chiffre à placer et dans quelle colonne de carrés\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n isqcol = mem.recall(\"techchrccol_isqcol\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_debut() - \"\\\n \"Début de résolution de la colonne n° {0}\".format(icol))\n #1ère étape : dans quels carrés est-ce que est absent\n obsPattern = (gridview.OBS_SQRSINSQRCOL_NOTCONTAIN, (isqcol, chiffre) )\n #mémoriser les informations pour l'itération suivante\n mem.memorize(\"techchrccol_result\", \"observe\", self)\n mem.memorize(\"techchrccol_obspattern\", obsPattern, self)\n #incrémenter l'index d'observations\n mem.increment(\"techchrccol_indexobs\", self)\n #memoriser l'avancement et la fonction pour l'opération suivante\n mem.memorize(\"techchrccol_stepcol\", 1, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite1, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 1\", self)\n #retourner en indiquant la demande d'observation\n TEST.display(\"techchrccol\", 3, \"_solve_debut() - Demande 1ère \"\\\n \"observation : pattern = {0}\".format(obsPattern))\n r = (\"observe\", obsPattern)\n return r\n\ndef _solve_suite1(self):\n '''Résolution pour une colonne (Col) - Retour 1ère observation = les\n carrés qui ne contiennent pas le chiffre traité.\n S'il y a 1 carré sans le chiffre, passe à l'observation suivante = quel\n est la colonne sur lequel le chiffre n'est pas.\n Dans les autre cas, fin avec succès ou échec.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite1()\")\n assert self._initOk\n mem = self._mem\n #se rappeler le chiffre à placer et dans quelle colonne de carrés\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_suite1() - Colonne de carrés \"\\\n \"n° {0} : retour 1ère observation\".format(icol))\n #récupérer le résultat de l'observation et l'analyser\n #c'est un tuple de nombre et liste de carrés\n found = mem.recall(\"techchrccol_obsfound\", self)\n (nbSqr, listSqr) = found\n \n #si tous les carrés de la colonne ont le chiffre, il n'y a rien d'autre\n #à faire et la technique est terminée\n if nbSqr <= 0:\n TEST.display(\"techchrccol\", 3, \"_solve_suite1 - Pas de carré à \"\\\n \"remplir, fin de la technique locale avec succès.\")\n r = self._solve_fin(\"noplace\")\n \n #s'il y a 2 ou 3 carrés, on ne le fait pas pour le moment (complexité)\n #donc le résultat de la technique = abandon\n elif nbSqr > 1:\n TEST.display(\"techchrccol\", 3, \"_solve_suite1() - Plus d'1 carré à \"\\\n \"remplir, trop complexe. 
La technique locale est \"\\\n \"abandonnée.\")\n r = self._solve_fin(\"quit\")\n\n #1 carré où le chiffre manque => la technique continue\n #algorithme : on cherche maintenant une seule colonne où le chiffre manque\n else: #sbSqr==1\n isqr = listSqr[0] #le carré en question\n TEST.display(\"techchrccol\", 3, \"_solve_suite1 - 1 carré à \"\\\n \"remplir, la technique passe à l'étape suivante.\")\n #observation suivante : dans quelles colonnes de ce carré est-ce que\n # n'est pas ?\n obsPattern = (gridview.OBS_COLSBYSQR_NOTCONTAIN, (isqr, chiffre))\n #mémoriser les informations pour l'étape suivante\n mem.memorize(\"techchrccol_obspattern\", obsPattern, self)\n mem.memorize(\"techchrccol_isqr\", isqr, self)\n #incrémenter l'index d'observations\n mem.increment(\"techchrccol_indexobs\", self)\n #état d'avancement et fonction pour l'opération suivante\n mem.memorize(\"techchrccol_result\", \"observe\", self)\n mem.memorize(\"techchrccol_stepcol\", 2, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite2, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 2\", self)\n #retourner en indiquant la demande d'observation à faire\n TEST.display(\"techchrccol\", 3, \"_solve_suite1() : demande de 2ème \"\\\n \"observation : pattern = {0}\".format(obsPattern))\n r = (\"observe\", obsPattern)\n return r\n\ndef _solve_suite2(self):\n '''Résolution pour une colonne (Col) - Retour 2ème observation = la\n colonne où le chiffre n'est pas. Il y en a forcément exactement 1.\n 3ème observation à faire = les rangs de ce carré où le chiffre\n n'est pas.\n La seule cause possible d'échec est un fail mémoire, en particulier\n un fail de mémoire du résultat d'observation.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite2()\")\n assert self._initOk\n mem = self._mem\n #se rappeler les infos en mémoire de l'étape précédente\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n isqr = mem.recall(\"techchrccol_isqr\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_suite1() - Colonne de carrés \"\\\n \"n° {0} : retour 2ème observation\".format(icol))\n #récupérer le résultat de l'observation et l'analyser\n #c'est un tuple de nombre et liste de colonnes\n found = mem.recall(\"techchrccol_obsfound\", self)\n (nbCol, listCol) = found\n #détection d'erreur : il doit y avoir une seule colonne\n if not nbCol == 1:\n raise(Sudoku_Error, \"Erreur d'observation dans TechChRCcol\"\\\n \"._solve_suite2(), nombre de colonnes invalide.\")\n colmiss = listCol[0]\n TEST.display(\"techchrccol\", 3, \"_solve_suite2() - Le chiffre {0}\"\\\n .format(chiffre) + \"n'est pas dans la colonne {0}.\"\\\n .format(colmiss))\n \n #algorithme : on cherche maintenant dans quelles rangs du même carré\n #le chiffre n'est pas\n obsPattern = (gridview.OBS_ROWSBYSQR_NOTCONTAIN, (isqr, chiffre))\n r = (\"observe\", obsPattern)\n #mémoriser les informations pour l'itération suivante\n mem.memorize(\"techchrccol_obspattern\", obsPattern, self)\n mem.memorize(\"techchrccol_colmiss\", colmiss, self)\n #incrémenter l'index d'observations\n mem.increment(\"techchrccol_indexobs\", self)\n #avancement de la technique et fonction pour l'opération suivante\n mem.memorize(\"techchrccol_result\", \"observe\", self)\n mem.memorize(\"techchrccol_stepcol\", 3, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite3, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 3\", self)\n #retourner en indiquant la demande d'observation à faire\n 
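    # Each _solve_* step in this module follows one pattern: recall state,
    # memorize the handler for the next step, and return an ("observe",
    # pattern) request that the caller satisfies before resuming. A toy
    # rendering of that memory-driven state machine (all names below are
    # illustrative, not the SudoMemory API):
    class Memory(dict):
        def memorize(self, key, value):
            self[key] = value
        def recall(self, key):
            return self.get(key)

    def step_begin(mem):
        mem.memorize("next_action", step_after_obs)
        return ("observe", ("SQUARES_WITHOUT_DIGIT", 5))  # ask the grid

    def step_after_obs(mem):
        count, squares = mem.recall("obsfound")           # stored by the caller
        if count == 1:
            return ("place", (squares[0], 5))
        return ("end", "quit")                            # 0 or >1 squares: stop

    mem = Memory()
    print(step_begin(mem))                     # -> ('observe', ...)
    mem.memorize("obsfound", (1, [(2, 3)]))    # simulated observation result
    print(mem.recall("next_action")(mem))      # -> ('place', ((2, 3), 5))
    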
TEST.display(\"techchrccol\", 3, \"_solve_suite2() : demande de 3ème \"\\\n \"observation : pattern = {0}\".format(obsPattern))\n r = (\"observe\", obsPattern)\n return r\n\ndef _solve_suite3(self):\n '''Résolution pour une colonne (Col) - Retour 3ème observation = les\n rangs du carré où le chiffre n'est pas. Il y en a forcément au moins\n un (puisqu'il y a un chiffre à mettre dans ce carré.\n 4ème observation à faire = les cases vides à l'intersection des rangs et\n de la colonne sans le chiffre.\n La seule cause possible d'échec est un fail mémoire.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite3()\")\n assert self._initOk\n mem = self._mem\n #se rappeler les infos en mémoire de l'étape précédente\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n isqr = mem.recall(\"techchrccol_isqr\", self)\n colmiss = mem.recall(\"techchrccol_colmiss\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_suite3() - Colonne de carrés \"\\\n \"n° {0} : retour 3ème observation\".format(icol))\n #récupérer le résultat de l'observation et l'analyser\n #c'est un tuple de nombre et liste de rangs\n found = mem.recall(\"techchrccol_obsfound\", self)\n (nbRow, listRow) = found\n TEST.display(\"techchrccol\", 3, \"_solve_suite3() - Le chiffre {0}\"\\\n .format(chiffre) + \"est absent des {0} rang(s) : {1}.\"\\\n .format(nbRow, listRow))\n \n #algorithme : on cherche maintenant les cases vides à l'intersection\n #des rangs et de la colonne libres\n argRowCol =(listRow, (colmiss,))\n obsPattern = (gridview.OBS_EMPTYPLACES_RC, argRowCol)\n #mémoriser les informations pour l'itération suivante\n mem.memorize(\"techchrccol_rowsmiss\", listRow, self)\n mem.memorize(\"techchrccol_obspattern\", obsPattern, self)\n #incrémenter l'index d'observations\n mem.increment(\"techchrccol_indexobs\", self)\n #avancement de la technique et fonction pour l'opération suivante\n mem.memorize(\"techchrccol_result\", \"observe\", self)\n mem.memorize(\"techchrccol_stepcol\", 4, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite4, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 4\", self)\n #retourner en indiquant la demande d'observation à faire\n TEST.display(\"techchrccol\", 3, \"_solve_suite3() - Demande 4ème \"\\\n \"observation : pattern = {0}\".format(obsPattern))\n r = (\"observe\", obsPattern)\n return r\n\ndef _solve_suite4(self):\n '''Résolution pour une colonne (Col) - Retour 4ème observation = les\n cases vides à l'intersection des lignes et colonnes disponibles.\n S'il y en a une seule, faire le placement. 
S'il y en a plusieurs, on\n considère que d'est trop complexe -> fin de la technique\n La seule cause possible d'échec est un fail mémoire.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite4()\")\n assert self._initOk\n mem = self._mem\n #se rappeler les infos en mémoire de l'étape précédente\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n isqr = mem.recall(\"techchrccol_isqr\", self)\n colmiss = mem.recall(\"techchrccol_colmiss\", self)\n colsmiss = mem.recall(\"techchrccol_colsmiss\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_suite4() - Colonne de carrés \"\\\n \"n° {0} : retour 4ème observation\".format(icol))\n #récupérer le résultat de l'observation et l'analyser.\n #c'est un tuple de nombre et liste de cases\n found = mem.recall(\"techchrccol_obsfound\", self)\n (nbPlc, listPlc) = found\n TEST.display(\"techchrccol\", 3, \"_solve_suite4() - Les cases {0}\"\\\n .format(listPlc) + \"sont disponibles pour placer le {0}.\"\\\n .format(chiffre))\n #algorithme : il doit y a au moins une case libre, s'il y a une seule\n #c'est celle ou se fait le placement\n if nbPlc < 1:\n raise(Sudoku_Error, \"Erreur d'observation dans TechChRCcol\"\\\n \"._solve_suite4(), nombre de cases libres invalide.\")\n #s'il y en a plus d'une, trop complexe pour le moment -> abandon\n elif nbPlc > 1:\n TEST.display(\"techchrccol\", 3, \"_solve_suite4() - Plus d'1 case à \"\\\n \"remplir, trop complexe. La technique locale est \"\\\n \"abandonnée.\")\n r = self._solve_fin(\"quit\")\n # ok 1 case, on fait le placement\n else:\n placement = (listPlc[0][0], listPlc[0][1], chiffre)\n #mémoriser les informations pour l'itération suivante\n mem.memorize(\"techchrccol_availplc\", listPlc, self)\n mem.memorize(\"techchrccol_result\", \"place\", self)\n mem.memorize(\"techchrccol_placement\", placement, self)\n #avancement de la technique et fonction pour l'opération suivante\n mem.memorize(\"techchrccol_stepcol\", 5, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite5, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 5\", self)\n #retourner en indiquant la demande de placement à faire\n TEST.display(\"techchrccol\", 3, \"_solve_suite4() - Demande de \"\\\n \"placement de {0} en {1}\".format(chiffre, listPlc[0]))\n r = (\"place\", placement)\n return r\n\ndef _solve_suite5(self):\n '''Résolution pour une colonne (Col) - Retour de placement.\n Vérifier que le placement a été correct et terminer la technique.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite5()\")\n assert self._initOk\n mem = self._mem\n #se rappeler les infos en mémoire de l'étape précédente\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n placement = mem.recall(\"techchrccol_placement\", self)\n nbplc = mem.recall(\"techchrccol_nbplccol\", self)\n #vérifier que le placement a été bien réalisé. 
Sinon c'est une erreur\n #de mémoire, d'algorithme ou de cohérence de la grille.\n plcValid = mem.recall(\"techchrccol_placeok\", self)\n if plcValid is not True:\n raise Sudoku_Error (\"Erreur de placement dans TechChRCcol\"\\\n \"._solve_suite5(), le placement a échoué.\")\n #incrémenter le compteur de placement puis passer à la fin\n mem.increment(\"techchrccol_nbplccol\", 1, self)\n (row, col) = (placement[0], placement[1])\n TEST.display(\"techchrccol\", 3, \"_solve_suite5() - Le placement \"\\\n \"de {0} en {1} est validé.\".format(chiffre, (row, col)))\n #fin de la technique avec succès\n r = self._solve_fin(\"succeed\")\n return r\n\ndef _solve_fin(self, endResult=\"end\"):\n \"\"\"A la fin des étapes de résolution, commande la fin de la technique\n et construit la réponse à retourner à apply() - et donc à la fonction\n appelante du programme.\n \"\"\"\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_fin()\")\n assert self._initOk\n mem = self._mem\n nbplccol = mem.recall(\"techchrccol_nbplccol\", self)\n TEST.display(\"techchrccol\", 2, \\\n \"Fin de la technique \\\"Chiffre-rang-colonne\\\" sur une \"\\\n \"colonne.\\nNombre de chiffres placés : {0}\" \\\n .format(nbplccol))\n #mettre à jour les données d'avancement\n self._finish_apply()\n #construire le tuple de détail de résultats\n endDetails = (endResult, nbplccol)\n TEST.display(\"techlplccol\", 2, \"La technique se termine avec le \"\\\n \"résultat : '{0}'\".format(endDetails))\n #retour à SudoThinking\n return (\"end\", endDetails)\n\ndef _finish_apply(self):\n '''Marque la technique comme terminée. Il faudra appeler 'reset()'\n pour la relancer.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _finish_tech()\")\n assert self._initOk\n mem = self._mem\n mem.memorize(\"techchrccol_chiffre\", None, self)\n mem.memorize(\"techchrccol_icol\", None, self)\n mem.memorize(\"techchrccol_isqcol\", None, self)\n mem.memorize(\"techchrccol_finished\", True, self)\n mem.memorize(\"techchrccol_encours\", False, self)\n self._finished = True\n return \n","sub_path":"sudosimu/techchrc/techchrcc2.py","file_name":"techchrcc2.py","file_ext":"py","file_size_in_byte":16953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"313372608","text":"# Copyright (C) 2014 Glamping Hub (https://glampinghub.com)\n# License: BSD 3-Clause\n\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import activate, get_language\n\nfrom painlessseo import settings\n\n\nclass SeoMetadata(models.Model):\n content_type = models.ForeignKey(ContentType, null=True, blank=True)\n object_id = models.PositiveIntegerField(null=True, blank=True)\n content_object = generic.GenericForeignKey('content_type', 'object_id')\n path = models.CharField(verbose_name=_('Path'), max_length=200, db_index=True,\n help_text=_(\"This should be an absolute path, excluding the domain name. 
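    # The model being defined here keys SEO metadata by
    # (content_type, object_id, lang_code) and refreshes the stored path from
    # get_absolute_url() when the target object is saved. A framework-free
    # skeleton of that update flow (names are illustrative, not the real
    # painlessseo API):
    seo_store = {}

    def update_seo(instance, lang_codes=("en", "es")):
        for lang in lang_codes:
            key = (type(instance).__name__, instance.id, lang)
            seo_store[key] = {"path": instance.get_absolute_url()}

    class Page:
        def __init__(self, id, slug):
            self.id, self.slug = id, slug
        def get_absolute_url(self):
            return "/pages/%s/" % self.slug

    update_seo(Page(1, "about"))        # would run from a post_save signal
    print(seo_store[("Page", 1, "en")]) # {'path': '/pages/about/'}
    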
Example: '/foo/bar/'.\"))\n lang_code = models.CharField(verbose_name=_('Language'), max_length=2,\n choices=settings.SEO_LANGUAGES,\n default=settings.DEFAULT_LANG_CODE)\n title = models.CharField(verbose_name=_('Title'), max_length=68, blank=True)\n description = models.CharField(verbose_name=_('Description'), max_length=155, blank=True)\n override_path = models.BooleanField(default=False)\n\n class Meta:\n verbose_name = _('SEO metadata')\n verbose_name_plural = _('SEO metadata')\n db_table = 'seo_metadata'\n unique_together = (('path', 'lang_code'), )\n ordering = ('path', 'lang_code')\n\n def __unicode__(self):\n return \"Language: %s | URL: %s\" % (self.lang_code, self.path)\n\n def save(self, *args, **kwargs):\n if kwargs.pop('override_path', None):\n self.override_path = True\n else:\n try:\n old_seo = SeoMetadata.objects.get(pk=self.pk)\n if old_seo.override_path:\n self.path = old_seo.path\n except SeoMetadata.DoesNotExist:\n pass\n super(SeoMetadata, self).save(*args, **kwargs)\n\n\ndef update_seo(sender, instance, **kwargs):\n active_lang = get_language()\n if hasattr(instance, 'get_current_language') and callable(instance.get_current_language):\n active_lang = instance.get_current_language()\n for lang_code, lang_name in settings.SEO_LANGUAGES:\n if active_lang == lang_code:\n activate(lang_code)\n try:\n sm = SeoMetadata.objects.get(content_type=ContentType.objects.get_for_model(instance),\n object_id=instance.id, lang_code=lang_code)\n if instance.get_absolute_url() != sm.path:\n sm.path = instance.get_absolute_url()\n except SeoMetadata.DoesNotExist:\n sm = SeoMetadata(lang_code=lang_code, content_object=instance, path=instance.get_absolute_url())\n sm.save(override_path=True)\n activate(active_lang)\n\n\ndef delete_seo(sender, instance, **kwargs):\n ctype = ContentType.objects.get_for_model(instance)\n for sm in SeoMetadata.objects.filter(content_type=ctype, object_id=instance.id):\n sm.delete()\n\n\ndef register_seo_signals():\n for app, model in settings.SEO_MODELS:\n ctype = ContentType.objects.get(app_label=app, model=model)\n if not hasattr(ctype.model_class(), 'get_absolute_url'):\n raise ImproperlyConfigured(\"Needed get_absolute_url method not defined on %s.%s model.\" % (app, model))\n models.signals.post_save.connect(update_seo, sender=ctype.model_class(), weak=False)\n models.signals.pre_delete.connect(delete_seo, sender=ctype.model_class(), weak=False)\n","sub_path":"painlessseo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"407126015","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 17 15:17:37 2016\n\n@author: MichaelEK\n\"\"\"\n\nfrom pandas import read_table, DataFrame, concat, merge, Timedelta, datetime, to_datetime, DateOffset, date_range, Timestamp, read_csv, to_numeric\nfrom misc_functions_v01 import printf\nfrom WA_analysis_fun import stream_nat\nfrom linear_reg_v02 import lin_reg\nfrom seaborn import regplot\n\n########################################\n### Parameters\n\nusage_path = 'C:/ecan/base_data/usage/usage_takes_mon_series_all_est_SD_with_cav.csv'\nflow_path = 'C:/ecan/Projects/otop/flow/otop_flow_recorders.CSV'\nwap_num_path = 'C:/ecan/Projects/Pareora/analysis/usage/wap_sites/'\n\nflow_site_name = 70103\nwap_sites = [70103, 70105]\n\nexport = False\n\nexport_mon_path = 'C:/ecan/Projects/Pareora/analysis/data/nat_mon_vol_70103.csv'\nexport_nat_path = 
'C:/ecan/Projects/Pareora/analysis/data/nat_flow_70103.csv'\n\n########################################\n### Run naturalization function\n\nnat_mon_vol, nat_flow = stream_nat(usage_path, flow_path, wap_num_path, flow_site_name, wap_sites, export=export, export_mon_path=export_mon_path, export_nat_path=export_nat_path)\n\n#######################################\n### Testing section\n\nboth2 = concat([flow4, new_flow], axis=1)\n\nboth2.plot(ylim=[0, 5])\n\nnat_flow.plot(ylim=[0, 5])\n\nhuts = nat_flow\nlower = nat_flow\n\nboth3 = concat([huts, lower], axis=1)\n\nboth3.plot(ylim=[0, 5])\n\nlin_reg(nat_mon_vol[70103], nat_mon_vol['nat'], log_axis=True)[0]\nlin_reg(nat_mon_vol[70103], nat_mon_vol['sd_usage_est'])[0]\nreg1 = lin_reg(nat_mon_vol[70103], nat_mon_vol['nat'])[0]\n\n\nregplot(nat_mon_vol[70103], nat_mon_vol['nat'], truncate=True)\n\nx = nat_mon_vol[70103]\ny = nat_mon_vol['nat']\n\nnat_mon_vol['nat_est2'] = nat_mon_vol[70103] * reg1['Slope'][0] + reg1['Intercept'][0]\n\nnat_mon_vol['nat_est2_ratio'] = (nat_mon_vol['nat_est2'] -nat_mon_vol['nat'])/nat_mon_vol['nat']\n\n\nlow1 = nat_mon_vol[nat_mon_vol[70103] < 2000000]\nlin_reg(low1[70103], low1['nat'])[0]\n\n\net_nat = concat([nat_mon_vol['sd_usage_est'], et_mon], axis=1, join='inner')\net_nat = concat([nat_mon_vol['sd_usage_est'], dem_mon], axis=1, join='inner')\n\nlin_reg(et_nat[0], et_nat['sd_usage_est'])\n\nregplot(et_nat[0], et_nat['sd_usage_est'])\n\nregplot(log(et_nat[0]), log(et_nat['sd_usage_est']))\n\n\n\n\n\n\n\n","sub_path":"python_scripts/hydro/naturalization.py","file_name":"naturalization.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"243317006","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 30 10:58:44 2019\n\n@author: riteshsharma\n\"\"\"\nimport bcrypt as bc\nimport hashlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\ndef numberTwo(t):\n F1 = open(\"D1.txt\", \"r\")\n F2 = open(\"D2.txt\", \"r\")\n \n \n data1 = F1.readlines()\n data2 = F2.readlines()\n \n wholeString1 = list()\n wholeString2 = list()\n \n kGramsStorage1 = set()\n kGramsStorage2 = set()\n \n for text1 in data1:\n wholeString1 += text1\n \n for text2 in data2:\n wholeString2 += text2\n \n\n for j in range(0, len(wholeString1) - 2):\n kGramsStorage1.add(wholeString1[j] + wholeString1[j+1] + wholeString1[j+2])\n \n for k in range(0, len(wholeString2) - 2):\n kGramsStorage2.add(wholeString2[k] + wholeString2[k+1] + wholeString2[k+2])\n \n \n #Fast Min Hash Algorithm\n \n\n\n salt = list()\n vector = list()\n \n for i in range(t):\n vector.append(np.Inf)\n \n def sha(salt, value):\n hashValue = hashlib.sha1(salt.encode() + value.encode()).hexdigest()\n return int(hashValue, 16) % 10_000\n \n\n for i in range(t):\n salt.append(str(bc.gensalt()))\n \n for kGram in kGramsStorage1:\n for i in range(t):\n if (sha(salt[i], kGram) < vector[i]):\n vector[i] = sha(salt[i], kGram)\n \n jaccardSimilaritya = 0\n \n ##vector2\n vector1 = list()\n \n for i1 in range(t):\n vector1.append(np.Inf)\n \n\n for kGram in kGramsStorage2:\n for i1 in range(t):\n if (sha(salt[i1], kGram) < vector1[i1]):\n vector1[i1] = sha(salt[i1], kGram)\n \n \n for i in range(t):\n if(vector[i] == vector1[i]):\n jaccardSimilaritya += 1\n \n jaccardSimilaritya = jaccardSimilaritya/t\n \n return jaccardSimilaritya\n \n \ndef graph():\n a = list()\n ti = list()\n \n for i in range(20, 1000, 20):\n start = time.clock()\n 
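    # Note: time.clock() as used in this graph() loop was removed in Python
    # 3.8; time.perf_counter() is the usual replacement. The estimator itself
    # is compact -- one salted hash per signature row, keep the minimum over
    # all k-grams, and the share of matching minima approximates the Jaccard
    # similarity (toy k-gram sets, illustrative only):
    import hashlib
    import time

    def minhash_signature(kgrams, salts):
        def h(salt, value):
            return int(hashlib.sha1((salt + value).encode()).hexdigest(), 16) % 10_000
        return [min(h(salt, g) for g in kgrams) for salt in salts]

    A = {"abc", "bcd", "cde", "def"}
    B = {"bcd", "cde", "def", "efg"}       # true Jaccard = 3/5 = 0.60
    salts = [str(i) for i in range(200)]

    t0 = time.perf_counter()
    sig_a = minhash_signature(A, salts)
    sig_b = minhash_signature(B, salts)
    estimate = sum(a == b for a, b in zip(sig_a, sig_b)) / len(salts)
    print("estimate %.2f in %.4f s" % (estimate, time.perf_counter() - t0))
    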
a.append(numberTwo(i))\n stop = time.clock()\n b = stop - start\n ti.append(b)\n \n \n\n plt.plot(ti)\n plt.ylabel(\"time\")\n plt.xlabel(\"t\")\n plt.show\n \ngraph()\n","sub_path":"untitled2.py","file_name":"untitled2.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"47432062","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pyaudio\n\nfrom echomesh.base import Config\nfrom echomesh.sound import GetFormatName\nfrom echomesh.sound import Sound\nfrom echomesh.sound.Input import Input\nfrom echomesh.util import Log\nfrom echomesh.util.thread.ThreadLoop import ThreadLoop\n\nLOGGER = Log.logger(__name__)\n\nMIN_CHUNK_SIZE = 16\nMAX_CHUNK_SIZE = 2048\n\nclass InputThread(ThreadLoop):\n def __init__(self, device_index, sample_bytes, rates):\n super(InputThread, self).__init__(name='InputThread')\n self.chunk_size = max(MIN_CHUNK_SIZE,\n min(MAX_CHUNK_SIZE,\n Config.get('audio', 'input', 'chunk_size')))\n\n self.input = Input()\n fmt = GetFormatName.get_format_name(sample_bytes)\n try:\n len(rates)\n except TypeError:\n rates = [rates]\n\n self.clients = set()\n pyaud = Sound.PYAUDIO()\n for rate in rates:\n try:\n self.stream = pyaud.open(format=fmt, channels=1, rate=rate,\n input_device_index=device_index, input=True)\n break\n except IOError as e:\n if 'Invalid sample rate' not in str(e):\n raise\n else:\n raise Exception(\"Couldn't open audio device named %s.\" % device_index)\n\n def single_loop(self):\n try:\n self.input.receive(self.stream.read(self.chunk_size))\n except:\n LOGGER.error()\n else:\n for client in self.clients:\n client(self.input)\n\n def add_client(self, client):\n self.clients.add(client)\n\n def remove_client(self, client):\n self.clients.remove(client)\n\n def _after_thread_pause(self):\n self.stream.close()\n self.stream = None\n","sub_path":"code/python/echomesh/sound/InputThread.py","file_name":"InputThread.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"342794001","text":"\"\"\"mpeaks URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n# from django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom django.views.generic import RedirectView\nfrom mpeaks.mpeaksapp.views import PeakList, PeakDetail, PeakFilterList\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Mpeaks API\",\n default_version='v1',\n description=\"API Documentation\",\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n url(r'^doc(?P\\.json|\\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n url(r'^doc/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n url(r'^$', RedirectView.as_view(url='doc/')),\n path('peaks/', PeakList),\n path('peaks//', PeakDetail),\n path('peaks/filter/', PeakFilterList),\n]\n","sub_path":"mpeaks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"351917253","text":"\r\nimport smtplib\r\nimport socket\r\n\r\nsender = 'from@fromdomain.com'\r\nreceivers = 'to@todomain.com'\r\n\r\nhost = ''\r\nport = 25\r\nlocal_hostname = 'localhost'\r\n\r\nmessage = '''From: From Person\r\n\r\nTo: To Person \r\nMIME-Version: 1.0\r\nContent-type: text/html\r\nSubject: SMTP HTML e-mail test\r\n\r\n\r\nThis is an e-mail message to be sent in HTML\r\nformat\r\n\r\nThis is HTML message\r\n

This is headline.

\r\n\r\n'''\r\n\r\ntry:\r\n \r\n smtpObj = smtplib.SMTP(local_hostname)\r\n smtpObj.sendmail(sender, receivers, message)\r\n\r\n print('Successfully sent e-mail')\r\n\r\nexcept Exception as msg:\r\n print('Error: Unable to send e-mail due to [ %s ]' % (msg))\r\n","sub_path":"smtp--mailing.py","file_name":"smtp--mailing.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"333202896","text":"__author__ = 'thomas.ballas'\n\nimport time\nfrom crc_ccitt import crc16xmodem\nimport serial\nimport utilities\nimport servo_controller\n\n# CONSTANTS\nMAST_MIN = 210\nMAST_MAX = 2100\nMAST_HEIGHT = 1400\nNAME = \"zippermast\"\n\n\ndef mast_up(commander):\n servo = servo_controller.Servo()\n servo.open_lid()\n set_mast_height(commander, MAST_HEIGHT)\n\n\ndef mast_down(commander):\n set_mast_height(commander, 0)\n servo = servo_controller.Servo()\n servo.close_lid()\n\n\ndef set_mast_height(commander, target_height):\n commander.relay_on(\"zipmast_relay\")\n time.sleep(.1)\n commander.relay_on(\"zipmast_wake\")\n time.sleep(.5)\n commander.relay_off(\"zipmast_wake\")\n cur_ht = check_zippermast(commander)\n if \"NO RESPONSE\" not in cur_ht:\n commander.device_stats[\"mast\"] = cur_ht\n target_mm = int(target_height) + MAST_MIN\n cur_mm = int(commander.device_stats[\"mast\"])\n # height change below granularity of movement; doing nothing\n if in_range(target_mm, cur_mm, 25):\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] User tried to set mast height to current mast height.\"))\n # recurse with height set to zero - function will set to MAST_MIN\n elif target_mm < MAST_MIN:\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] User tried to set mast height below {} mm threshold.\".format(MAST_MIN)))\n return set_mast_height(commander, str(0))\n # recurse with target height set to greatest acceptable input: (MAST_MAX - MAST_MIN)\n elif target_mm > MAST_MAX:\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] User tried to set mast height above {} mm threshold.\".format(MAST_MAX)))\n return set_mast_height(commander, str(MAST_MAX - MAST_MIN))\n # input was within acceptable parameters; continue with raise mast\n elif target_mm > cur_mm:\n travel_mm = target_mm - cur_mm\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Raising mast for {} mm\".format(travel_mm)))\n if travel_mm < 100:\n mast_speed = \"25\"\n else:\n mast_speed = \"100\"\n raise_mast(commander, mast_speed)\n done = False\n fail_ctr = 0\n while not done and fail_ctr < 3:\n cur_ht = check_zippermast(commander)\n if \"NO RESPONSE\" not in cur_ht:\n commander.device_stats[\"mast\"] = cur_ht\n fail_ctr = 0\n else:\n fail_ctr += 1\n continue\n if int(cur_ht) >= target_mm:\n done = True\n else:\n if (target_mm - int(cur_ht)) < 100:\n raise_mast(commander, \"25\")\n time.sleep(0.1)\n stop_raise_mast(commander)\n # input was within acceptable parameters, begin lower mast\n else:\n travel_mm = cur_mm - target_mm\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Lowering mast for {} mm\".format(travel_mm)))\n if travel_mm < 100:\n mast_speed = \"25\"\n else:\n mast_speed = \"100\"\n lower_mast(commander, mast_speed)\n done = False\n fail_ctr = 0\n while not done and fail_ctr < 3:\n cur_ht = check_zippermast(commander)\n if \"NO RESPONSE\" not in cur_ht:\n commander.device_stats[\"mast\"] = cur_ht\n fail_ctr = 0\n else:\n fail_ctr += 1\n if float(cur_ht) <= target_mm:\n done = True\n else:\n if (int(cur_ht) - target_mm) 
< 100:\n lower_mast(commander, \"25\")\n time.sleep(0.25)\n stop_lower_mast(commander)\n commander.relay_off(\"zipmast_relay\")\n return \"{}\".format(int(commander.device_stats[\"mast\"]) - MAST_MIN)\n\n\ndef raise_mast(commander, speed):\n if commander.device != \"zipbox\":\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Tried to execute 'raise_mast' from outside zippermast box. Attempting to forward.\"))\n return commander.forward_command(commander.device_ip[\"zipbox\"], commander.server_port, \"rmt\")\n zip_mast_pcmov(commander, \"up\", speed)\n return \"MAST_UP\"\n\n\ndef stop_raise_mast(commander):\n if commander.device != \"zipbox\":\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Tried to execute 'raise_mast' from outside zippermast box. Attempting to forward.\"))\n return commander.forward_command(commander.device_ip[\"zipbox\"], commander.server_port, \"srmt\")\n # sleep appropriate amount of time for boot\n response = \"NO RESPONSE\"\n second_check = \"\"\n fail_ctr = 0\n while not in_range(response, second_check, 10) and fail_ctr < 5:\n response = zip_mast_pcmov(commander, \"up\", \"0\")\n if response is not \"NO RESPONSE\":\n commander.device_stats[\"mast\"] = response\n time.sleep(0.1)\n second_check = check_zippermast(commander)\n else:\n fail_ctr += 1\n response = zip_mast_pcmov(commander, \"up\", \"0\")\n return \"MAST_UP\"\n\n\ndef in_range(response, second_check, ranger):\n if response is \"NO RESPONSE\" or second_check is \"NO RESPONSE\" or response == \"\" or second_check == \"\":\n return False\n else:\n ht_one = int(response)\n ht_two = int(second_check)\n if (ht_one - ranger) <= ht_two <= (ht_one + ranger):\n return True\n return False\n\n\ndef lower_mast(commander, speed):\n if commander.device != \"zipbox\":\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Tried to execute 'lower_mast' from outside zippermast box. Attempting to forward.\"))\n return commander.forward_command(commander.device_ip[\"zipbox\"], commander.server_port, \"lmt\")\n # send signal to zippermast relay - circuit closed\n zip_mast_pcmov(commander, \"down\", speed)\n return \"MAST_DOWN\"\n\n\ndef stop_lower_mast(commander):\n if commander.device != \"zipbox\":\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Tried to execute 'lower_mast' from outside zippermast box. 
Attempting to forward.\"))\n return commander.forward_command(commander.device_ip[\"zipbox\"], commander.server_port, \"slmt\")\n response = \"NO RESPONSE\"\n second_check = \"\"\n fail_ctr = 0\n while not in_range(response, second_check, 10) and fail_ctr < 5:\n response = zip_mast_pcmov(commander, \"down\", \"0\")\n if response is not \"NO RESPONSE\":\n commander.device_stats[\"mast\"] = response\n time.sleep(0.1)\n second_check = check_zippermast(commander)\n else:\n fail_ctr += 1\n return \"MAST_DOWN\"\n\n\ndef check_zippermast(commander):\n was_on = True\n if commander.relay_stats[\"zipmast_relay\"] != \"1\":\n was_on = False\n commander.relay_on(\"zipmast_relay\")\n time.sleep(.5)\n commander.relay_on(\"zipmast_wake\")\n time.sleep(.5)\n commander.relay_off(\"zipmast_wake\")\n\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Attempting to query zippermast for height.\"))\n mast_response = \"NO RESPONSE\"\n # transmit pcmov_down to zippermast\n try:\n modem = serial.Serial(port=commander.physical_ports['zm_port'], baudrate=38400, rtscts=True, timeout=1.0)\n commander.debug_logger(utilities.logger(NAME, \"{}\".format(modem)))\n modem.flushInput()\n modem.flushOutput()\n mast_response = modem.readline()\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Zippermast response: {}\".format(mast_response)))\n modem.flushInput()\n modem.flushOutput()\n modem.close()\n except:\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Failed to write command to zippermast\"))\n\n if not was_on:\n commander.relay_off(\"zipmast_relay\")\n\n if mast_response is not \"\" and \"$PPSST\" in mast_response:\n mast_resps = mast_response.split(\",\")\n if mast_resps > 3:\n commander.debug_logger(utilities.logger(NAME, \"{}\".format(mast_resps)))\n height_mm = mast_resps[2]\n return height_mm\n return \"NO RESPONSE\"\n\n\ndef zip_mast_pcmov(commander, direction, speed):\n directions = {\"down\": \"0\",\n \"up\": \"1\"}\n # generate PCMOV message\n zm_pcmov = \"PCMOV\"\n zm_id = \"1\" # value unknown\n zm_dir = directions[direction]\n zm_speed = speed # 0-100, equiv. 
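    # zip_mast_pcmov() frames its command as an NMEA-style sentence:
    # "$" + comma-joined fields + "*" + hex checksum, where the checksum is
    # CRC-16/XMODEM (poly 0x1021, init 0x0000) over the text between "$" and
    # "*". A dependency-free sketch of that framing (wire-format details
    # beyond what this file shows are assumptions):
    def crc16_xmodem(data: bytes) -> int:
        crc = 0
        for byte in data:
            crc ^= byte << 8
            for _ in range(8):
                crc = ((crc << 1) ^ 0x1021) if crc & 0x8000 else (crc << 1)
            crc &= 0xFFFF
        return crc

    def frame_sentence(fields):
        body = ",".join(fields)  # e.g. "PCMOV,1,1,25,0"
        return "$%s*%x\r\n" % (body, crc16_xmodem(body.encode()))

    assert crc16_xmodem(b"123456789") == 0x31C3  # standard check value
    print(frame_sentence(["PCMOV", "1", "1", "25", "0"]))
    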
to mm/sec\n zm_delay = \"0\" # time delay in seconds\n pcmov = \"$\" + \",\".join([zm_pcmov, zm_id, zm_dir, zm_speed, zm_delay])\n crc = hex(crc16xmodem(pcmov[1:]))[2:]\n pcmov = pcmov + \"*{}\\r\\n\".format(crc)\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Generated PCMOV instruction: {}\".format(pcmov)))\n mast_response = \"NO RESPONSE\"\n # transmit pcmov_down to zippermast\n try:\n modem = serial.Serial(port=commander.physical_ports['zm_port'], baudrate=38400, rtscts=True, timeout=1.0)\n commander.debug_logger(utilities.logger(NAME, \"{}\".format(modem)))\n # for i in range(15):\n modem.flushInput()\n modem.flushOutput()\n modem.write(pcmov)\n mast_response = modem.readline()\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Zippermast response: {}\".format(mast_response)))\n modem.flushInput()\n modem.flushOutput()\n modem.close()\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Wrote message to port {}\".format(commander.zm_port)))\n except:\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Failed to write command to zippermast\"))\n if mast_response is not \"\" and \"$PPSST\" in mast_response:\n mast_resps = mast_response.split(\",\")\n if mast_resps > 3:\n height_mm = mast_resps[2]\n return height_mm\n return \"NO RESPONSE\"\n\n\ndef zippermast_on(commander, zm_port):\n mast_response = \"\"\n try:\n modem = serial.Serial(port=zm_port, baudrate=38400, rtscts=True, timeout=.5)\n modem.flushInput()\n modem.flushOutput()\n time.sleep(.5)\n mast_response = modem.readline()\n mast_response += modem.readline()\n mast_response += modem.readline()\n modem.flushInput()\n modem.flushOutput()\n modem.close()\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Read from zippermast: '{}'\".format(mast_response)))\n except:\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Failed to read from zippermast\"))\n if \"$PPSST\" in mast_response:\n return True\n return\n","sub_path":"arduino/archive/RESILIENTVIPER_v2/zippermast.py","file_name":"zippermast.py","file_ext":"py","file_size_in_byte":10416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"533640212","text":"# -*- coding:utf-8 -*- \n__author__ = 'John 2018/3/5 15:19'\n\n\"\"\"\n对于一个给定的 source 字符串和一个 target 字符串,\n你应该在 source 字符串中找出 target 字符串出现的第一个位置(从0开始)。如果不存在,则返回 -1。\n样例\n如果 source = \"source\" 和 target = \"target\",返回 -1。\n\n如果 source = \"abcdabcdefg\" 和 target = \"bcd\",返回 1。\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param: source: source string to be scanned.\n @param: target: target string containing the sequence of characters to match\n @return: a index to the first occurrence of target in source, or -1 if target is not part of source.\n \"\"\"\n\n def strStr(self, source, target):\n # write your code here\n if target == '':\n return 0\n else:\n if target in source:\n for n, i in enumerate(source):\n if i == target[0]:\n return n\n else:\n return -1\n\n\nS = Solution()\n","sub_path":"lintcode/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"132215249","text":"#!/usr/bin/env pytest\n# -*- coding: utf-8 -*-\n###############################################################################\n# $Id$\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: Test WEBP driver\n# Author: Even Rouault, \n#\n###############################################################################\n# Copyright (c) 2011-2013, Even Rouault 
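    # The strStr Solution above has a latent bug: once it knows the target
    # occurs somewhere, it returns the index of the first character equal to
    # target[0], which is wrong whenever that character also appears before
    # the real match (source="abxbcd", target="bcd" returns 1, expected 3).
    # str.find already gives the required semantics, including '' -> 0:
    def str_str(source, target):
        return source.find(target)

    assert str_str("source", "target") == -1
    assert str_str("abcdabcdefg", "bcd") == 1
    assert str_str("abxbcd", "bcd") == 3   # the version above answers 1
    assert str_str("anything", "") == 0
    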
\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\nfrom osgeo import gdal\n\n\nimport gdaltest\nimport pytest\n\n###############################################################################\n# Test if WEBP driver is present\n\n\ndef test_webp_1():\n\n gdaltest.webp_drv = gdal.GetDriverByName('WEBP')\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n \n###############################################################################\n# Open() test\n\n\ndef test_webp_2():\n\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n ds = gdal.Open('data/rgbsmall.webp')\n cs = ds.GetRasterBand(1).Checksum()\n assert cs == 21464 or cs == 21450 or cs == 21459, \\\n 'did not get expected checksum on band 1'\n\n###############################################################################\n# CreateCopy() test\n\n\ndef test_webp_3():\n\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n src_ds = gdal.Open('data/rgbsmall.tif')\n out_ds = gdaltest.webp_drv.CreateCopy('/vsimem/webp_3.webp', src_ds, options=['QUALITY=80'])\n src_ds = None\n cs1 = out_ds.GetRasterBand(1).Checksum()\n out_ds = None\n gdal.Unlink('/vsimem/webp_3.webp')\n gdal.Unlink('/vsimem/webp_3.webp.aux.xml')\n\n # 21502 is for libwebp 0.3.0\n assert cs1 == 21464 or cs1 == 21502 or cs1 == 21695 or cs1 == 21700, \\\n 'did not get expected checksum on band 1'\n\n###############################################################################\n# CreateCopy() on RGBA\n\n\ndef test_webp_4():\n\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n md = gdaltest.webp_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LOSSLESS') == -1:\n pytest.skip()\n\n src_ds = gdal.Open('../gcore/data/stefan_full_rgba.tif')\n out_ds = gdaltest.webp_drv.CreateCopy('/vsimem/webp_4.webp', src_ds)\n src_ds = None\n cs1 = out_ds.GetRasterBand(1).Checksum()\n cs4 = out_ds.GetRasterBand(4).Checksum()\n out_ds = None\n gdal.Unlink('/vsimem/webp_4.webp')\n\n # 22849 is for libwebp 0.3.0\n assert cs1 in (22001, 22849, 34422, 36652, 36658, 45319), \\\n 'did not get expected checksum on band 1'\n\n assert cs4 == 10807, 'did not get expected checksum on band 4'\n\n###############################################################################\n# CreateCopy() on RGBA with lossless compression\n\n\ndef test_webp_5():\n\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n md = gdaltest.webp_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LOSSLESS') == -1:\n 
pytest.skip()\n\n src_ds = gdal.Open('../gcore/data/stefan_full_rgba.tif')\n out_ds = gdaltest.webp_drv.CreateCopy('/vsimem/webp_5.webp', src_ds, options=['LOSSLESS=YES'])\n src_ds = None\n cs1 = out_ds.GetRasterBand(1).Checksum()\n cs4 = out_ds.GetRasterBand(4).Checksum()\n out_ds = None\n gdal.Unlink('/vsimem/webp_5.webp')\n\n assert cs1 == 12603 or cs1 == 18536 or cs1 == 14800, \\\n 'did not get expected checksum on band 1'\n\n assert cs4 == 10807, 'did not get expected checksum on band 4'\n\n\n\n","sub_path":"autotest/gdrivers/webp.py","file_name":"webp.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"460720074","text":"#!/usr/bin/env python\n\"\"\"RF2_reducer.py\"\"\"\n\nfrom operator import itemgetter\nimport sys\n#Reducer para el RF2 del Taller 1\n\n\n# input comes from STDIN\n# Input follows the format: \n# number_of_month tab sum_of_total_costs tab number_of_trips\n# Output format\n# Same\n\ntotals = {}\n\nfor line in sys.stdin:\n # remove leading and trailing whitespace\n line = line.strip()\n\n # parse the input we got from mapper.py\n key, count = line.split('\\t')\n\n #Updates Structures\n if(key in totals):\n totals[key] = totals[key] + int(count) \n else:\n totals[key] = int(count)\n \n\n\nfor key in totals.keys():\n print('%s\\t%s' % (key, totals[key]))\n\n","sub_path":"Taller 1/MapReduceScripts/splitter_tester/splitter_reducer.py","file_name":"splitter_reducer.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"5131852","text":"d, n = map(int, input().split())\ndef division(x):\n if x % 100 != 0:\n return 0\n else:\n return division(x//100) +1\ncount= 0\nfor i in range(1, 1010001):\n if division(i) == d:\n count +=1\n if count == n:\n print(i)\n break","sub_path":"Python_codes/p03324/s424594570.py","file_name":"s424594570.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"638630555","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CNN(nn.Module):\n def __init__(self, opt):\n super(CNN, self).__init__()\n\n self.MODEL = opt[\"MODEL\"]\n self.BATCH_SIZE = opt[\"BATCH_SIZE\"]\n self.MAX_SENT_LEN = opt[\"MAX_SENT_LEN\"]\n self.WORD_DIM = opt[\"WORD_DIM\"]\n self.VOCAB_SIZE = opt[\"VOCAB_SIZE\"]\n self.CLASS_SIZE = opt[\"CLASS_SIZE\"]\n self.FILTERS = opt[\"FILTERS\"]\n self.FILTER_NUM = opt[\"FILTER_NUM\"]\n self.DROPOUT_PROB = opt[\"DROPOUT_PROB\"]\n self.IN_CHANNEL = 1\n\n assert (len(self.FILTERS) == len(self.FILTER_NUM))\n\n # one for UNK and one for zero padding\n self.embedding = nn.Embedding(self.VOCAB_SIZE + 2, self.WORD_DIM, padding_idx=self.VOCAB_SIZE + 1)\n if self.MODEL == \"static\" or self.MODEL == \"non-static\" or self.MODEL == \"multichannel\":\n self.WV_MATRIX = opt[\"WV_MATRIX\"]\n self.embedding.weight.data.copy_(torch.from_numpy(self.WV_MATRIX))\n if self.MODEL == \"static\":\n self.embedding.weight.requires_grad = False\n elif self.MODEL == \"multichannel\":\n self.embedding2 = nn.Embedding(self.VOCAB_SIZE + 2, self.WORD_DIM, padding_idx=self.VOCAB_SIZE + 1)\n self.embedding2.weight.data.copy_(torch.from_numpy(self.WV_MATRIX))\n self.embedding2.weight.requires_grad = False\n self.IN_CHANNEL = 2\n\n for i in range(len(self.FILTERS)):\n conv = nn.Conv1d(self.IN_CHANNEL, self.FILTER_NUM[i], self.WORD_DIM * self.FILTERS[i], 
stride=self.WORD_DIM)\n setattr(self, 'conv_%d'%i, conv)\n\n self.fc = nn.Linear(sum(self.FILTER_NUM), self.CLASS_SIZE)\n\n def get_conv(self, i):\n return getattr(self, 'conv_%d'%i)\n\n def forward(self, inp):\n x = self.embedding(inp).view(-1, 1, self.WORD_DIM * self.MAX_SENT_LEN)\n if self.MODEL == \"multichannel\":\n x2 = self.embedding2(inp).view(-1, 1, self.WORD_DIM * self.MAX_SENT_LEN)\n x = torch.cat((x, x2), 1)\n\n conv_results = [\n F.max_pool1d(F.relu(self.get_conv(i)(x)), self.MAX_SENT_LEN - self.FILTERS[i] + 1)\n .view(-1, self.FILTER_NUM[i])\n for i in range(len(self.FILTERS))]\n\n x = torch.cat(conv_results, 1)\n x = F.dropout(x, p=self.DROPOUT_PROB, training=self.training)\n x = self.fc(x)\n return x\n\n\n\n\nclass CNN1(nn.Module):\n \n def __init__(self, opt):\n super(CNN1,self).__init__()\n self.opt = opt\n \n V = opt.embed_num\n D = opt.embed_dim\n C = opt.class_num\n Ci = 1\n Co = opt.kernel_num\n Ks = opt.kernel_sizes\n\n self.embed = nn.Embedding(V, D)\n #self.convs1 = [nn.Conv2d(Ci, Co, (K, D)) for K in Ks]\n self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])\n '''\n self.conv13 = nn.Conv2d(Ci, Co, (3, D))\n self.conv14 = nn.Conv2d(Ci, Co, (4, D))\n self.conv15 = nn.Conv2d(Ci, Co, (5, D))\n '''\n self.dropout = nn.Dropout(opt.dropout)\n self.fc1 = nn.Linear(len(Ks)*Co, C)\n\n def conv_and_pool(self, x, conv):\n x = F.relu(conv(x)).squeeze(3) #(N,Co,W)\n x = F.max_pool1d(x, x.size(2)).squeeze(2)\n return x\n\n\n def forward(self, x):\n x = self.embed(x) # (N,W,D)\n \n if self.args.static:\n x = Variable(x)\n\n x = x.unsqueeze(1) # (N,Ci,W,D)\n\n x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)\n\n\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)\n\n x = torch.cat(x, 1)\n\n '''\n x1 = self.conv_and_pool(x,self.conv13) #(N,Co)\n x2 = self.conv_and_pool(x,self.conv14) #(N,Co)\n x3 = self.conv_and_pool(x,self.conv15) #(N,Co)\n x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)\n '''\n x = self.dropout(x) # (N,len(Ks)*Co)\n logit = self.fc1(x) # (N,C)\n return logit\n\n","sub_path":"models/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"138211632","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# ---------------------------------------------------------------------\n# Copyright (c) 2012 Michael Hull.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# - Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
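    # The Conv1d construction above flattens a sentence into one channel of
    # length WORD_DIM * MAX_SENT_LEN, then uses kernel = WORD_DIM * k with
    # stride = WORD_DIM so each step spans exactly k word vectors --
    # equivalent to a 2-D convolution over (words x embedding dims). A
    # minimal shape check with toy sizes and random weights:
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    word_dim, sent_len, n_filters, k = 8, 10, 4, 3
    x = torch.randn(2, 1, word_dim * sent_len)   # batch of 2 sentences
    conv = nn.Conv1d(1, n_filters, kernel_size=word_dim * k, stride=word_dim)
    feat = F.relu(conv(x))                               # (2, 4, sent_len - k + 1)
    pooled = F.max_pool1d(feat, feat.size(2)).squeeze(2) # (2, 4)
    print(feat.shape, pooled.shape)
    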
IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ----------------------------------------------------------------------\n\nfrom morphforge.core import is_iterable\nfrom morphforge.units import qty\nfrom morphforge.simulation.base import SimulationResult\nfrom mhlibs.quantities_plot import QuantitiesFigure\nfrom morphforge.simulationanalysis.tagviewer.plotspecs import TagPlot\nfrom morphforge.traces import TraceFixedDT\nfrom morphforge.traces import TraceVariableDT\nfrom morphforge.traces import TracePiecewise\nfrom morphforge.traces.eventset import EventSet\nimport morphforge.units as units\n\n# pylint: disable=W0108\n# (Suppress warning about 'unnessesary lambda functions')\n\n\nfrom decimal import Decimal\n\nclass DefaultTagPlots(object):\n Voltage = TagPlot(\"Voltage\", ylabel='Voltage', yrange=(-80*units.mV, 50*units.mV), yunit=units.mV )\n CurrentDensity = TagPlot(\"CurrentDensity\", ylabel='Current\\nDensity', yunit=units.picoamp / units.um2 )\n Current = TagPlot(\"Current\", ylabel='Current', yunit=units.picoamp)\n Conductance = TagPlot(\"Conductance\", ylabel=\"Conductance\", yunit=units.pS)\n ConductanceDensity = TagPlot(\"ConductanceDensity\", ylabel=\"ConductanceDensity\", yunit=units.mS / units.cm2 )\n StateVariable = TagPlot(\"StateVariable\", ylabel=\"StateVariable\")\n StateVariableTau = TagPlot(\"StateTimeConstant\", yunit=units.millisecond, ylabel=\"Time Constant\")\n StateVariableInf = TagPlot(\"StateSteadyState\", ylabel=\"Steady State\")\n Event = TagPlot(\"Event\", ylabel=\"Events\")\n\n\n\n\n\nclass TagViewer(object):\n\n MPL_AUTO_SHOW = True\n\n _default_plot_specs = (\n DefaultTagPlots.Voltage,\n DefaultTagPlots.CurrentDensity,\n DefaultTagPlots.Current,\n DefaultTagPlots.Conductance,\n DefaultTagPlots.ConductanceDensity,\n DefaultTagPlots.StateVariable,\n DefaultTagPlots.StateVariableTau,\n DefaultTagPlots.StateVariableInf,\n DefaultTagPlots.Event,\n )\n\n _default_fig_kwargs = {'figsize': (12, 8) }\n\n _options_show_xlabel = ('only-once','all', False)\n _options_show_xticklabels=('only-once','all', False)\n _options_show_xticklabels_with_units=(True,False)\n _options_show_xaxis_position = ('bottom','top')\n\n def __init__(\n self,\n srcs,\n plots=None,\n additional_plots=None,\n figtitle=None,\n fig_kwargs=None,\n show=True,\n linkage=None,\n timerange=None,\n mpl_tight_bounds=False,\n decimate_points=False,\n\n share_x_labels=True,\n\n nxticks=4,\n show_xlabel='only-once',\n show_xticklabels='only-once',\n show_xticklabels_with_units=True,\n show_xaxis_position='bottom',\n xticklabel_quantisation = False,\n xticks=None,\n xlabel='Time'\n ):\n\n self.xlabel = xlabel\n\n if fig_kwargs is None:\n fig_kwargs = self._default_fig_kwargs\n\n self.linkage = linkage\n self.decimate_points = decimate_points\n\n\n if not is_iterable(srcs):\n srcs = [srcs]\n\n # For each type of input (in 'srcs'); this should return a list of traces:\n self.all_trace_objs = []\n self.all_event_set_objs = []\n trace_extractors = {\n SimulationResult: lambda obj: 
(self.all_trace_objs.extend(obj.traces),self.all_event_set_objs.extend(obj.evsets)),\n TraceFixedDT: lambda obj: self.all_trace_objs.append(obj),\n TraceVariableDT: lambda obj: self.all_trace_objs.append(obj),\n TracePiecewise: lambda obj: self.all_trace_objs.append(obj),\n EventSet: lambda obj: self.all_event_set_objs.append(obj)\n }\n\n for obj in srcs:\n tr_extractor = trace_extractors[type(obj)]\n tr_extractor(obj)\n\n # Use the new PlotSpec architecture:\n # Filter out which plots are actually going to display something,\n # and filter out the rest:\n plots = plots if plots is not None else TagViewer._default_plot_specs\n\n if additional_plots:\n plots = tuple(list(plots) + list(additional_plots))\n\n self.plot_specs = [plotspec for plotspec in plots if\n [tr for tr in self.all_trace_objs if plotspec.addtrace_predicate(tr)] or \\\n [evset for evset in self.all_event_set_objs if plotspec.addeventset_predicate(evset)] \\\n ]\n\n\n self.fig_kwargs = fig_kwargs\n self.figtitle = figtitle\n self.mpl_tight_bounds = mpl_tight_bounds\n\n self.timerange = timerange\n #self.share_x_labels = share_x_labels\n self.nxticks = nxticks\n\n\n # X-axis configuration:\n self.show_xlabel = show_xlabel\n self.show_xticklabels = show_xticklabels\n self.show_xticklabels_with_units = show_xticklabels_with_units\n self.show_xaxis_position = show_xaxis_position\n #self.xticks_as_ints = xticks_as_ints\n self.xticklabel_quantisation = xticklabel_quantisation\n self.xticks=xticks\n assert self.show_xlabel in self._options_show_xlabel, 'Invalid'\n assert self.show_xticklabels in self._options_show_xticklabels, 'Invalid: %s' % show_xticklabels\n assert self.show_xticklabels_with_units in self._options_show_xticklabels_with_units\n assert self.show_xaxis_position in self._options_show_xaxis_position\n if is_iterable( self.xticks ) and all( [isinstance(xtick, (int, float)) for xtick in self.xticks]):\n self.xticks = [ xtick*units.ms for xtick in self.xticks]\n assert self.xticks is None or isinstance(self.xticks, int) or ( is_iterable(self.xticks) and [ qty(xtick) for xtick in self.xticks] )\n\n\n self.fig = None\n self.subaxes = []\n self.create_figure()\n\n\n if TagViewer.MPL_AUTO_SHOW and show:\n import pylab\n pylab.show()\n\n def create_figure(self):\n self.fig = QuantitiesFigure(**self.fig_kwargs)\n\n # Add a title to the plot:\n if self.figtitle:\n self.fig.suptitle(self.figtitle)\n\n # Work out what traces are on what graphs:\n plotspec_to_traces = dict([(plot_spec, [tr for tr in self.all_trace_objs if plot_spec.addtrace_predicate(tr)]) for plot_spec in self.plot_specs ])\n if self.linkage:\n self.linkage.process(plotspec_to_traces)\n\n n_plots = len(self.plot_specs)\n\n\n import matplotlib.gridspec as gridspec\n height_ratios = [ps.height_ratio for ps in self.plot_specs]\n gs = list(gridspec.GridSpec(n_plots, 1, height_ratios=height_ratios,) )\n\n ## Lets share a commonn x-axis:\n #axes0 = self.fig.add_axes( gs[0].get_position(self.fig) ) \n #axesoneplus = [ self.fig.add_axes( ss.get_position(self.fig), sharex=axes0 ) for ss in gs[1:]]\n #axes = [axes0] + axesoneplus\n\n axes = [ self.fig.add_axes( ss.get_position(self.fig) ) for ss in gs]\n\n for (i, (plot_spec,ax)) in enumerate(zip(self.plot_specs,axes)):\n\n # Create the axis:\n #ax = self.fig.add_subplot(n_plots, 1, i + 1)\n ax.set_xunit(units.millisecond)\n ax.set_xmargin(0.05)\n ax.set_ymargin(0.05)\n\n ax.set_xaxis_maxnlocator(self.nxticks)\n\n # Leave the plotting to the tag-plot object\n plot_spec.plot( ax=ax,\n all_traces=self.all_trace_objs,\n 
all_eventsets=self.all_event_set_objs,\n time_range=self.timerange,\n linkage=self.linkage,\n decimate_points=self.decimate_points,\n show_xlabel = self.show_xlabel,\n show_xticklabels = self.show_xticklabels,\n show_xticklabels_with_units = self.show_xticklabels_with_units,\n show_xaxis_position = self.show_xaxis_position,\n xticklabel_quantisation=self.xticklabel_quantisation,\n is_top_plot = (i==0),\n is_bottom_plot = (i==n_plots-1),\n xticks = self.xticks,\n xlabel=self.xlabel,\n\n )\n\n\n # Save the Axis:\n self.subaxes.append(ax)\n\n if self.mpl_tight_bounds:\n import pylab\n try:\n pylab.tight_layout()\n except AttributeError:\n pass # This is version specfic\n except ValueError:\n pass # Top can't be less than bottom\n\n\n","sub_path":"src/morphforge/simulationanalysis/tagviewer/tagviewer.py","file_name":"tagviewer.py","file_ext":"py","file_size_in_byte":10091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"10771899","text":"# -*- coding: utf-8 -*-\nimport pandas as pd \nimport numpy as np\nimport math\nimport os\n\nstop_word_list = [\"a\",\"able\",\"about\",\"across\",\"after\",\"all\",\"almost\",\"also\",\"am\",\"among\",\"an\",\"and\",\"any\",\"are\",\"as\",\"at\",\"be\",\"because\",\"been\",\"but\",\"by\",\"can\",\"cannot\",\"could\",\"dear\",\"did\",\"do\",\"does\",\"either\",\"else\",\"ever\",\"every\",\"for\",\"from\",\"get\",\"got\",\"had\",\"has\",\"have\",\"he\",\"her\",\"hers\",\"him\",\"his\",\"how\",\"however\",\"i\",\"if\",\"in\",\"into\",\"is\",\"it\",\"its\",\"just\",\"least\",\"let\",\"like\",\"likely\",\"may\",\"me\",\"might\",\"most\",\"must\",\"my\",\"neither\",\"no\",\"nor\",\"not\",\"of\",\"off\",\"often\",\"on\",\"only\",\"or\",\"other\",\"our\",\"own\",\"rather\",\"said\",\"say\",\"says\",\"she\",\"should\",\"since\",\"so\",\"some\",\"than\",\"that\",\"the\",\"their\",\"them\",\"then\",\"there\",\"these\",\"they\",\"this\",\"tis\",\"to\",\"too\",\"twas\",\"us\",\"wants\",\"was\",\"we\",\"were\",\"what\",\"when\",\"where\",\"which\",\"while\",\"who\",\"whom\",\"why\",\"will\",\"with\",\"would\",\"yet\",\"you\",\"your\"]\n\n\"\"\"\n texturizer.simple: Basic text feature calculation\n Calculate statistics such as the average length of words, max word length\n proportion of non stop-words.\n\n Stop-word list taken from: https://www.textfixer.com/tutorials/common-english-words.txt \n\n\"\"\"\n\n########################################################################################\ndef add_text_summary_features(df, columns):\n \"\"\"\n Given a pandas dataframe and a set of column names.\n calculate the simple text summary features and add them.\n \"\"\"\n rez = df.copy()\n for col in columns:\n rez = add_text_features(rez, col)\n return rez\n\n########################################################################################\ndef add_text_features(df, col):\n \"\"\"\n Given a pandas dataframe and a column name.\n calculate the simple text summary features and add them.\n \"\"\"\n col_len = col + \"_len\"\n df[col_len] = df[col].apply(null_tolerant_len)\n def cal_features(x, col):\n if x[col]!=x[col]:\n word_count = 0 \n avg_word_len = 0 \n content_wd = 0 \n capital_d = 0\n else:\n chars = null_tolerant_len(x[col])\n capitals = sum(1 for c in x[col] if c.isupper())\n capital_d = capitals/chars\n word_array = x[col].lower().split()\n non_stop_words = list(set(word_array) - set(stop_word_list))\n word_count = len(word_array)\n word_lengths = list(map(len, word_array))\n avg_word_len = 
sum(word_lengths)/word_count\n content_wd = len(non_stop_words)/len(word_array)\n return word_count, avg_word_len, content_wd, capital_d\n\n df[[col+'_wc', col+'_avg_wl', col+'_cwd', col+'_caps']] = df.apply(cal_features, col=col, axis=1, result_type=\"expand\")\n\n return df\n\n########################################################################################\ndef null_tolerant_len(x):\n if x != x:\n return 0\n else:\n return len(x)\n\n \n########################################################################################\n","sub_path":"texturizer/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"557150311","text":"# -*- Mode: Python -*-\n#\n# General log emission class\n#\n\nimport array, inspect, os, sys, traceback, __main__\nfrom time import (strftime, gmtime)\n\ntry:\n from thread import get_ident\nexcept:\n from _thread import get_ident\n\n#\n# Logger -- general routines for submitting information to a log - syslog or \n# file.\n#\nclass Logger:\n\n kLevelMap = { 'fatal': 0,\n 'error': 1,\n 'warning': 2,\n 'info': 3,\n 'verbose': 4,\n 'debug': 5 }\n\n #\n # Define entry and exit tags used by LogProcEntry and LogProcExit\n #\n kEntryTag = 'BEG'\n kExitTag = 'END'\n\n #\n # Default output to nothing.\n #\n def __init__( self ):\n for name, level in list(self.kLevelMap.items()):\n setattr(self, 'k' + name.capitalize(), level)\n\n self._out = self._flush = self._close = self._nothing\n self.fd = None # File name or descriptor logging to\n self._showTime = 1 # If TRUE, print timestamp\n self._showThread = 0 # If TRUE, print thread ID\n self._showFile = 1 # If TRUE, print defining file name\n self._showFunc = 1 # If TRUE, print function name\n self._showSelf = 1 # If TRUE, print first method arg\n self.maxLength = None\n self.setLevel(self.kError)\n\n #\n # If logging to a file, close it and reopen it. Used for rolling log files.\n #\n def reset(self):\n if isinstance(self.fd, str):\n self.useFile(self.fd, self._showTime, self._showThread, self._showFile)\n\n def _log(self, level, logMsg):\n self._out(logMsg)\n self._flush()\n\n def _fatal(self, *args): self._log(self.kFatal, self._formatOutput(self.kFatal, args))\n def _error(self, *args): self._log(self.kError, self._formatOutput(self.kError, args))\n def _warning(self, *args): self._log(self.kWarning, self._formatOutput(self.kWarning, args))\n def _info(self, *args): self._log(self.kInfo, self._formatOutput(self.kInfo, args))\n def _verbose(self, *args): self._log(self.kVerbose, self._formatOutput(self.kVerbose, args))\n def _debug(self, *args): self._log(self.kDebug, self._formatOutput(self.kDebug, args))\n def _begin(self, *args): self._log(self.kVerbose, self._formatOutput(self.kVerbose, args, self.kEntryTag))\n def _end(self, *args): self._log(self.kVerbose, self._formatOutput(self.kVerbose, args, self.kExitTag))\n\n #\n # Issues a log message at the given severity level.\n #\n def log(self, level, *args):\n self._log(level, self._formatOutput(level, args))\n\n #\n # Does not log anything\n #\n def _nothing(self, *ignored):\n return\n\n #\n # Set the log threshold level. 
For faster processing, update methods to\n # either emit something or return silently.\n #\n def setLevel(self, maxLevel):\n if isinstance(maxLevel, str):\n tmp = self.kLevelMap.get(maxLevel.lower())\n if tmp is None:\n raise ValueError(maxLevel)\n maxLevel = tmp\n \n if maxLevel > self.kDebug:\n maxLevel = self.kDebug\n elif maxLevel < self.kFatal:\n maxLevel = self.kFatal\n\n #\n # Turn on/off levels\n #\n procs = self.__class__.__dict__\n for name, level in list(self.kLevelMap.items()):\n if level <= maxLevel:\n setattr(self, name, getattr(self, '_' + name))\n else:\n setattr(self, name, self._nothing)\n\n #\n # Special handling for entry/exit methods.\n #\n if maxLevel >= self.kVerbose:\n self.begin = self._begin\n self.end = self._end\n else:\n self.begin = self.end = self._nothing\n\n #\n # Flush any pending output and revert to doing nothing\n #\n def close(self):\n self._flush()\n self._close()\n self._out = self._flush = self._close = self._nothing\n\n #\n # Setup to use the given file object for log writing\n #\n def useFile(self, fd, showTime = 1, showThread = 0, showFile = 1):\n self.close()\n self.fd = fd\n if isinstance(fd, str):\n fd = open(fd, 'w+')\n self._close = fd.close # Only close file desc. we open\n self._out = fd.write\n self._flush = fd.flush\n self._showTime = showTime\n self._showThread = showThread\n self._showFile = showFile\n\n #\n # Setup to use Python syslog module.\n #\n def useSyslog(self, ident, opts, facility, showTime = 0, showThread = 0, showFile = 1):\n import syslog\n self.close()\n syslog.openlog(ident, opts, facility)\n self._out = syslog.syslog\n self._close = syslog.closelog\n self._showTime = showTime\n self._showThread = showThread\n self._showFile = showFile\n\n #\n # Setup to use whatever is set in sys.stderr.\n #\n def useStdErr(self, showTime = 1, showThread = 0, showFile = 1):\n self.useFile(sys.stderr, showTime, showThread, showFile)\n\n #\n # Setup to use whatever is set in sys.stdout.\n #\n def useStdOut(self, showTime = 1, showThread = 0, showFile = 1):\n self.useFile(sys.stdout, showTime, showThread, showFile)\n\n #\n # Set flag that determines if timestamp is printed in output.\n #\n def showTime(self, showTime = 1):\n self._showTime = showTime\n \n #\n # Set flag that determines if thread ID is printed in output.\n #\n def showThread(self, showThread = 1):\n self._showThread = showThread\n\n #\n # Set flag that determines if filename that contains the log statement is\n # printed.\n #\n def showFile(self, showFile = 1):\n self._showFile = showFile\n\n #\n # set flag that determines if function name is printed in output.\n #\n def showFunction(self, showFunc = 1):\n self._showFunc = showFunc\n \n #\n # Set flag that determines if the first argument (self) of a method is\n # printed in Begin() output.\n #\n def showSelf(self, showSelf = 1):\n self._showSelf = showSelf\n\n #\n # Internal function that builds the string that is ultimately sent to the\n # current sink device.\n #\n def _formatOutput(self, level, args, tag = \"\"):\n\n # Try to get context information. 
Looking for the name of the file we\n # are in, the name of the function (with possible class name\n # prepended), and if we are formatting a Begin() call, a list of\n # argnames and values that describe what is being passed into the\n # function we are logging.\n #\n doBegin = tag == self.kEntryTag and len(args) == 0\n fileName, proc, bArgs = self._procInfo(doBegin)\n if len(bArgs) > 0:\n args = bArgs\n \n # Generate timestamp\n #\n bits = []\n if self._showTime:\n bits.append(strftime(\"%Y%m%d.%H%M%S\", gmtime()))\n\n #\n # Generate thread ID\n #\n if self._showThread:\n bits.append('#{} '.format(get_ident()))\n\n #\n # Print file name containing the log statement\n #\n if self._showFile:\n bits.append(fileName)\n\n #\n # Print the function name containing the log statement. May also have a\n # class name if this is a method.\n #\n if self._showFunc:\n bits.append(proc)\n\n #\n # Print BEG/END tag\n #\n if len(tag) > 0:\n bits.append(tag)\n\n #\n # Append each argument to message string\n #\n bits.extend([str(z) for z in args])\n return ' '.join(bits) + '\\n'\n\n #\n # Return the name of the class that defines the function being logged. We\n # walk the class hierarchy just like Python does in order to locate the\n # actual defining class.\n #\n def _definingClass(self, theClass, codeObj):\n classDict = theClass.__dict__\n name = codeObj.co_name\n if name in classDict:\n tmp = classDict[name]\n if tmp.__code__ == codeObj:\n return theClass.__name__\n for eachClass in theClass.__bases__:\n name = self._definingClass(eachClass, codeObj)\n if name != None:\n return name\n return None\n\n #\n # Returns a tuple containing information about the function being logged:\n # file name, caller name, argument list\n #\n def _procInfo(self, genArgs = 0):\n fileName = '__main__'\n procName = '?'\n args = []\n\n frame = inspect.currentframe()\n frame = frame.f_back # Get out of _procInfo\n if frame:\n frame = frame.f_back # Get out of _formatOutput\n if frame:\n frame = frame.f_back # Get ouf of _log\n\n if frame:\n\n #\n # Extract the code object that contains the call to our log method.\n #\n code = frame.f_code\n fileName = os.path.split(code.co_filename)[1]\n procName = code.co_name\n numArgs = code.co_argcount\n if numArgs > 0:\n \n #\n # Assume we will display first argument. If we determine that\n # we are logging a method, obey the setting for showSelf.\n #\n firstArg = 0\n \n #\n # Get first argument and see if it is an object (ala self)\n #\n frameLocals = frame.f_locals\n varNames = code.co_varnames\n obj = frameLocals[varNames[0]]\n if hasattr(obj, '__class__'):\n className = None\n for each in inspect.getmro(type(obj)):\n if each.__dict__.get(code.co_name):\n className = each.__name__\n break\n if className:\n procName = className + '.' 
+ procName\n if not self._showSelf:\n firstArg = 1\n \n #\n # Create a list of argument names and their runtime values.\n # Only done if we are in a Begin() log method.\n #\n if genArgs:\n for each in varNames[firstArg : numArgs]:\n value = frameLocals[each]\n if isinstance(value, str):\n arg = each + ': ' + value\n else:\n arg = each + ': ' + repr(value)\n args.append(arg)\n each = None\n\n obj = frameLocals = None\n frame = code = None\n return (fileName, procName, args)\n\nclass Foo(object):\n def __init__(self):\n gLog.begin()\n gLog.end()\n \n def bar(self):\n gLog.begin()\n gLog.end()\n\ndef test():\n def a():\n gLog.begin()\n gLog.debug('this is a test')\n gLog.end()\n a()\n f = Foo()\n f.bar()\n\ndef DelLog():\n delattr(__main__.__builtins__, 'gLog')\n\n#\n# First time we are imported, install a global variable `gLog' for everyone to\n# see and use. By default, use stderr.\n#\nif not hasattr(__main__.__builtins__, 'gLog'):\n __main__.__builtins__.gLog = Logger()\n gLog.useStdErr()\n","sub_path":"Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":11357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"635532086","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Cpu, Core, Ram, Swap, Measure\nfrom django.db.models import Q\nimport cStringIO as StringIO\nimport psutil\nimport json\nfrom datetime import datetime\n\n\n# Create your views here.\n\ndef json_serial(obj):\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"Type not serializable\")\n\n\ndef generate_pc_data():\n cpu = psutil.cpu_percent(interval=3, percpu=True)\n swap = [param / (10 ** 6) for param in psutil.swap_memory()] # swap total, used, free in MB\n swap[3] *= 10 ** 6\n ram = [param / (10 ** 6) for param in psutil.virtual_memory()] # memory active and inactive (as occupied and free)\n ram[2] *= 10 ** 6\n disk = [param / (10 ** 6) for param in psutil.disk_usage('/')] # disk occupiance\n disk[-1] *= 10 ** 6\n return cpu, swap, ram, disk\n\n\ndef clear_first_entries(user):\n if len(Cpu.objects.filter(owner=user)) > 1000:\n Cpu.objects.filter(owner=user).first().delete()\n Cpu.objects.filter(~Q(owner=user)).delete() # Delete all data not belonging to logged user\n Ram.objects.filter(owner=user).first().delete()\n Ram.objects.filter(~Q(owner=user)).delete()\n Swap.objects.filter(owner=user).first().delete()\n Swap.objects.filter(~Q(owner=user)).delete()\n Measure.objects.filter(owner=user).first().delete()\n Measure.objects.filter(~Q(owner=user)).delete()\n\n\ndef record_data(user, cpu, ram, swap):\n if user.is_authenticated():\n\n clear_first_entries(user)\n\n own_cpu = Cpu()\n own_ram = Ram()\n own_swap = Swap()\n\n own_cpu.owner = user\n own_cpu.save()\n\n for c in cpu:\n own_core = Core()\n own_core.cpu_id = own_cpu\n own_core.core_load = c\n own_core.save()\n\n own_ram.owner = user\n own_ram.ram_load_free = ram[4]\n own_ram.ram_load_used = ram[3]\n\n own_swap.owner = user\n own_swap.swap_load_total = swap[0]\n own_swap.swap_load_used = swap[1]\n own_swap.swap_load_free = swap[2]\n\n own_ram.save()\n own_swap.save()\n\n measure = Measure()\n measure.user_cpu = own_cpu\n measure.user_ram = own_ram\n measure.user_swap = own_swap\n measure.owner = user\n\n measure.save()\n\n\ndef monitor_list(request):\n cpu, swap, ram, disk = generate_pc_data()\n json_response = {'cpu_parameters': cpu, 'swap_parameters': swap, 'ram_parameters': ram,\n 'disk_parameters': 
disk}\n\n record_data(request.user, cpu=cpu, swap=swap, ram=ram)\n if request.is_ajax():\n return HttpResponse(json.dumps({'user':json_response}))\n return render(request, 'monitors/hw.html', {'cpu_parameters': cpu, 'swap_parameters': swap, 'ram_parameters': ram,\n 'disk_parameters': disk})\n\n\ndef generate_json():\n cpu = psutil.cpu_percent(percpu=True, interval=3)\n cpu_dict = {}\n for i, core in enumerate(cpu):\n cpu_dict[\"core\" + str(i)] = core\n ram = psutil.virtual_memory()._asdict()\n swap = psutil.swap_memory()._asdict()\n disk = psutil.disk_usage('/')._asdict()\n data = {\"stats\": [{\"cpu_parameters\": cpu_dict}, {\"ram_parameters\": ram}, {\"swap_parameters\": swap},\n {\"disk_parameters\": disk}]}\n return json.dumps(data, indent=4, sort_keys=True)\n\n\ndef monitor_json(request):\n json_file = StringIO.StringIO()\n json_file.write(generate_json())\n response = HttpResponse(content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=json_stats'\n response.write(json_file.getvalue())\n return response\n\n\ndef clear_database(request):\n Cpu.objects.all().delete()\n Ram.objects.all().delete()\n Swap.objects.all().delete()\n Measure.objects.all().delete()\n Core.objects.all().delete()\n return render(request, 'monitors/hw.html')\n\n\ndef chart_list(request):\n measures_data = Measure.objects.filter(owner=request.user).order_by('measure_date')\n cpus_data = [Cpu.objects.get(pk=m.user_cpu.pk) for m in measures_data]\n ram_data = [Ram.objects.get(pk=m.user_ram.pk) for m in measures_data]\n swap_data = [Swap.objects.get(pk=m.user_swap.pk) for m in measures_data]\n\n ram_load_used_data = [r.ram_load_used for r in ram_data]\n ram_load_free_data = [r.ram_load_free for r in ram_data]\n\n swap_load_total_data = [s.swap_load_total for s in swap_data]\n swap_load_used_data = [s.swap_load_used for s in swap_data]\n swap_load_free_data = [s.swap_load_free for s in swap_data]\n\n measures_dates = [m.measure_date for m in measures_data]\n\n cores_data = []\n\n for cpu in cpus_data:\n temp_cores = []\n for core in Core.objects.filter(cpu_id=cpu.pk):\n temp_cores.append(core.core_load)\n cores_data.append(temp_cores)\n\n return render(request, 'monitors/charts.html',\n {'cores': json.dumps(cores_data),\n 'ram_free': json.dumps(ram_load_free_data),\n 'ram_used': json.dumps(ram_load_used_data),\n 'swap_total': json.dumps(swap_load_total_data),\n 'swap_used': json.dumps(swap_load_used_data),\n 'swap_free': json.dumps(swap_load_free_data),\n 'measures': json.dumps(measures_dates, default=json_serial, ensure_ascii=False)})\n","sub_path":"monitors/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"49534517","text":"from django.conf import settings\nimport os\nfrom datetime import datetime, date, time\nfrom lxml import etree, objectify\nimport uuid\nimport glob\nfrom zipfile import ZipFile\nimport datetime\nimport time\nimport dateutil.parser\nfrom decimal import Decimal\nfrom cost_cadastr import xmlfirload, xmllistcreate\nimport requests\nimport shutil\nfrom lxml import etree, objectify\n\ndef validateXML(xmlfile):\n \"\"\"\n проверка на соовтетствие XML-схеме\n \"\"\"\n try:\n xml_doc = etree.parse(xmlfile)\n except Exception as e:\n raise Exception(\"Ошибка парсинга XML-файла {0}\".format(xmlfile))\n else:\n return True\n xml_schema_filename = os.path.normcase(os.path.join(settings.STATICFILES_DIRS[0], \n 'scheme/ListForRating_v04/', 
'ListForRating_v04.xsd'))\n xml_schema_doc = etree.parse(xml_schema_filename)\n xmlschema = etree.XMLSchema(xml_schema_doc)\n if xmlschema.validate(xml_doc):\n return True\n else:\n return False\n#--------------------------\ndef chekfiles(fileName, zuoptionslist, oksoptionslist, loadmifoption):\n \"\"\"\n check that the file matches the criteria of the selected objects\n \"\"\"\n try:\n xml_doc = etree.parse(fileName)\n except Exception as e:\n raise Exception(\"Error parsing XML file {0}\".format(fileName))\n else:\n objecttype = xml_doc.xpath('/ListForRating/ListInfo/ObjectsType/ObjectType')[0].text\n #if the object is a building/structure (OKS), check that it is in the user-specified list\n if objecttype in oksoptionslist:\n cadnums = []\n if objecttype == '002001002000':\n buildingNodes = xml_doc.xpath('/ListForRating/Objects/Buildings/Building')\n for building in buildingNodes:\n cadnums.append(building.get('CadastralNumber'))\n if loadmifoption:\n xmllistcreate.loadMifMid(os.path.normpath(os.path.dirname(fileName)), cadnums)\n elif objecttype == '002001004000':\n constrNodes = xml_doc.xpath('/ListForRating/Objects/Constructions/Construction')\n for constr in constrNodes:\n cadnums.append(constr.get('CadastralNumber'))\n if loadmifoption:\n xmllistcreate.loadMifMid(os.path.normpath(os.path.dirname(fileName)), cadnums)\n elif objecttype == '002001005000':\n unconstrNodes = xml_doc.xpath('/ListForRating/Objects/Uncompleteds/Uncompleted')\n for unconstr in unconstrNodes:\n cadnums.append(unconstr.get('CadastralNumber'))\n if loadmifoption:\n xmllistcreate.loadMifMid(os.path.normpath(os.path.dirname(fileName)), cadnums)\n elif objecttype == '002001003000':\n flatsNodes = xml_doc.xpath('/ListForRating/Objects/Flats/Flat')\n for flat in flatsNodes:\n cadnums.append(flat.get('CadastralNumber'))\n elif objecttype == '002001009000':\n carsNodes = xml_doc.xpath('/ListForRating/Objects/CarParkingSpaces/CarParkingSpace')\n for car in carsNodes:\n cadnums.append(car.get('CadastralNumber'))\n return len(cadnums), xml_doc.xpath('/ListForRating/ListInfo')[0].get('DateForm')\n #if the object is a land parcel (ZU)\n elif objecttype == '002001001000' and zuoptionslist:\n categories = xml_doc.xpath('//ListForRating/ListInfo/Categories/Category')\n chek_cat = False\n for item in categories:\n if item.text in zuoptionslist:\n chek_cat = True\n if not chek_cat:\n return False\n else: \n parcels = xml_doc.xpath('//ListForRating/Objects/Parcels/Parcel')\n count = int(xml_doc.xpath('//ListForRating/ListInfo/Quantity')[0].text)\n for item in parcels:\n item_category = item.xpath('./Category')[0].text\n if item_category not in zuoptionslist:\n item.getparent().remove(item)\n count -= 1\n if count > 0:\n xml_doc.xpath('//ListForRating/ListInfo/Quantity')[0].text = str(count)\n xml_doc.write(fileName, encoding='UTF-8')\n return count, xml_doc.xpath('/ListForRating/ListInfo')[0].get('DateForm')\n else:\n return False\n else:\n return False\n \n#------------------------------------\ndef convertList(filelist, zuoptionslist, oksoptionslist, loadmifoption):\n \"\"\"\n reformat the list\n \"\"\"\n #unpack the list archive\n try:\n xml_files_list = xmlfirload.exctractZip(filelist)\n except Exception as e:\n raise Exception(\"Error unpacking the archive file\")\n else:\n #create a directory for loading graphics\n# if loadmifoption:\n# try:\n# normal_dir_path = os.path.normpath(os.path.dirname(xml_files_list[0]))\n# os.mkdir(os.path.normpath(normal_dir_path))\n# except:\n# raise Exception(\"Error creating a directory for loading MIF files\")\n \n totalobjectscount = 0\n datecreate = ''\n for item in xml_files_list:\n count = chekfiles(item, zuoptionslist, oksoptionslist, loadmifoption)\n if not count:\n os.remove(item)\n else:\n totalobjectscount += count[0]\n datecreate = count[1]\n out_dir = xmlfirload.createDir(settings.MEDIA_ROOT + '/cost_cadastr/temp')\n file_out = xmllistcreate.packToZIP2(out_dir, os.path.dirname(xml_files_list[0]))\n return totalobjectscount, file_out, datecreate\n \n\n","sub_path":"cost_cadastr/pskoload.py","file_name":"pskoload.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"81025200","text":"import copy\nimport datetime\nimport json\nimport logging\nimport os\nfrom operator import itemgetter\n\nfrom ourcup import basedir\nfrom ourcup.fixtures.country_codes import CountryCodes\n\nFIXTURES_JSON_FILE = os.path.join(basedir, 'ourcup', 'fixtures', 'data', 'world-cup-2019-matches.json')\n\nlogger = logging.getLogger(__name__)\n\n\nclass MatchPicker(object):\n\n    def __init__(self):\n        self._logger = logging.getLogger(__name__)\n        self._json_file_path = FIXTURES_JSON_FILE\n        self._fixtures = MatchPicker._process_matches(json.load(open(self._json_file_path)))\n        self._logger.info(\"loading fixtures from \"+self._json_file_path)\n        self._translator = CountryCodes()\n\n    @staticmethod\n    def _process_matches(raw_match_data):\n        matches = []\n        for m in raw_match_data:\n            team1_alpha3 = m['home_team']['code']\n            team2_alpha3 = m['away_team']['code']\n            # skip matches that we don't know the teams of yet\n            if (team1_alpha3 != 'TBD') and (team2_alpha3 != 'TBD'):\n                match = {\n                    'team1': team1_alpha3,\n                    'team2': team2_alpha3,\n                    'date': m['datetime'][0:10]\n                }\n                matches.append(match)\n        return matches\n\n    def by_population(self, country_pop_data):\n        all_games = copy.deepcopy(self._fixtures)\n        # change the score so US doesn't show up very much, commented out because USA isn't in it!\n        # country_alpha3_to_pop_map['USA'] = 1\n        country_alpha3_to_pop_map = {m['country']: m['population'] for m in country_pop_data}\n        for game in all_games:\n            logger.debug(\"{} vs. 
{}\".format(game['team1'], game['team2']))\n try:\n team1_pop = country_alpha3_to_pop_map[self._translator.getByFifaAlpha3(game['team1']).iso]\n except KeyError:\n logger.warning(\"Can't find population data for {}\".format(game['team1']))\n team1_pop = 0 # the country isn't our list of\n try:\n team2_pop = country_alpha3_to_pop_map[self._translator.getByFifaAlpha3(game['team2']).iso]\n except KeyError:\n logger.warning(\"Can't find population data for {}\".format(game['team2']))\n team2_pop = 0 # the country isn't our list of\n game['score'] = team1_pop + team2_pop\n\n prioritized_games = sorted(all_games, key=itemgetter('score'), reverse=True)\n for game in prioritized_games:\n game['team1Country'] = self._translator.getByFifaAlpha3(game['team1'])\n game['team2Country'] = self._translator.getByFifaAlpha3(game['team2'])\n game['date'] = datetime.datetime.strptime(game['date'], '%Y-%m-%d')\n return prioritized_games\n\n def participating_country_codes(self):\n team1_codes = set([game['team1'] for game in self._fixtures])\n team2_codes = set([game['team2'] for game in self._fixtures])\n combined = team1_codes.union(team2_codes)\n return combined\n","sub_path":"ourcup/fixtures/match_picker.py","file_name":"match_picker.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"71377752","text":"#!/usr/bin/env python3\n#you need this above to get ROS to execute the script - points to the python executable\n#use python 3 here due to the RL libraries being written in python 3\n\n'''\nThis script -----------\n\nAuthor: Charles (Chuck) Sullivan\nLast update: 9-13-2020\n'''\n\n\nfrom gym import spaces\nimport gym\nimport numpy as np\nimport time\nimport random\nfrom math import sqrt\nimport datetime\nimport os\nimport pprint\nimport sys\n\n#import necessary stable baselines TD3 libraries for learning purposes\nfrom stable_baselines import TD3\nfrom stable_baselines.td3.policies import MlpPolicy as Td3MlpPolicy\nfrom stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, BaseCallback\nfrom stable_baselines.common.evaluation import evaluate_policy\n#import ROS specific libraries and custom message types\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Bool\nfrom std_msgs.msg import Empty\nfrom rl_robotics_framework.msg import sensor_processing\nfrom rl_robotics_framework.msg import gcode_packager\nfrom rl_robotics_framework.msg import apriltag_data\n\n\nmodel_dir = str(sys.argv[1])\nprint (model_dir)\n\npp = pprint.PrettyPrinter(indent = 4)\n\n#global variables\ntestString = \"\"\n\nxCommand = 0.\nyCommand = 0.\n#sensor readings\nxState_global = 0.\nyState_global = 0.\n#apriltag readings\nxPos_global = 0.\nyPos_global = 0.\n\nxZero = 0. 
#zero values for each episode\nyZero = 0.\n\naction_done = False\naction_done_flag = False\n\n#define constants\nNUM_STEPS_EPISODE = 24 #index at 0 - (desirednum - 1)\nTOTAL_STEPS = 8000\nTIME_PER_STEP = 1 #this could be variable depending on hardware\nrobotName = \"robot_1\"\nNOISE_CONSTANT = .03 #how do i update this inside the TD3.learn function\nSIGMA = .44\n#just going to put this at a rate large enough taht we liikkely wont get hardware hangups\n\n#here are the constants for the TD3 Model\nGAMMA = 0.99\nLEARNING_RATE = .05\nBUFFER_SIZE = 25000\nLEARNING_STARTS = 1000 #1024\nGRADIENT_STEPS = 3\nBATCH_SIZE = 64 #512\nTRAIN_FREQ = 64 #64\nTAU = .05\nPOLICY_DELAY = 25 #32\n# ACTION_NOISE = None | this is set later when you build the noise generator\nTARGET_POLICY_NOISE = 0.1\nTARGET_NOISE_CLIP = 0.25\nRANDOM_EXPLORATION = 0.0\nVERBOSE = 0\nTENSORBOARD_LOG = \"./logs/model_log\"\n_INIT_SETUP_MODEL = True\nPOLICY_KWARGS = None\nFULL_TENSORBOARD_LOG = 'tensorboadr_full'\nSEED = None\nN_CPU_TF_SESS = None\n\n#set up a log file for the printed output\nbaseDir = 'logs/terminal_output' + robotName + '/learining_run_TD3_'\n\nt = datetime.datetime.now()\nt = t.strftime(\"%m_%d_%y_%X/\")\ndirName = baseDir + t \nif not os.path.exists(dirName):\n\tos.makedirs(dirName)\n\tprint(dirName + \" created successfully\")\n\tdone = True\nelse:\n\tprint(dirName + \" exists\")\n\t\n\t\t\nfile = open(os.path.join(dirName, \"learn.txt\"), \"w\")\nfile.close()\n\n\n# define ROS publisher nodes\ncmd_pub = rospy.Publisher('/actuator_commands', gcode_packager, queue_size = 30)\ndirect_cmd_publisher = rospy.Publisher('/grbl_commands', String, queue_size = 30)\ngrbl_reset_pub = rospy.Publisher('/grbl_rst', Empty, queue_size=1)\n#test pub to update the subscribed node. for testing\ntestpub = rospy.Publisher('/testData', String, queue_size = 20)\n\n#define ROS subscriber callback methods\ndef robot_state_callback(data):\n\t#print(\"Updating State\")\n\tglobal xState_global\n\tglobal yState_global\n\txState_global = data.xSensor\n\tyState_global = data.ySensor\n\t#print(\"x sensor: {}\", format(xState))\n\t#print(\"y sensor: {}\", format(yState))\t\n\ndef gnd_truth_callback(data):\n\t#print(\"updating Ground Truth Data\")\n\tglobal xPos_global, yPos_global\n\txPos_global = data.x_pos_gnd*1000\n\tyPos_global = data.y_pos_gnd*1000\n\t#print(\"x position: {}\", format(xPosition))\n\t#print(\"y position: {}\", format(yPosition))\n\ndef action_done_callback(data):\n\tglobal action_done\n\taction_done = data.data\n\ndef testData_callback(data):\n\t#print(\"test data recieved\")\n\tglobal testString\n\ttestString = data.data + '\\n'\n\t\n\n\n#Define ROS subscriber nodes\ndef RL_subscribers():\n rospy.init_node('learning_node')\n rospy.Subscriber(\"/robot_state\", sensor_processing, robot_state_callback)\n rospy.Subscriber(\"/gnd_pos_truth\", apriltag_data, gnd_truth_callback)\n rospy.Subscriber(\"/action_done\", Bool, action_done_callback)\n rospy.Subscriber(\"/testData\", String, testData_callback)\n\n # skipping spin to see if the subscriber works without spinnign whle the thin is running\n #rospy.spin()\n\t# \n\t# \n#Define various functions used throughout script\t\n\ndef rewardCalculation(x_current, y_current, x_startstep, y_startstep):\n\t# xDist = x_current - x_startstep\n\tyDist = y_current - y_startstep\n\n\t# reward = yDist\n\n\tif yDist > .5:\n\t\t#movement positive\n\t\treward = 10*yDist\n\telif yDist <= .5 and yDist >=-.5:\n\t\t#movement doesnt change much?\n\t\t#this discourages the same movement\n\t\treward = 
-100\n\telse:\n\t\treward = yDist*2\n\n\treturn reward\n\ndef wait_for_action(is_homing):\n\tglobal action_done\n\tcount = 0\n\ttimeout = 10 #seconds\n\tif is_homing:\n\t\ttimeout = 20\n\tstart = time.time()\n\twhile not action_done:\n\t\tcount = count + 1\n\t\t# Sometimes if the commands are too close the system completes the action so fast the system doesn't get\n\t\t# a chance to register it. no action should take more than 10 seconds so if greater than that assume the \n\t\t# action is done\n\t\tif ((time.time()-start) > timeout): #this is 10 seconds\n\t\t\taction_done = True\n\t\t\n\t\t\n\t\t\n\tprint(\"\\t--Action Complete--\\t|\")\n\ttime.sleep(1) #this ensures things don't complete too quickly and action done works for the rest of the episode meaning only one action complete\n\n# def screenCmdData(generated, xPrev, yPrev):\n# \tglobal step_count\n# \tcheckRange = 1\n# \tchangeAmount = 1.5\n\t\n\t\n# \txGen = generated[0]\n# \tyGen = generated[1]\n\n\t\n\n# \t#find difference between generated and previous command\n# \txDiff = xGen-xPrev\n# \tyDiff = yGen-yPrev\n\n# \t#a for loop would be better/more scalable here but i just want to see if it works\n\n# \tif (abs(xDiff) < checkRange):\n# \t\tif xGen > (100. - changeAmount): #this would result in a greater than 100 command\n# \t\t\txScreened = xGen - random.uniform(2,5)\n# \t\telse:\n# \t\t\txScreened = xGen + random.uniform(2,5)\n# \telse:\n# \t\txScreened = xGen\n\n# \tif (abs(yDiff) < checkRange):\n# \t\tif yGen > (100. - changeAmount): #this would result in a greater than 100 command\n# \t\t\tyScreened = yGen - random.uniform(2,5)\n# \t\telse:\n# \t\t\tyScreened = yGen + random.uniform(2,5)\n# \telse:\n# \t\tyScreened = yGen\n\n# \t# print ('\\tScreening command data -- xCmd: ' +str(xScreened) + '\\t yCmd: ' + str(yScreened))\n\n\n\n# \treturn [xScreened, yScreened]\n\n\ndef homeGrblController():\n\tprint(\"Homing System....\")\n\tdirect_cmd_publisher.publish('$h')\n\twait_for_action(True)\n\ttime.sleep(.5)\n\tdirect_cmd_publisher.publish('G92 X0 Y0')\n\tprint(\"Homing Complete - moving to robot 0 state\")\n\t# cmd_message = gcode_packager()\n\t#publish action\n\t# cmd_message.x_percentage = 0.\n\t# cmd_message.y_percentage = 0.\n\t# # time.sleep(1)\n\t# cmd_pub.publish(cmd_message)\n\t# wait_for_action(False)\n\ndef initGrblController():\n\tprint(\"Initializing Grbl System...\")\n\tdirect_cmd_publisher.publish('$X')\n\thomeGrblController()\n\ndef resetGrblController():\n\tprint(\"Resetting grbl controller...\")\n\tmsg = Empty()\n\tgrbl_reset_pub.publish(msg)\n\ttime.sleep(4) #wait for reset to take place\n\tinitGrblController()\n\tprint(\"grbl controller reset!\")\n\ndef homeRobot():\n\tprint(\"Sending Robot Home\")\n\tdirect_cmd_publisher.publish('G0 X0 Y0')\n\twait_for_action(False) #wait_for_action requires the is_homing flag; this is a plain move, not a homing cycle\n\tprint(\"Robot Home\")\n\t\n\n\nclass soft_learner():\n\tdef __init__(self):\n\t\t#init position variables\n\t\tself.testS = \"\" #test string for testing data passing\n\t\tself.xCmd = 0\n\t\tself.yCmd = 0\n\t\tself.xCmdPrev = 0\n\t\tself.yCmdPrev = 0\n\t\tself.xPos = 0\n\t\tself.yPos = 0\n\t\tself.xPosPrev = 0\n\t\tself.yPosPrev = 0\n\t\tself.xZero = 0\n\t\tself.yZero = 0\n\t\tself.xState = 0\n\t\tself.yState = 0\n\t\tself.xStatePrev = 0\n\t\tself.yStatePrev = 0\n\t\tself.xZero = 0 \n\t\tself.yZero = 0\n\n\n\t\tself.log_file = None\n\n\t\tself.state = np.array([0,0])\n\t\tself.statePrev = np.array([0,0])\n\n\t\tself.FirstCommand = True\n\n\t\tself.TotalStepCount = 0\n\t\tself.TotalEpisodeCount = 0\n\t\t#init steps and dt (time per step)\n\t\tself.n_steps = 0\n\t\tself.dt = TIME_PER_STEP #1=1second - play with this variable\n\t\t#initialize proper spaces and metadata \n\t\t#both the action and state space are bounded by 0-100 for % of actuation\n\t\t#mapping these to real-world actions and sensors is handled in another script\n\t\t# self.observation_space = spaces.Box(low=np.array([0.,0.]), high=np.array([100.,100.])) #obs space = continuous, \n\t\t\n\t\t#remapping observations to -1 to 1 to debug \"convergence problem\"\n\t\tself.observation_space = spaces.Box(low=np.array([-1.,-1.]), high=np.array([1.,1.])) #obs space = continuous, \n\n\t\tself.action_space = spaces.Box(low=np.array([-1.,-1.]), high=np.array([1.,1.]))\n\t\t\n\t\tself.metadata = 0\n\n\t\t#send initial commands to grbl (home, set 0 all that)\n\n\tdef reset(self):\n\t\tglobal xZero, yZero, xPos_global, yPos_global\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.n_steps = 0\n\t\tself.reward = 0\n\n\t\tself.xCmd = 0\n\t\tself.yCmd = 0\n\t\tself.xCmdPrev = 0\n\t\tself.yCmdPrev = 0\n\t\tself.xPos = 0\n\t\tself.yPos = 0\n\t\tself.xPosPrev = 0\n\t\tself.yPosPrev = 0\n\t\tself.xZero = 0\n\t\tself.yZero = 0\n\t\tself.xState = 0\n\t\tself.yState = 0\n\t\tself.xStatePrev = 0\n\t\tself.yStatePrev = 0\n\n\t\tself.FirstCommand = True\n\n\t\t\n\t\t#reset grbl controller\n\t\tresetGrblController()\n\t\tprint('==== BEGINNING EPISODE ' + str(self.TotalEpisodeCount) + ' ====')\n\n\t\t#recalibrate and set x and y zero values for each new run for april tags data\n\t\tself.xZero = xPos_global\n\t\tself.yZero = yPos_global\n\n\t\txReturn = self.x/50-1\n\t\tyReturn = self.y/50-1\n\n\t\t#run calibration function here\n\t\treturn xReturn, yReturn\n\n\tdef step(self, generated_cmd_array):\n\t\t\n\t\tglobal xPos_global, yPos_global, xState_global, yState_global\n\t\t\n\t\tf = open(os.path.join(dirName, \"learn.txt\"), \"a\")\n\t\tself.log_file = f\n\n\t\txPos_startstep = xPos_global #take a zero so every step knows where it starts and use it to find the reward\n\t\tyPos_startstep = yPos_global\n\t\t\n\t\tprint('---------------------| Total Steps: ' + str(self.TotalStepCount) + ' | Episode: ' + str(self.TotalEpisodeCount) + ' | Episode Step: ' + str(self.n_steps) + ' |-----------------------')\n\t\tself.log_file.write('---------------------| Total Steps: ' + str(self.TotalStepCount) + ' | Episode: ' + str(self.TotalEpisodeCount) + ' | Episode Step: ' + str(self.n_steps) + ' |-----------------------\\n')\n\n\t\tgenerated_cmd_array = np.clip(generated_cmd_array, self.action_space.low, self.action_space.high)\n\n\t\t#declare message type\n\t\tcmd_message = gcode_packager()\n\t\t\n\t\t#preprocess generated commands to make sure they are sufficiently far away from the last command to not freeze the system, and within 0-100\n\t\t# screened_cmd_array = screenCmdData(generated_cmd_array, self.xCmdPrev, self.yCmdPrev)\n\t\t# self.xCmd = screened_cmd_array[0]\n\t\t# self.yCmd = screened_cmd_array[1]\n\n\t\tself.xCmd = (generated_cmd_array[0]+1)*50\n\t\tself.yCmd = (generated_cmd_array[1]+1)*50\n\n\t\t# self.yCmd = generated_cmd_array[1]\n\t\tprint(\"\\tCommand Generated\\t| \\t xCmd: %6.3f \\t yCmd: %6.3f\" %(self.xCmd, self.yCmd))\n\t\tself.log_file.write(\"\\tCommand Generated\\t| \\t xCmd: %6.3f \\t yCmd: %6.3f\\n\" %(self.xCmd, self.yCmd))\n\t\t#publish action\n\t\tcmd_message.x_percentage = self.xCmd\n\t\tcmd_message.y_percentage = self.yCmd\n\t\t# time.sleep(1)\n\t\tcmd_pub.publish(cmd_message)\n\t\tprint(\"\\t--Command Sent --\\t|\")\n\t\t\n\t\t\n\n\t\t#wait for hardware to complete 
action\n\t\twait_for_action(False)\n\t\t# print(\"\\t--Action Complete--\\t|\")\n\t\t# print(\"\\t--Done waiting --\\t|\")\n\t\t\n\t\t#subscribe/read state\n\t\tself.state = [xState_global, yState_global]\n\t\t# print(\"\\t--state data got --\\t|\")\n\t\tself.xState = self.state[0]\n\t\t# print(\"\\t--state 0 set --\\t|\")\n\t\tself.yState = self.state[1]\n\t\t# print(\"\\t--state 1 set --\\t|\")\n\t\tprint(\"\\tState Information\\t| \\t xState: %6.3f \\t yState: %6.3f\" %(self.xState, self.yState))\n\t\tprint(\"\\t \\t| \\t xSPrev: %6.3f \\t ySPrev: %6.3f\" %(self.xStatePrev, self.yStatePrev))\n\t\tself.log_file.write(\"\\tState Information\\t| \\t xState: %6.3f \\t yState: %6.3f\\n\" %(self.xState, self.yState))\n\t\tself.log_file.write(\"\\t \\t| \\t xSPrev: %6.3f \\t ySPrev: %6.3f\\n\" %(self.xStatePrev, self.yStatePrev))\n\t\t#compute reward\n\t\t#use beginning of episode as zero\n\t\t# self.reward, self.xPos, self.yPos = rewardCalculation(xPos_global, yPos_global, self.xZero, self.yZero, self.xPosPrev, self.yPosPrev)\n\t\t#use each steps starting position to find reward\n\t\tself.reward = rewardCalculation(xPos_global, yPos_global, xPos_startstep, yPos_startstep)\n\t\t\n\n\t\tself.xPos = xPos_global\n\t\tself.yPos = yPos_global\n\n\t\tprint(\"\\tPosition Information\\t| \\t xPos: %6.3f \\t yPos: %6.3f\" %(self.xPos, self.yPos))\n\t\tprint(\"\\t \\t| \\txPosPrev: %6.3f \\tyPosPrev: %6.3f\" %(self.xPosPrev, self.yPosPrev))\n\t\tprint(\"\\t \\t| \\t xZero: %6.3f \\t yZero: %6.3f\" %(self.xZero, self.yZero))\n\t\tprint(\"\\tReward Information \\t| \\t Reward: %6.3f\" %(self.reward))\n\t\tself.log_file.write(\"\\tPosition Information\\t| \\t xPos: %6.3f \\t yPos: %6.3f\\n\" %(self.xPos, self.yPos))\n\t\tself.log_file.write(\"\\t \\t| \\txPosPrev: %6.3f \\tyPosPrev: %6.3f\\n\" %(self.xPosPrev, self.yPosPrev))\n\t\tself.log_file.write(\"\\t \\t| \\t xZero: %6.3f \\t yZero: %6.3f\\n\" %(self.xZero, self.yZero))\n\t\tself.log_file.write(\"\\tReward Information \\t| \\t Reward: %6.3f\\n\" %(self.reward))\n\n\t\t#assign all current data to previous data containers for next state\n\t\tself.xStatePrev = self.xState\n\t\tself.yStatePrev = self.yState\n\t\tself.xCmdPrev = self.xCmd\n\t\tself.yCmdPrev = self.yCmd\n\t\tself.xPosPrev = self.xPos\n\t\tself.yPosPrev = self.yPos\n\n\t\t#increment and finish step\n\t\tself.TotalStepCount = self.TotalStepCount + 1\n\t\t# step_count = step_count +1\n\t\t# print (\"step count: {}\", format(step_count))\n\t\t\n\t\tself.n_steps += 1\n\t\tif self.n_steps > NUM_STEPS_EPISODE:\n\t\t\tself.TotalEpisodeCount = self.TotalEpisodeCount + 1\n\t\t\tprint('====END OF EPISODE====')\n\t\t\tself.log_file.write('====END OF EPISODE====\\n')\n\t\tdone = self.n_steps > NUM_STEPS_EPISODE\n\n\t\tself.log_file.close()\n\t\t\n\t\tleft_data = open(os.path.join(dirName,\"left_data.txt\"), \"a\")\n\t\tleft_data.write(str(self.xCmd) + '\\n')\n\t\tleft_data.close()\n\t\tright_data = open(os.path.join(dirName,\"right_data.txt\"),\"a\")\n\t\tright_data.write(str(self.yCmd) + '\\n')\n\t\tright_data.close()\n\n\t\t#remap states from -1 to 1 - this was after much debugging\n\t\tself.state[0] = (self.state[0]/50)-1\n\t\tself.state[1] = (self.state[1]/50)-1\n\t\treturn self.state, self.reward, done, {}\n\n\nclass customCallback(BaseCallback):\n\t\"\"\"\n\tA custom callback that derives from ``BaseCallback``.\n\n\t:param verbose: (int) Verbosity level 0: not output 1: info 2: debug\n\t\"\"\"\n\n\tdef __init__(self, verbose=0):\n\t\tsuper(customCallback, self).__init__(verbose)\n\t\t# Those 
variables will be accessible in the callback\n\t\t# (they are defined in the base class)\n\t\t# The RL model\n\t\t# self.model = None # type: BaseRLModel\n\t\t# An alias for self.model.get_env(), the environment used for training\n\t\t# self.training_env = None # type: Union[gym.Env, VecEnv, None]\n\t\t# Number of time the callback was called\n\t\t# self.n_calls = 0 # type: int\n\t\t# self.num_timesteps = 0 # type: int\n\t\t# local and global variables\n\t\t# self.locals = None # type: Dict[str, Any]\n\t\t# self.globals = None # type: Dict[str, Any]\n\t\t# The logger object, used to report things in the terminal\n\t\t# self.logger = None # type: logger.Logger\n\t\t# # Sometimes, for event callback, it is useful\n\t\t# # to have access to the parent object\n\t\t# self.parent = None # type: Optional[BaseCallback]\n\t\tself.startTime = None\n\t\tself.endTime = None\n\n\tdef _on_training_start(self) -> None:\n\t\t\"\"\"\n\t\tThis method is called before the first rollout starts.\n\t\t\"\"\"\n\t\tself.startTime = time.time()\n\t\tprint(\"Begin training\")\n\t\tpass\n\n\tdef _on_rollout_start(self) -> None:\n\t\t\"\"\"\n\t\tA rollout is the collection of environment interaction\n\t\tusing the current policy.\n\t\tThis event is triggered before collecting new samples.\n\n\t\t\"\"\"\n\n\t\tprint(\"\\t--Rollout Strt --\\t|\")\n\t\tpass\n\n\tdef _on_step(self) -> bool:\n\t\t\"\"\"\n\t\tThis method will be called by the model after each call to `env.step()`.\n\n\t\tFor child callback (of an `EventCallback`), this will be called\n\t\twhen the event is triggered.\n\n\t\t:return: (bool) If the callback returns False, training is aborted early.\n\t\t\"\"\"\n\t\tglobal SIGMA, LEARNING_RATE\n\t\tif (self.num_timesteps % 1000) ==0:\n\t\t\t# import pdb; pdb.set_trace()\n\t\t\tt = time.time()\n\t\t\ttime_elapsed = t-self.startTime #seconds\n\t\t\tself.model.save(\"td3_model_int_test\")\n\t\t\tSIGMA = SIGMA*.9\n\t\t\t# LEARNING_RATE = LEARNING_RATE*.9\n\t\t\tprint(\"---------\" + str(self.num_timesteps) +\" steps complete | SIGMA = \" + str(SIGMA) + \" | Learning Rate: \" + str(LEARNING_RATE) + \"|----------\")\n\t\t\tprint(\"---------------Time Elapsed: \" + str(time_elapsed) + \" seconds\")\n\t\t\tf = open(os.path.join(dirName, \"learn.txt\"), \"a\")\n\t\t\tf.write(\"---------\" + str(self.num_timesteps) +\" steps complete | SIGMA = \" + str(SIGMA) + \" | Learning Rate: \" + str(LEARNING_RATE) + \"|----------\\n\")\n\t\t\tf.write(\"--------- Time Elapsed: \" + str(time_elapsed) + \" seconds -----------\\n\")\n\t\t\tf.close()\n\t\t\t\n\t\t\tself.model.action_noise = NormalActionNoise(0,SIGMA) #annealed noise\n\t\t\t# self.model.learning_rate = LEARNING_RATE\n\t\t\t# td3_noise = OrnsteinUhlenbeckActionNoise(np.zeros(a_dim), sigma*np.ones(a_dim)) \n\t\t\t\n\n\t\tprint(\"\\t--Step Done --\\t|\")\n\t\tif yPos_global > 200:\n\t\t\tinput(\"Please reset the robot to start and press enter key to continue..\")\n\n\t\treturn True\n\n\tdef _on_rollout_end(self) -> None:\n\t\t\"\"\"\n\t\tThis event is triggered before updating the policy.\n\t\t\"\"\"\n\t\tprint(\"\\t--Updte Ploicy --\\t|\")\n\t\tf = open(os.path.join(dirName, \"learn.txt\"), \"a\")\n\t\tf.write(\"\\t--Updte Ploicy --\\t|\\n\")\n\t\tf.close()\n\t\tpass\n\n\tdef _on_training_end(self) -> None:\n\t\t\"\"\"\n\t\tThis event is triggered before exiting the `learn()` method.\n\t\t\"\"\"\n\t\tself.endTime = time.time()\n\t\ttime_elapsed = (self.endTime - self.startTime)/60 #minutes\n\t\tavg = self.num_timesteps/time_elapsed\n\t\tprint(\"\\t--Train Complt 
--\\t|\")\n\t\tprint(\"\\t elapsed time: \" + str(time_elapsed) + \" min\\tavg tiem/step: \" + str(avg) + \" sec\")\n\n\t\tf = open(os.path.join(dirName, \"learn.txt\"), \"a\")\n\t\tf.write(\"\\t--Train Complt --\\t|\\n\")\n\t\tf.write(\"\\t--Elapsed time --\\t| \" + str(time_elapsed) + \" min\\tavg Time/Step: \" + str(avg) + \" sec\\n\")\n\t\tf.close()\n\t\tpass\n\n# Use deterministic actions for evaluation\n\n\n\nif __name__ == '__main__':\n\t\n\t#run suscriber nodes\n\tRL_subscribers()\n\tprint(\"Starting...\")\n\n\ttime.sleep(3) #give ros time to set up\n\n\t#init environmnet\n\tenv = soft_learner()\n\tenv = DummyVecEnv([lambda: env])\n\t\n\tprint('done')\n\n\tmodel = TD3.load(model_dir)\n\t\n\t# Evaluate the agentm\n\t# mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10)\n\n\tobs = env.reset()\n\tfor i in range(1000):\n\t\taction, _states = model.predict(obs)\n\t\tobs, rewards, dones, info = env.step(action)\n\n\t# a_dim = env.action_space.shape[0]\n\t# # td3_noise = OrnsteinUhlenbeckActionNoise(np.zeros(a_dim), .9*np.ones(a_dim)) \n\t# td3_noise = NormalActionNoise(0,SIGMA)\n\t# td3_env = DummyVecEnv([lambda: env])\n\t# # td3_env = env\n\t\n\t# checkpoint_on_event = CheckpointCallback(save_freq=1000, save_path= \"./logs/model_checkpoints\",\n # name_prefix='rl_model')\n\t# event_callback = EveryNTimesteps(n_steps=500, callback=checkpoint_on_event)\n\n\t# eval_callback = EvalCallback(td3_env, best_model_save_path='./logs/',\n # log_path='./logs/', eval_freq=100,\n # deterministic=True, render=False)\n\n\t\n\n\t# # td3_model.learning_starts = 100\n\t\n\t# custom_callback = customCallback(verbose=0)\n\t# callback = CallbackList([custom_callback, checkpoint_on_event])\n\n\t# td3_model = TD3(Td3MlpPolicy, td3_env,\n\t# \t\t\t\tgamma = GAMMA,\n\t# \t\t\t\tlearning_rate = LEARNING_RATE,\n\t# \t\t\t\tbuffer_size = BUFFER_SIZE,\n\t# \t\t\t\tlearning_starts = LEARNING_STARTS,\n\t# \t\t\t\ttrain_freq = TRAIN_FREQ,\n\t# \t\t\t\tgradient_steps = GRADIENT_STEPS,\n\t# \t\t\t\tbatch_size = BATCH_SIZE,\n\t# \t\t\t\ttau = TAU,\n\t# \t\t\t\tpolicy_delay = POLICY_DELAY,\n\t# \t\t\t\taction_noise = td3_noise,\n\t# \t\t\t\ttarget_policy_noise = TARGET_POLICY_NOISE,\n\t# \t\t\t\ttarget_noise_clip = TARGET_NOISE_CLIP,\n\t# \t\t\t\trandom_exploration = RANDOM_EXPLORATION,\n\t# \t\t\t\tverbose = VERBOSE,\n\t# \t\t\t\ttensorboard_log = TENSORBOARD_LOG,\n\t# \t\t\t\t_init_setup_model = _INIT_SETUP_MODEL,\n\t# \t\t\t\tpolicy_kwargs = POLICY_KWARGS,\n\t# \t\t\t\tfull_tensorboard_log = FULL_TENSORBOARD_LOG,\n\t# \t\t\t\tseed = SEED,\n\t# \t\t\t\tn_cpu_tf_sess = N_CPU_TF_SESS)\n\n\t# #every x episodes fun the model for y amount of episodes and evaluate it\n\t\n\t# td3_model.learn(total_timesteps = TOTAL_STEPS, callback=callback)\n\t# td3_model.save(\"td3_model\")\n\n\n\t# print(\"learning complete\")\n\n\n\n\n\n","sub_path":"src/rl_robotics_framework/src/run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":20717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"369802227","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\npub_columns_filter.py -- add needed columns, remove unused columns\n\"\"\"\n\n__author__ = \"Alex Loiacono and Nicholas Rejack\"\n__copyright__ = \"Copyright 2016 (c) Alex Loiacono and Nicholas Rejack\"\n__license__ = \"New BSD License\"\n__version__ = \"0.01\"\n\nfrom vivopump import read_csv_fp, write_csv_fp, get_parms, vivo_query\nimport sys\nimport utils\nimport 
time\n\ndef get_vivo_academic_articles(parms):\n \"\"\"\n Query VIVO and return a list of all the academic articles.\n @see uf_examples/publications/filters/pub_match_filter.py\n @see https://wiki.duraspace.org/display/VIVO/VIVO-ISF+1.6+relationship+diagrams%3A+Authorship\n\n :param: parms: vivo_query params\n :return: dictionary of uri keyed by DOI\n \"\"\"\n query = \"\"\"\n SELECT\n ?uri ?doi\n WHERE {\n ?uri a vivo:InformationResource .\n ?uri bibo:doi ?doi .\n }\n \"\"\"\n results = vivo_query(query, parms)\n bindings = results['results']['bindings']\n doi_list = [b['doi']['value'] for b in bindings]\n uri_list = [b['uri']['value'] for b in bindings]\n return dict(zip(doi_list, uri_list))\n\ndate = time.strftime(\"%Y_%m_%d\")\n\nfile_name = 'vivo_author_list.csv'\nutils.print_err(\"Using static disambiguation file: {}\".format(file_name))\n\ndisamb_file = open('data_out/disambiguation_'+date+'.txt', 'w+')\n\nauthors_missing_pubs_file = open('data_out/authors_missing_pubs_'+date+'.txt', 'w+')\n\nauthors_missing_pubs_dict = {}\n\n# get dictionaries of authors keyed by name parts\nvivo_auth_disambig_data = utils.get_vivo_disambiguation_data_from_csv(\n file_name)\n\nparms = get_parms()\ndata_in = read_csv_fp(sys.stdin)\nutils.print_err(\"{} rows in the input\".format(len(data_in)))\n\ndata_out = {}\n# get dictionary of pub uri keyed by doi\nvivo_pubs = get_vivo_academic_articles(parms)\n\nutils.print_err('{} publications found in VIVO'.format(len(vivo_pubs)))\n# print >>sys.stderr, vivo_pubs\n\nrow_out = 1\n\ndisamb_dict = []\n\nfor row, data in data_in.items():\n\n if data['doi'] not in vivo_pubs:\n #data_out[row]['pub_uri'] = ''\n authors_missing_pubs_dict[row] = data\n continue\n\n data_out[row] = data\n\n utils.print_err(\"data is: \\n{}\".format(data))\n utils.print_err(\"row_out: {} ||| row: {}\".format(row_out,row))\n\n data_out[row]['pub_uri'] = vivo_pubs[data['doi']]\n\n if data['uf'] == 'false':\n # Always put in the non-UF author as new\n #row_out += 1\n data_out[row] = data\n data_out[row]['uri'] = ''\n #utils.print_err(\"UF entry is false {}\".format(row_index))\n else:\n author_uris = utils.get_author_disambiguation_data(\n vivo_auth_disambig_data,\n data['last'],\n data['first'],\n data['middle'])\n\n count = len(author_uris)\n utils.print_err(\"author_uris: {}\".format(author_uris))\n if count == 0:\n # There is no match in the current VIVO ==> add a new UF author\n #row_out += 1\n data_out[row] = data\n data_out[row]['uri'] = ''\n elif count == 1:\n data_out[row]['uri'] = author_uris[0]\n else:\n utils.print_err(\"Disamb: {}\".format(author_uris))\n data_out[row]['uri'] = author_uris[0]\n disamb_dict.append(\"Paper: {} -- written by {} has uris : \\n{}\\n\\n\".format(data['pub_uri'], data['display_name'], author_uris))\n row_out += 1\n\nutils.print_err('{} rows in the output'.format(len(data_out)))\n\nfor line in disamb_dict:\n disamb_file.write(line)\n\ndisamb_file.close()\n\nwrite_csv_fp(authors_missing_pubs_file, authors_missing_pubs_dict)\nwrite_csv_fp(sys.stdout, data_out)","sub_path":"uf_examples/publications/filters/author_pubs_match_filter.py","file_name":"author_pubs_match_filter.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"332945721","text":"import hashlib\nimport logging\n\nimport utils\nfrom settings import CAPTURE_DEVICE, SSD_FPN_OPENVINO_CLASSES_PATH, \\\n SSD_FPN_OPENVINO_INFO_PATH, SSD_FPN_OPENVINO_WEIGHTS_PATH, SSD_FPN_OPENVINO_CONFIG_PATH, \\\n 
SSD_FPN_OPENVINO_DIMENSIONS_PATH, SSD_FPN_OPENVINO_CLASSES_PATH_CPU, \\\n SSD_FPN_OPENVINO_INFO_PATH_CPU, SSD_FPN_OPENVINO_WEIGHTS_PATH_CPU, SSD_FPN_OPENVINO_CONFIG_PATH_CPU, \\\n SSD_FPN_OPENVINO_DIMENSIONS_PATH_CPU, PARALLEL_IMAGES\n\nlogger = logging.getLogger(__name__)\n\n\nclass ModelService:\n\n def __init__(self, model_arch, run_on_cpu=False):\n if model_arch == 'ssd_fpn_openvino':\n from model.ssd_openvino_detector import BinDetectorOpenVino\n\n if run_on_cpu:\n logger.info(\"Running on CPU\")\n self.classes = utils.parse_classes_file(SSD_FPN_OPENVINO_CLASSES_PATH_CPU)\n model_info = utils.parse_info_file(SSD_FPN_OPENVINO_INFO_PATH_CPU)\n weights_path = SSD_FPN_OPENVINO_WEIGHTS_PATH_CPU\n config_path = SSD_FPN_OPENVINO_CONFIG_PATH_CPU\n dimension_path = SSD_FPN_OPENVINO_DIMENSIONS_PATH_CPU\n else:\n logger.info(\"running on VPU\")\n self.classes = utils.parse_classes_file(SSD_FPN_OPENVINO_CLASSES_PATH)\n model_info = utils.parse_info_file(SSD_FPN_OPENVINO_INFO_PATH)\n weights_path = SSD_FPN_OPENVINO_WEIGHTS_PATH\n config_path = SSD_FPN_OPENVINO_CONFIG_PATH\n dimension_path = SSD_FPN_OPENVINO_DIMENSIONS_PATH\n\n h = ModelService.get_model_hash(weights_path)\n if 'FRAMEWORK_VERSION' in model_info:\n ModelService.check_framework('openvino', model_info['FRAMEWORK_VERSION'], 'inference_engine')\n if 'SHA1' in model_info:\n ModelService.check_hash(h, model_info['SHA1'])\n self.bifocal = model_info['BIFOCAL']\n self.model = BinDetectorOpenVino(config_path,\n weights_path,\n num_requests=PARALLEL_IMAGES * 2 if self.bifocal else PARALLEL_IMAGES,\n classes=self.classes,\n model_version=str(model_info['MODEL_VERSION']),\n threshold=model_info['THRESHOLD'],\n box_area_limit=model_info['BOX_AREA_LIMIT'],\n estimate_distance=True,\n dimensions_json=dimension_path,\n capture_device=CAPTURE_DEVICE,\n resize_h=349, resize_w=349,\n run_on_cpu=run_on_cpu)\n\n else:\n raise ValueError(\n \"Invalid model type identifier: \" + model_arch +\n \"Available formats are 'ssd_fpn_openvino'\"\n )\n logger.info(model_info)\n logger.info(\"Detector SHA1 %s\" % h)\n\n @staticmethod\n def add_object_coordinates(results, locations):\n def is_nan(n):\n try:\n float(n)\n return True\n except ValueError:\n return False\n\n for result, location in zip(results, locations):\n for r in result:\n if is_nan(location['lat']) or is_nan(location['long']) or is_nan(location['bearing']):\n lat, long = 0, 0 # we no longer support strings on this field, float only\n else:\n # adjust bearing of object relative to bearing of the truck\n bearing_obj = location['bearing'] + r['angle']\n if bearing_obj > 360:\n bearing_obj -= 360\n if bearing_obj < 0:\n bearing_obj += 360\n\n lat, long = utils.calculate_dpos(latitude=location['lat'], longitude=location['long'],\n head=bearing_obj,\n dist=r['distance'])\n\n r['latitude'], r['longitude'] = lat, long\n\n # Runs the provided image through the model, and returns an (image, result_dict) tuple\n def apply_model(self, images, locations):\n results = self.model.predict_on_image(images, bifocal=self.bifocal)\n self.add_object_coordinates(results, locations)\n return results\n\n # check the framework for correct version, if submodule name provided the version is checked for the submodule\n @staticmethod\n def check_framework(framework_name, required_version, submodule=None):\n framework = __import__(framework_name)\n if submodule:\n framework = framework.__dict__[submodule]\n if framework.__version__ != required_version:\n raise ValueError(\n \"Invalid framework version for {}. 
{} found, but {} is required\".format(\n framework_name, framework.__version__, required_version))\n\n @staticmethod\n def check_hash(computed_hash, info_hash):\n if computed_hash != info_hash:\n raise ValueError(\n \"Invalid model binary. Hash failed checked. Expected {} but {} was found\".format(\n info_hash, computed_hash))\n\n @staticmethod\n def get_model_hash(model_path):\n BUF_SIZE = 65536\n\n sha1 = hashlib.sha1()\n\n with open(model_path, 'rb') as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n sha1.update(data)\n\n return str(sha1.hexdigest())\n","sub_path":"pothole/map_scripts/model_service.py","file_name":"model_service.py","file_ext":"py","file_size_in_byte":5754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"20765037","text":"class Node:\n def __init__(self, data, next1):\n self.data = data\n self.next = next1\n\n\nclass Linkedlist:\n def __init__(self):\n self.head = None\n self.size = 0\n\n def length(self):\n return self.size\n\n def is_empty(self):\n return self.size == 0\n\n def insert_at_the_beginning(self, data):\n self.insert_with_index(0, data)\n\n def insert_at_the_ending(self, data):\n self.insert_with_index(self.size, data)\n\n def insert_with_index(self, index, data):\n if index > self.size or index < 0:\n print(\"check given\", index, \"index value and enter again\")\n return False\n if index == 0:\n self.head = Node(data, self.head)\n else:\n current = self.head\n for i in range(index - 1):\n current = current.next\n current.next = Node(data, current.next)\n self.size += 1\n\n def peek_top(self):\n return self.peek_index(0)\n\n def peek_bottom(self):\n return self.peek_index(self.size - 1)\n\n def peek_index(self, index):\n if index >= self.size or index < 0:\n print(\"check given\", index, \"index value and enter again\")\n return False\n current = self.head\n for i in range(index):\n current = current.next\n return current.data\n\n def peek_element(self, data):\n current = self.head\n while current.data != data:\n if current.next is None:\n print(\"element\", data, \"not found\")\n return False\n current = current.next\n print(\"element\", data, \"is found\")\n return True\n\n def delete_top_element(self):\n return self.delete_with_index(0)\n\n def delete_bottom_element(self):\n return self.delete_with_index(self.size - 1)\n\n def delete_with_index(self, index):\n if index >= self.size or index < 0:\n print(\"check given\", index, \"index value and enter again\")\n return False\n self.size -= 1\n if index == 0:\n temp = self.head\n self.head = self.head.next\n return temp.data\n current = self.head\n for i in range(index - 1):\n current = current.next\n temp = current.next\n current.next = current.next.next\n return temp.data\n\n def delete_with_value(self, data):\n current = self.head\n previous = current\n while current.data != data:\n if current.next is None:\n print(\"element\", data, \"not found\")\n return False\n previous = current\n current = current.next\n temp = previous.next\n previous.next = current.next\n print(\"element\", data, \"is found and deleted\")\n self.size -= 1\n return temp.data\n\n def print_val(self):\n current = self.head\n while current:\n print(current.data, \"\\b--->\", end=\"\")\n current = current.next\n print()\n\n\nlinked_list = Linkedlist()\n\n\ndef trail1():\n linked_list.insert_at_the_beginning(45)\n linked_list.insert_at_the_beginning(65)\n linked_list.insert_at_the_beginning(34)\n linked_list.insert_at_the_beginning(56)\n 
linked_list.insert_at_the_beginning(78)\n linked_list.insert_at_the_beginning(98)\n linked_list.insert_at_the_beginning(63)\n linked_list.insert_at_the_beginning(31)\n linked_list.print_val()\n\n\ndef trail2():\n linked_list.insert_at_the_beginning(78)\n linked_list.insert_at_the_ending(67778)\n linked_list.insert_at_the_ending(899)\n linked_list.insert_at_the_ending(99)\n linked_list.print_val()\n trail1()\n\n\ndef trail3():\n linked_list.insert_at_the_beginning(34)\n linked_list.insert_at_the_beginning(56)\n linked_list.insert_at_the_beginning(78)\n linked_list.insert_at_the_beginning(31)\n linked_list.insert_at_the_ending(12)\n linked_list.insert_at_the_ending(14)\n linked_list.insert_at_the_ending(56)\n linked_list.insert_with_index(90, 345)\n linked_list.insert_with_index(5, 23)\n print(linked_list.peek_index(2))\n print(linked_list.peek_bottom())\n print(linked_list.peek_top())\n linked_list.peek_element(16)\n linked_list.peek_element(33)\n linked_list.insert_at_the_beginning(128)\n linked_list.insert_at_the_beginning(784)\n linked_list.insert_at_the_beginning(314)\n linked_list.print_val()\n print(linked_list.delete_with_index(5))\n linked_list.print_val()\n print(linked_list.delete_top_element())\n linked_list.print_val()\n print(linked_list.delete_bottom_element())\n linked_list.print_val()\n linked_list.delete_with_value(12)\n linked_list.print_val()\n # trail2()\n\n # this is siva\n # signing off\n\n\nif __name__ == \"__main__\":\n trail3()","sub_path":"code-lab/DSA - Singly Linked List API.py","file_name":"DSA - Singly Linked List API.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"132829464","text":"#!/usr/bin/python3\n\n# This has routines which process the gadgets collected in get_text.\n# Only gadgets with length 2 are taken and classified into 8 categories. \n\nimport sys\n\nfrom general import *\n\n# This routine will collect gadgets with N assembly instructions in it(excluding ret).\n# If N = 1, then the gadget will be of the form \"Inst; ret\"\ndef getLNGadgets(GadgetList, n) : \n\n nGadgetsList = list()\n\n for gadget in GadgetList:\n\n # Only ret instruction, don't do anything. \n if len(gadget) == 1: \n continue\n \n # If gadget length is 2, just append.\n elif len(gadget) == 2 : \n nGadgetsList.append(gadget)\n \n # If gadget length is > 2, get only 2 instructions and append.\n elif len(gadget) > 2 : \n \n newgadget = list()\n newgadget.append(gadget[-2])\n newgadget.append(gadget[-1])\n nGadgetsList.append(newgadget)\n \n return nGadgetsList\n\n# Takes in a list of 2 strings and returns it.\ndef getStrippedOperands(operands) : \n\n a = \"\"\n b = \"\"\n operands[0].lstrip()\n operands[0].rstrip()\n operands[1].lstrip()\n operands[1].rstrip()\n\n for char in operands[0]:\n if(char != ' '):\n a+= char\n \n for char in operands[1]:\n if(char != ' '):\n b+= char\n\n return [a, b]\n\n# Takes in one string and strips it. \ndef getStrippedOperand(operand) : \n\n a = \"\"\n operand.rstrip()\n operand.lstrip()\n\n for char in operand:\n if(char != ' '):\n a+= char\n\n return a\n\n\n# This categorises all the 1-instruction gadgets present. \n# Returns a list of lists - there are 8 main lists. \n# Each of those 8 lists has gadgets of it's category. 
\n# Refer general.py to know what those categories are.\ndef categorize(TwoInstGadgets): \n\n ALLGADGETS = [[] for x in range(TOTAL_CATEGORIES)]\n print(\"Length of ALLGADGETS = \", len(ALLGADGETS))\n\n x = 0\n while x < len(TwoInstGadgets) : \n \n gadget = TwoInstGadgets[x]\n inst = gadget[0]\n \n # TODO: Add the derivatives of \"mov\"\n if inst.mnemonic == \"mov\" : \n \n operands = inst.op_str.split(',')\n operands = getStrippedOperands(operands)\n\n if (operands[0] in REGISTERS) and (operands[1] in REGISTERS) : \n ALLGADGETS[MOVREGG].append(gadget)\n\n elif operands[0] in REGISTERS and operands[1].isnumeric() : \n ALLGADGETS[LOADCONSTG].append(gadget)\n\n elif operands[0] in REGISTERS and not(operands[1].isnumeric()) : \n ALLGADGETS[LOADMEMG].append(gadget)\n\n elif operands[0] not in REGISTERS and not(operands[1].isnumeric()): \n ALLGADGETS[STOREMEMG].append(gadget)\n\n # \n elif inst.mnemonic == \"pop\" : \n \n operand = inst.op_str\n operand = getStrippedOperand(operand)\n\n if operand in REGISTERS : \n ALLGADGETS[LOADCONSTG].append(gadget)\n \n else : \n ALLGADGETS[STOREMEMG].append(gadget)\n\n # TODO: Add \"div\"\n elif inst.mnemonic == \"add\" or inst.mnemonic == \"sub\" or inst.mnemonic == \"mul\": \n \n operands = inst.op_str.split(',')\n operands = getStrippedOperands(operands)\n\n # Original condition: operands[0] in REGISTERS) and ((operands[1] in REGISTERS) or (operands[1].isnumeric())\n if (operands[0] in REGISTERS) and (operands[1].isnumeric()) : \n \n ALLGADGETS[ARITHMETICG].append(gadget)\n \n else : \n print(\"Found an add / sub / mul instruction playing with memory\")\n print(\"As of now, not doing anything with memory-arithmetic instructions\", end = '\\n\\n')\n \n\n elif inst.mnemonic == \"inc\" or inst.mnemonic == \"dec\": \n\n operand = inst.op_str\n operand = getStrippedOperand(operand)\n\n if operand in REGISTERS : \n ALLGADGETS[ARITHMETICG].append(gadget)\n \n else: \n print(\"Found an inc / dec instruction playing with memory\")\n print(\"As of now, not doing anything with memory-arithmetic instructions\", end = '\\n\\n')\n\n\n elif inst.mnemonic == \"xor\": \n \n operands = inst.op_str.split(',')\n operands = getStrippedOperands(operands)\n\n # xor reg, reg zeroes the register, i.e. it loads the constant 0\n if (operands[0] in REGISTERS) and (operands[1] in REGISTERS) :\n\n if (operands[0] == operands[1]) : \n ALLGADGETS[LOADCONSTG].append(gadget)\n \n else : \n ALLGADGETS[ARITHMETICG].append(gadget)\n \n else : \n print(\"Found an xor instruction playing with memory\")\n print(\"As of now, not doing anything with memory-arithmetic instruction\", end = '\\n\\n')\n\n elif inst.mnemonic == \"and\" : \n\n operands = inst.op_str.split(',')\n operands = getStrippedOperands(operands)\n\n # TODO: if int(operands[1]) == 0xffffffffffffffff : \n # ALLGADGETS[LOADCONSTG].append(gadget)\n # This is like loading (-1) into operands[0]\n\n if (operands[0] in REGISTERS) and (operands[1] in REGISTERS) :\n ALLGADGETS[ARITHMETICG].append(gadget)\n\n else : \n print(\"Found an and instruction playing with memory\")\n print(\"As of now, not doing anything with memory-arithmetic instruction\", end = '\\n\\n')\n\n\n elif inst.mnemonic == \"or\" : \n\n operands = inst.op_str.split(',')\n operands = getStrippedOperands(operands)\n\n if (operands[0] in REGISTERS) and (operands[1] in REGISTERS) :\n ALLGADGETS[ARITHMETICG].append(gadget)\n\n else : \n print(\"Found an or instruction playing with memory\")\n print(\"As of now, not doing anything with memory-arithmetic instruction\", end = '\\n\\n')\n\n \n # Covering the special instructions without which we 
would have no job to do :P\n elif inst.mnemonic == \"int\" or inst.mnemonic == \"syscall\" : \n ALLGADGETS[SPECIAL_INST].append(gadget)\n\n else : \n \n print(\"Found a gadget who has not been categorized\")\n print(\"Need help in adding these!\", end = '\\n\\n')\n\n # Keep the loop going!\n x = x + 1\n\n\n # At this point, ALLGADGETS has duplicate gadgets also. \n \n # This will remove all duplicate gadgets\n UniqueGadgetsList = getSetOfGadgets(ALLGADGETS)\n\n return UniqueGadgetsList\n\n# This routine removes all repeating gadgets. \n# Example: \n # Suppose there is \"xor rax, rax; ret\" at 0x1234, 0x2345, 0x3456\n # This keeps only one instance and removes all others\n\ndef getSetOfGadgets(ListofLists) : \n\n # This function should be fixed first.\n\n # x = 0\n # while x < len(ListofLists) : \n\n # y = 0\n # while y < len(ListofLists[x]) : \n\n # gadget = ListofLists[x][y]\n # # ALLGADGETS[x].append(gadget)\n # z = 0\n # for z in ListofLists[x][y] : \n\n # if (gadget == z) or (gadget[0].address == z[0].address) or ((gadget[0].mnemonic == z[0].mnemonic) and (gadget[0].op_str == z[0].op_str)) :\n # ListofLists[x].remove(z)\n\n # z = z + 1\n\n # y = y + 1\n\n # x = x + 1 \n\n return ListofLists\n \n\n\n# From the categorized gadgets, this routine will return a list of gadgets belonging to the queried category and containing target register.\ndef queryGadgets(GadgetList, category, targetReg):\n\n # Basic error handling!\n if category < 0 and category > 7 : \n print(\"Error: category not present\")\n print(\"Exited in categorize.queryGadgets\")\n sys.exit()\n\n L = GadgetList[category]\n\n ReturnList = list()\n\n x = 0\n while x < len(L) : \n \n gadget = L[x]\n inst = gadget[0]\n\n operands = inst.op_str.split(',')\n if len(operands) == 2: \n operands = getStrippedOperands(operands)\n\n if operands[0] == targetReg : \n ReturnList.append(gadget)\n \n # Keep the loop going!\n x = x + 1\n \n return ReturnList\n\n# Returns a list of int gadgets if it is found.\n# If not found, it returns an empty list\ndef checkIfIntPresent(GadgetList) : \n\n specialList = GadgetList[SPECIAL_INST]\n intList = list()\n\n present = 0\n\n x = 0\n while x < len(specialList) : \n \n gadget = specialList[0]\n inst = gadget[0]\n if inst.mnemonic == \"int\" and inst.op_str == \"0x80\": \n intList.append(gadget)\n \n x = x + 1\n \n return intList\n\n\n# Returns a list of syscall gadgets if it is found.\n# If not found, it returns an empty list\ndef checkIfSyscallPresent(GadgetList) : \n\n specialList = GadgetList[SPECIAL_INST]\n syscallList = list()\n\n present = 0\n\n x = 0\n while x < len(specialList) : \n \n gadget = specialList[0]\n inst = gadget[0]\n if inst.mnemonic == \"syscall\": \n syscallList.append(gadget)\n \n x = x + 1\n \n return syscallList\n \n \n ","sub_path":"categorize.py","file_name":"categorize.py","file_ext":"py","file_size_in_byte":9284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"148889495","text":"#!/usr/bin/env python\r\n\r\nimport matplotlib\r\n#import matplotlib.pyplot as plt\r\nimport h5py\r\nimport numpy\r\nimport os, sys, inspect\r\nimport warnings\r\nimport matplotlib.ticker as ticker\r\n\r\n#show_rN = True\r\nshow_rN = False\r\n\r\n#makePDF = True\r\nmakePDF = False\r\nfor arg in sys.argv:\r\n if arg.lower()=='pdf':\r\n makePDF = True\r\n\r\nif makePDF:\r\n matplotlib.use('PDF')\r\nelse:\r\n matplotlib.use('qt5agg')\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nprint(\"This is \"+ 
inspect.getfile(inspect.currentframe()))\r\n\r\n#########\r\n##INPUT##\r\n#########\r\n\r\nquantityToPlot = \"BHat\"\r\n\r\nfilename = 'sfincsOutput.h5'\r\n\r\nFigSize = (12,10)\r\n\r\nfont = {'size':25}\r\nmatplotlib.rc('font', **font)\r\nmatplotlib.rc('lines',markeredgewidth=0,markersize=3,linewidth=2.5)\r\nmatplotlib.rc('axes',linewidth=1.5)\r\n\r\n#matplotlib.rcParams['mathtext.default'] = 'it'\r\n#matplotlib.rcParams['text.usetex'] = True\r\n\r\nzFactor = 1 ##T\r\n###W7-X##\r\n#xAxisTicks = [r'$0$', r'$\\pi/10$', r'$2\\pi/10$', r'$3\\pi/10$', r'$4\\pi/10$']\r\n###LHD\r\n##xAxisTicks = [r'$0$', r'$\\pi/20$', r'$2\\pi/20$', r'$3\\pi/20$', r'$4\\pi/20$']\r\n#\r\n#yAxisTicks = [r'$0$', r'$\\pi/2$', r'$\\pi$', r'$3\\pi/2$', r'$2\\pi$']\r\n\r\n\r\nfig = plt.figure(figsize=FigSize)\r\nfig.patch.set_facecolor('white')\r\nnumRows = 1\r\nnumCols = 1\r\n#iteration = 0\r\nnumContours = 100\r\n#ContourLevels = [2.7, 2.8, 2.9, 3.0, 3.1, 3.2]\r\nnumLevels = 5\r\n\r\nColorMap = 'rainbow'\r\n\r\n#############\r\n##END INPUT##\r\n#############\r\n\r\n#def fmt_cbar(x, pos):\r\n# if x == 0.0:\r\n# #return r'${}$'.format(x)\r\n# return r'{}'.format(x)\r\n# a, b = '{:.1e}'.format(x).split('e')\r\n# b = int(b)\r\n# #return r'${} \\cdot 10^{{{}}}$'.format(a, b)\r\n# return r'${} \\cdot 10^{{{}}}$'.format(a, b)\r\n\r\ndef fmt_xy_axis(x, pos):\r\n #return r'${}$'.format(x)\r\n #return r'${}$'.format('{:1.2f}'.format(x))\r\n return r'{}'.format('{:1.2f}'.format(x))\r\n\r\n#for i in range(6):\r\nprint (\"Processing file \",filename)\r\nf = h5py.File(filename,'r')\r\ntheta = f[\"theta\"][()]\r\nzeta = f[\"zeta\"][()]\r\nBHat = f[quantityToPlot][()]\r\nrN = f[\"rN\"][()]\r\nf.close()\r\n\r\nprint (\"theta max: \" + str(numpy.amax(theta)))\r\nprint (\"zeta max: \" + str(numpy.amax(zeta)))\r\n\r\nzMinData = zFactor*numpy.amin(BHat[:,:])\r\nzMaxData = zFactor*numpy.amax(BHat[:,:])\r\nprint (\"zMin = \" + str(zMinData))\r\nprint (\"zMax = \" + str(zMaxData))\r\n\r\n\r\ndelta = (numpy.amax(BHat) - numpy.amin(BHat)) / numLevels\r\nContourLevels = numpy.arange(numpy.amin(BHat), numpy.amax(BHat) + delta/2.0, delta)\r\nContourLevels = zFactor*ContourLevels\r\n \r\nax = plt.subplot(numRows,numCols,1)\r\n #plt.contourf(zeta,theta,1000*numpy.fliplr(BHat[:,:,iteration].transpose()),numContours)\r\nBPlot = plt.contourf(zeta,theta,zFactor*BHat.transpose(),numContours, cmap=plt.get_cmap(ColorMap))\r\nBPlot2 = plt.contour(BPlot,levels=ContourLevels, colors='k', hold='on')\r\n#BPlot2 = plt.contour(BPlot,levels=BPlot.levels[::2], colors='k', hold='on')\r\n#plt.xlabel(r'$\\zeta$' + \" \" + r'$\\mathrm{[rad]}$')\r\n#plt.ylabel(r'$\\theta$'+ \" \" + r'$\\mathrm{[rad]}$')\r\nplt.xlabel(r'zeta' + \" \" + r'[rad]')\r\nplt.ylabel(r'theta'+ \" \" + r'[rad]')\r\n#plt.zlabel(r'$B$'+ ' [T]')\r\nplt.xticks([0,max(zeta)/4,max(zeta)/2,3*max(zeta)/4,max(zeta)])\r\nplt.yticks([0,max(theta)/4,max(theta)/2,3*max(theta)/4,max(theta)])\r\n#plt.gca().axes.xaxis.set_ticklabels(xAxisTicks)\r\n#plt.gca().axes.yaxis.set_ticklabels(yAxisTicks)\r\n\r\n#plt.gca().axes.xaxis.set_label_coords(0.5,-0.09)\r\n#plt.gca().axes.yaxis.set_label_coords(-0.09,0.5)\r\nplt.gca().axes.xaxis.set_label_coords(0.5,-0.05)\r\nplt.gca().axes.yaxis.set_label_coords(-0.09,0.5)\r\n\r\n#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\r\n\r\nif show_rN:\r\n plt.title('rN = '+str(rN))\r\n\r\n#cbar = plt.colorbar(BPlot, label=r'$B$'+ ' [T]', ticks=ContourLevels)\r\n#cbar = plt.colorbar(BPlot, label=r'$\\Phi_1$'+ ' [V]', ticks=BPlot.levels[::2])\r\n#cbar.add_lines(BPlot2)\r\n#cbar = 
plt.colorbar(BPlot, format=ticker.FuncFormatter(fmt_xy_axis), ticks=ContourLevels)\r\ncbar = plt.colorbar(BPlot, ticks=ContourLevels)\r\n#cbar.ax.set_ylabel(r'$B$'+ \" \" + r'$\\mathrm{[T]}$', rotation=0, labelpad=10)\r\ncbar.ax.set_ylabel(r'B'+ \" \" + r'[T]', rotation=0, labelpad=10)\r\n\r\n#with warnings.catch_warnings():\r\n# warnings.simplefilter(\"always\")\r\n#plt.clabel(BPlot2, fmt='%2.1f', colors='k', fontsize=14)\r\n#plt.clabel(BPlot2, fmt=ticker.FuncFormatter(fmt_xy_axis), colors='k', fontsize=18, inline=False)\r\nplt.clabel(BPlot2, colors='k', fontsize=18, inline=False)\r\n\r\n#plt.subplots_adjust(wspace=0.27)\r\n\r\nprint (BHat.shape)\r\n\r\nif makePDF:\r\n print (\"Saving PDF\")\r\n\r\n if len(sys.argv)>2 : #Use the substituted name as file name\r\n print (\"Writing plot to \" + os.getcwd() + \"/\" + sys.argv[2] + \".pdf.\")\r\n plt.savefig(sys.argv[2] + \".pdf\", orientation = 'landscape', papertype='letter')\r\n else :\r\n head, tail = os.path.split(inspect.getfile(inspect.currentframe()))\r\n print (\"Writing plot to \" + os.getcwd() + \"/\" + tail + \".pdf.\")\r\n plt.savefig(tail+'.pdf', orientation = 'landscape', papertype='letter')\r\nelse:\r\n plt.show()\r\n","sub_path":"tools/Albert/version3/plot_tools/plotB_Python3.py","file_name":"plotB_Python3.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"98286061","text":"import os\nfrom csv import writer\nimport csv\nfrom datetime import datetime\nimport sounddevice as sd\nimport scipy.io.wavfile as wav\nfrom scipy.io.wavfile import write\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom time import sleep, perf_counter\nfrom threading import Thread\n\nprint(tf.version.VERSION)\n\n# Load the trained tensorflow Model\n# Define the class array\nmodel = tf.keras.models.load_model('saved_model/model')\nclass_names = ['motorbike', 'city', 'multiple-cars', 'single-cars']\n\n\n# Method that returns the current date & time to sort the collected data\ndef get_date_time():\n now = datetime.now()\n return now.strftime(\"%d-%m---%H-%M-%S\")\n\n\n# Define all Folders\n# wav_dir: collected audio files\n# png_dir: spectrograms as png of the audio file\n# classified_dir: spectrograms with label as name\n# csv_file: csv file to organize the collected data\nwav_dir = 'snippets/wav/' + get_date_time() + '/'\npng_dir = 'snippets/png/' + get_date_time() + '/'\nclassified_dir = 'snippets/classified/' + get_date_time() + '/'\ncsv_file = 'snippets/snippets.csv'\n\n\ndef get_current_couter(csvfilename):\n with open(csvfilename, \"r\", encoding=\"utf-8\", errors=\"ignore\") as scraped:\n line = scraped.readlines()[-1]\n return int(line[:1])\n\n\n# Snippet counter to give each audio file an ID\nsnippet_counter = get_current_couter(csv_file)\n\n\n# Create the needed folders to save the collected data\nos.mkdir(wav_dir)\nos.mkdir(png_dir)\nos.mkdir(classified_dir)\n\n\n# Get the current timestamp that is stored in the csv file\ndef get_timestamp():\n now = datetime.now()\n return now.strftime(\"%H:%M:%S %d-%m-%y\")\n\n\n# Write new collected data to the csv\n# Structure: TODO\ndef append_to_csv(list):\n with open(csv_file, 'a+', newline='') as write_obj:\n csv_writer = writer(write_obj)\n csv_writer.writerow(list)\n\n\n# Rename the collected file, so that the label is in the filename\ndef rename_file(file, label):\n name = os.path.splitext(os.path.basename(file))[0]\n updated_name = 
classified_dir + name + '_' + label + '.png'\n os.rename(file, updated_name)\n return updated_name\n\n\n# Record a 2 seconds wav file with 44100Hz\ndef record(file):\n fs = 44100\n seconds = 2\n print('recording...')\n recording = sd.rec(int(seconds * fs), samplerate=fs, channels=1)\n sd.wait()\n write(file, fs, recording)\n\n\n# Convert the recorded wav file to spectrogram with matplotlib\ndef wav_to_spectrogram(audio_path, save_path, dimensions=(128, 128), noverlap=16, cmap='gray_r'):\n sample_rate, samples = wav.read(audio_path)\n fig = plt.figure()\n fig.set_size_inches(\n (dimensions[0]/fig.get_dpi(), dimensions[1]/fig.get_dpi()))\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.specgram(samples, Fs=2, noverlap=noverlap)\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n fig.savefig(save_path, bbox_inches=\"tight\", pad_inches=0)\n plt.close(fig)\n\n\n# Get the prediction of a wav audio file that was recorded\ndef get_prediction(file, name_wav):\n img = keras.preprocessing.image.load_img(file, target_size=(128, 128))\n img_array = keras.preprocessing.image.img_to_array(img)\n img_array = tf.expand_dims(img_array, 0)\n\n predictions = model.predict(img_array)\n score = tf.nn.softmax(predictions[0])\n\n print('-----------------------------------------------------------')\n print('')\n print(\n \"snippet {}: {} [{:.2f}]\"\n .format(snippet_counter, class_names[np.argmax(score)], 100 * np.max(score))\n )\n print('')\n label = class_names[np.argmax(score)]\n updated_name = rename_file(file, label)\n\n csv_list = [snippet_counter, get_timestamp(), name_wav,\n updated_name, label, 0]\n append_to_csv(csv_list)\n\n\n# Recording Task start of the script\n# 1. Record 2 second audio file\n# 2. Convert it to a spectrogram\n# 3. 
Get the AI prediction and save the label\ndef task():\n print('Starting record task...')\n\n while True:\n global snippet_counter\n snippet_counter += 1\n\n name_wav = wav_dir + 'snippet_' + str(snippet_counter) + '.wav'\n name_png = png_dir + 'snippet_' + str(snippet_counter) + '.png'\n\n record(name_wav)\n wav_to_spectrogram(name_wav, name_png)\n print(f'snippet {snippet_counter} done')\n\n # Pass the callable and its arguments separately; calling it inline would run\n # the prediction on this thread before the Thread even starts\n Thread(target=get_prediction, args=(name_png, name_wav)).start()\n\n\n# Start a new thread that records the audio data\ndef main():\n Thread(target=task).start()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"realtime-classification.py","file_name":"realtime-classification.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"532741565","text":"import torch\nimport torch.nn as nn\nimport torch.utils.data as Data\nimport torchvision\nfrom torchvision import transforms, utils\nfrom torch.utils.data import Dataset, DataLoader\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport Class_OS.o1_获得当前工作目录\n\n# Parameter list\nEPOCH = 30\nBATCH_SIZE = 512\nLR = 0.01\nIMG_H = 30\nIMG_W = 15\npath = Class_OS.o1_获得当前工作目录.main()\n\n\n# Load the data\nclass ReaderData(Dataset):\n def __init__(self, path):\n with open(path + \"data/ocrData.txt\", 'rt') as f:\n self.a = f.read()\n\n def __getitem__(self, index):\n im = Image.open(path + \"data/\" + str(index) + \".jpg\").convert('L')\n im = np.array(im).reshape(IMG_H, IMG_W).astype(np.float32)\n im = im / 255.0 * 2.0 - 1.0\n #im = torch.from_numpy(im)\n return im\n\n def __len__(self):\n return len(self.a)\n\n\n# Read the data\ntrain_data = ReaderData(path=path)\ndata_loader = DataLoader(train_data, batch_size=512)\n\n# Define the network\n\"\"\"\nAutoencoder network adapted from Morvan's tutorial\n\"\"\"\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Linear(IMG_H * IMG_W, 256),\n )\n # Decoder (decompress)\n self.decoder = nn.Sequential(\n nn.Linear(256, IMG_H * IMG_W),\n nn.Sigmoid(),\n )\n self.enZ = nn.Sequential(\n nn.Linear(256, 3), # compress to 3 features for easier plotting\n )\n\n def forward(self, x):\n encoded = self.encoder(x)\n enZ = self.enZ(encoded)\n decoded = self.decoder(encoded)\n\n return enZ, decoded\n\n\n# Define the training parameters\nnet = Net()\noptimizer = torch.optim.Adam(net.parameters(), lr=LR)\nloss_func = nn.MSELoss()\n\nfor epoch in range(EPOCH):\n '''\n xList = [] # record plotting data\n yList = []\n zList = []\n '''\n for step, x in enumerate(data_loader):\n b_x = x.view(-1, IMG_H * IMG_W)\n b_y = x.view(-1, IMG_H * IMG_W)\n\n enZ, decoded = net(b_x)\n\n loss = loss_func(decoded, b_y) # mean square error\n optimizer.zero_grad() # clear gradients for this training step\n loss.backward() # backpropagation, compute gradients\n optimizer.step() # apply gradients\n\n print(\"EPOCH:\", epoch + 1, \"LOSS:\", loss.data)\n\n # loss plot\n fig = plt.figure()\n plt.plot(step, loss.data, color='red', linewidth=1.0)\n plt.show()\n '''\n # x, y, z data values\n X = enZ.data[:, 0].numpy()\n Y = enZ.data[:, 1].numpy()\n Z = enZ.data[:, 2].numpy()\n xList.append(X)\n yList.append(Y)\n zList.append(Z)\n '''\n '''\n # draw the figure\n fig = plt.figure()\n ax = Axes3D(fig) # 3D plot\n xList=np.array(xList)\n yList=np.array(yList)\n zList=np.array(zList)\n ax.plot_surface(xList, yList, zList, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))\n plt.show()\n '''\n\ntorch.save(net, path + 
'save/net.pkl')\n","sub_path":"Class_Pytorch/p1_imgauto/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"237013413","text":"from decimal import Decimal\n\nfrom django import http\nfrom django.contrib.messages import constants, get_messages\nfrom django.shortcuts import get_object_or_404, render\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import ListView, DetailView\n\nfrom l10n.utils import moneyfmt\nfrom livesettings.functions import config_value\nfrom product.models import Category, Product\nfrom product.modules.configurable.models import ConfigurableProduct\nfrom product.signals import index_prerender\nfrom product.utils import find_best_auto_discount, display_featured, find_product_template, optionids_from_post\nfrom satchmo_utils.satchmo_json import json_encode\nfrom satchmo_utils.numbers import RoundedDecimalError, round_decimal\nfrom satchmo_utils.views import bad_or_missing\nimport logging\n\nlog = logging.getLogger('product.views')\n\n\nclass CategoryIndexView(ListView):\n model = Category\n template_name = \"product/category_index.html\"\n context_object_name = \"categorylist\"\n\n def get_queryset(self):\n return self.model.objects.root_categories()\n\ncategory_index = CategoryIndexView.as_view()\n \n# def category_index(request, template=\"product/category_index.html\", root_only=True):\n# \"\"\"Display all categories.\n\n# Parameters:\n# - root_only: If true, then only show root categories.\n# \"\"\"\n# cats = Category.objects.root_categories()\n# return render(request, template, { 'categorylist' : cats })\n\n\nclass CategoryView(DetailView):\n model = Category\n template_name = \"product/category.html\"\n context_object_name = \"category\"\n\n def get_queryset(self):\n return self.model.objects.by_site()\n \n def get_context_data(self, **kwargs):\n context = super(CategoryView, self).get_context_data(**kwargs)\n products = list(self.object.active_products())\n context['child_categories'] = self.object.get_all_children()\n context['sale'] = find_best_auto_discount(products)\n context['products'] = products\n index_prerender.send(Product, request=self.request, context=context, category=self.object, object_list=products)\n return context\n\ncategory_view = CategoryView.as_view()\n \n# def category_view(request, slug, parent_slugs='', template='product/category.html'):\n# \"\"\"Display the category, its child categories, and its products.\n\n# Parameters:\n# - slug: slug of category\n# - parent_slugs: ignored\n# \"\"\"\n# try:\n# category = Category.objects.get_by_site(slug=slug)\n# products = list(category.active_products())\n# sale = find_best_auto_discount(products)\n\n# except Category.DoesNotExist:\n# return bad_or_missing(request, _('The category you have requested does not exist.'))\n\n# child_categories = category.get_all_children()\n\n# ctx = {\n# 'category': category,\n# 'child_categories': child_categories,\n# 'sale' : sale,\n# 'products' : products,\n# }\n# index_prerender.send(Product, request=request, context=ctx, category=category, object_list=products)\n# return render(request, template, ctx)\n\n\ndef get_configurable_product_options(request, id):\n \"\"\"Used by admin views\"\"\"\n cp = get_object_or_404(ConfigurableProduct, product__id=id)\n options = ''\n for og in cp.option_group.all():\n for opt in og.option_set.all():\n options += '' % (opt.id, str(opt))\n if not options:\n return '' % 
cp.product.slug\n return http.HttpResponse(options, content_type=\"text/html\")\n\n\nclass ProductView(DetailView):\n model = Product\n context_object_name = \"current_product\"\n slug_url_kwarg = 'product_slug'\n default_view_tax = None\n selected_options = ()\n\n def get_template_names(self):\n template = find_product_template(self.product, producttypes=self.product.get_subtypes())\n return [template.template.name]\n\n def get_queryset(self):\n return self.model.objects.active_by_site()\n \n def get_context_data(self, **kwargs):\n self.product = self.get_product_variation()\n default_view_tax = self.get_default_view_tax()\n context = super(ProductView, self).get_context_data(**kwargs)\n context['product'] = self.product\n context['sale'] = find_best_auto_discount(self.product)\n context['error_message'] = self.get_error_message()\n context['default_view_tax'] = default_view_tax\n context = self.product.add_template_context(context=context, request=self.request,\n selected_options=self.selected_options,\n default_view_tax=default_view_tax)\n return context\n\n def get_error_message(self):\n errors = [m for m in get_messages(self.request) if m.level == constants.ERROR] \n try:\n return errors[0]\n except IndexError:\n pass\n\n def get_default_view_tax(self):\n return self.default_view_tax or config_value('TAX', 'DEFAULT_VIEW_TAX')\n\n def get_product_variation(self):\n product = self.object\n if 'ProductVariation' in self.object.get_subtypes():\n self.selected_options = product.productvariation.unique_option_ids\n product = product.productvariation.parent.product\n return product\n\nget_product = ProductView.as_view()\n \n# def get_product(request, product_slug=None, selected_options=(),\n# default_view_tax=None):\n# \"\"\"Basic product view\"\"\"\n\n# errors = [m for m in get_messages(request) if m.level == constants.ERROR]\n\n# try:\n# product = Product.objects.get_by_site(active=True, slug=product_slug)\n# except Product.DoesNotExist:\n# return bad_or_missing(request, _('The product you have requested does not exist.'))\n\n# if default_view_tax is None:\n# default_view_tax = config_value('TAX', 'DEFAULT_VIEW_TAX')\n\n# subtype_names = product.get_subtypes()\n\n# # Save product id for xheaders, in case we display a ConfigurableProduct\n# product_id = product.id\n\n# # Clone product object in order to have current product variations in context (extra_context)\n# current_product = product\n\n# if 'ProductVariation' in subtype_names:\n# selected_options = product.productvariation.unique_option_ids\n# #Display the ConfigurableProduct that this ProductVariation belongs to.\n# product = product.productvariation.parent.product\n# subtype_names = product.get_subtypes()\n\n# best_discount = find_best_auto_discount(product)\n\n# if errors:\n# error_message = errors[0]\n# else:\n# error_message = None\n\n# extra_context = {\n# 'product': product,\n# 'current_product' : current_product,\n# 'default_view_tax': default_view_tax,\n# 'sale': best_discount,\n# 'error_message' : error_message,\n# }\n\n# # Get the template context from the Product.\n# extra_context = product.add_template_context(context=extra_context,\n# request=request, selected_options=selected_options,\n# default_view_tax=default_view_tax)\n# template = find_product_template(product, producttypes=subtype_names)\n# response = render(request, template.template.name, extra_context)\n# try:\n# from django.core.xheaders import populate_xheaders\n# populate_xheaders(request, response, Product, product_id)\n# except ImportError:\n# pass\n# 
return response\n\n\ndef get_price(request, product_slug):\n \"\"\"Get base price for a product, returning the answer encoded as JSON.\"\"\"\n quantity = Decimal('1')\n\n try:\n product = Product.objects.get_by_site(active=True, slug=product_slug)\n except Product.DoesNotExist:\n return http.HttpResponseNotFound(json_encode(('', _(\"not available\"))), content_type=\"text/javascript\")\n\n prod_slug = product.slug\n\n if request.method == \"POST\" and 'quantity' in request.POST:\n try:\n quantity = round_decimal(request.POST['quantity'], places=2, roundfactor=.25)\n except RoundedDecimalError:\n quantity = Decimal('1.0')\n log.warn(\"Could not parse a decimal from '%s', returning '1.0'\", request.POST['quantity'])\n\n if 'ConfigurableProduct' in product.get_subtypes():\n cp = product.configurableproduct\n chosen_options = optionids_from_post(cp, request.POST)\n pvp = cp.get_product_from_options(chosen_options)\n\n if not pvp:\n return http.HttpResponse(json_encode(('', _(\"not available\"))), content_type=\"text/javascript\")\n prod_slug = pvp.slug\n price = moneyfmt(pvp.get_qty_price(quantity))\n else:\n price = moneyfmt(product.get_qty_price(quantity))\n\n if not price:\n return http.HttpResponse(json_encode(('', _(\"not available\"))), content_type=\"text/javascript\")\n\n return http.HttpResponse(json_encode((prod_slug, price)), content_type=\"text/javascript\")\n\n\ndef get_price_detail(request, product_slug):\n \"\"\"Get all price details for a product, returning the response encoded as JSON.\"\"\"\n results = {\n \"success\" : False,\n \"message\" : _(\"not available\")\n }\n price = None\n\n if request.method==\"POST\":\n reqdata = request.POST\n else:\n reqdata = request.GET\n\n try:\n product = Product.objects.get_by_site(active=True, slug=product_slug)\n found = True\n\n prod_slug = product.slug\n\n if 'quantity' in reqdata:\n try:\n quantity = round_decimal(reqdata['quantity'], places=2, roundfactor=.25)\n except RoundedDecimalError:\n quantity = Decimal('1.0')\n log.warn(\"Could not parse a decimal from '%s', returning '1.0'\", reqdata['quantity'])\n else:\n quantity = Decimal('1.0')\n\n if 'ConfigurableProduct' in product.get_subtypes():\n cp = product.configurableproduct\n chosen_options = optionids_from_post(cp, reqdata)\n product = cp.get_product_from_options(chosen_options)\n\n if product:\n price = product.get_qty_price(quantity)\n\n results['slug'] = product.slug\n results['price'] = float(price)\n results['success'] = True\n results['message'] = \"\"\n\n except Product.DoesNotExist:\n found = False\n\n data = json_encode(results)\n if found:\n return http.HttpResponse(data, content_type=\"text/javascript\")\n else:\n return http.HttpResponseNotFound(data, content_type=\"text/javascript\")\n","sub_path":"satchmo/satchmo/apps/product/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"641446571","text":"from typing import Callable, Dict, List, Optional, Set, Tuple, Type\n\nfrom datamodel_code_generator import PythonVersion, snooper_to_methods\nfrom datamodel_code_generator.format import format_code\nfrom datamodel_code_generator.imports import IMPORT_ANNOTATIONS\nfrom datamodel_code_generator.model.enum import Enum\nfrom datamodel_code_generator.parser.base import (\n JsonSchemaObject,\n Parser,\n dump_templates,\n get_singular_name,\n sort_data_models,\n)\nfrom datamodel_code_generator.types import DataType\nfrom prance import 
BaseParser\n\nfrom ..model.base import DataModel, DataModelField\n\n\n@snooper_to_methods(max_variable_length=None)\nclass OpenAPIParser(Parser):\n def __init__(\n self,\n data_model_type: Type[DataModel],\n data_model_root_type: Type[DataModel],\n data_model_field_type: Type[DataModelField] = DataModelField,\n filename: Optional[str] = None,\n base_class: Optional[str] = None,\n target_python_version: PythonVersion = PythonVersion.PY_37,\n text: Optional[str] = None,\n result: Optional[List[DataModel]] = None,\n dump_resolve_reference_action: Optional[Callable[[List[str]], str]] = None,\n ):\n self.base_parser = (\n BaseParser(filename, text, backend='openapi-spec-validator')\n if filename or text\n else None\n )\n\n super().__init__(\n data_model_type,\n data_model_root_type,\n data_model_field_type,\n filename,\n base_class,\n target_python_version,\n text,\n result,\n dump_resolve_reference_action,\n )\n\n def parse_any_of(self, name: str, obj: JsonSchemaObject) -> List[DataType]:\n any_of_data_types: List[DataType] = []\n for any_of_item in obj.anyOf:\n if any_of_item.ref: # $ref\n any_of_data_types.append(\n self.data_type(\n type=any_of_item.ref_object_name,\n ref=True,\n version_compatible=True,\n )\n )\n else:\n singular_name = get_singular_name(name)\n self.parse_object(singular_name, any_of_item)\n any_of_data_types.append(\n self.data_type(\n type=singular_name, ref=True, version_compatible=True\n )\n )\n return any_of_data_types\n\n def parse_all_of(self, name: str, obj: JsonSchemaObject) -> List[DataType]:\n fields: List[DataModelField] = []\n base_classes: List[DataType] = []\n for all_of_item in obj.allOf:\n if all_of_item.ref: # $ref\n base_classes.append(\n self.data_type(\n type=all_of_item.ref_object_name,\n ref=True,\n version_compatible=True,\n )\n )\n\n else:\n fields_ = self.parse_object_fields(all_of_item)\n fields.extend(fields_)\n\n data_model_type = self.data_model_type(\n name,\n fields=fields,\n base_classes=[b.type for b in base_classes],\n auto_import=False,\n custom_base_class=self.base_class,\n )\n self.append_result(data_model_type)\n\n return [self.data_type(type=name, ref=True, version_compatible=True)]\n\n def parse_object_fields(self, obj: JsonSchemaObject) -> List[DataModelField]:\n requires: Set[str] = set(obj.required or [])\n fields: List[DataModelField] = []\n\n for field_name, filed in obj.properties.items(): # type: ignore\n is_list = False\n field_types: List[DataType]\n if filed.ref:\n field_types = [\n self.data_type(\n type=filed.ref_object_name, ref=True, version_compatible=True\n )\n ]\n elif filed.is_array:\n class_name = self.get_class_name(field_name)\n array_fields, array_field_classes = self.parse_array_fields(\n class_name, filed\n )\n field_types = array_fields[0].data_types\n is_list = True\n elif filed.is_object:\n class_name = self.get_class_name(field_name)\n self.parse_object(class_name, filed)\n field_types = [\n self.data_type(type=class_name, ref=True, version_compatible=True)\n ]\n elif filed.enum:\n enum = self.parse_enum(field_name, filed)\n field_types = [\n self.data_type(type=enum.name, ref=True, version_compatible=True)\n ]\n elif filed.anyOf:\n field_types = self.parse_any_of(field_name, filed)\n elif filed.allOf:\n field_types = self.parse_all_of(field_name, filed)\n else:\n data_type = self.get_data_type(filed)\n field_types = [data_type]\n required: bool = field_name in requires\n fields.append(\n self.data_model_field_type(\n name=field_name,\n data_types=field_types,\n required=required,\n is_list=is_list,\n 
)\n )\n return fields\n\n def parse_object(self, name: str, obj: JsonSchemaObject) -> None:\n fields = self.parse_object_fields(obj)\n data_model_type = self.data_model_type(\n name, fields=fields, custom_base_class=self.base_class\n )\n self.append_result(data_model_type)\n\n def parse_array_fields(\n self, name: str, obj: JsonSchemaObject\n ) -> Tuple[List[DataModelField], List[DataType]]:\n if isinstance(obj.items, JsonSchemaObject):\n items: List[JsonSchemaObject] = [obj.items]\n else:\n items = obj.items # type: ignore\n item_obj_data_types: List[DataType] = []\n is_union: bool = False\n for item in items:\n if item.ref:\n item_obj_data_types.append(\n self.data_type(\n type=item.ref_object_name, ref=True, version_compatible=True\n )\n )\n elif isinstance(item, JsonSchemaObject) and item.properties:\n singular_name = get_singular_name(name)\n self.parse_object(singular_name, item)\n item_obj_data_types.append(\n self.data_type(\n type=singular_name, ref=True, version_compatible=True\n )\n )\n elif item.anyOf:\n item_obj_data_types.extend(self.parse_any_of(name, item))\n is_union = True\n elif item.allOf:\n singular_name = get_singular_name(name)\n item_obj_data_types.extend(self.parse_all_of(singular_name, item))\n else:\n item_obj_data_types.append(self.get_data_type(item))\n\n field = self.data_model_field_type(\n data_types=item_obj_data_types,\n required=True,\n is_list=True,\n is_union=is_union,\n )\n return [field], item_obj_data_types\n\n def parse_array(self, name: str, obj: JsonSchemaObject) -> None:\n fields, item_obj_names = self.parse_array_fields(name, obj)\n data_model_root = self.data_model_root_type(\n name, fields, custom_base_class=self.base_class\n )\n\n self.append_result(data_model_root)\n\n def parse_root_type(self, name: str, obj: JsonSchemaObject) -> None:\n if obj.type:\n types: List[DataType] = [self.get_data_type(obj)]\n elif obj.anyOf:\n types = self.parse_any_of(name, obj)\n else:\n types = [\n self.data_type(\n type=obj.ref_object_name, ref=True, version_compatible=True\n )\n ]\n\n data_model_root_type = self.data_model_root_type(\n name,\n [self.data_model_field_type(data_types=types, required=not obj.nullable)],\n custom_base_class=self.base_class,\n )\n self.append_result(data_model_root_type)\n\n def parse_enum(self, name: str, obj: JsonSchemaObject) -> DataModel:\n enum_fields = []\n\n for enum_part in obj.enum: # type: ignore\n if obj.type == 'string':\n default = f\"'{enum_part}'\"\n field_name = enum_part\n else:\n default = enum_part\n field_name = f'{obj.type}_{enum_part}'\n enum_fields.append(\n self.data_model_field_type(name=field_name, default=default)\n )\n\n enum = Enum(self.get_class_name(name), fields=enum_fields)\n self.append_result(enum)\n return enum\n\n def parse(\n self, with_import: Optional[bool] = True, format_: Optional[bool] = True\n ) -> str:\n for obj_name, raw_obj in self.base_parser.specification['components'][\n 'schemas'\n ].items(): # type: str, Dict\n obj = JsonSchemaObject.parse_obj(raw_obj)\n if obj.is_object:\n self.parse_object(obj_name, obj)\n elif obj.is_array:\n self.parse_array(obj_name, obj)\n elif obj.enum:\n self.parse_enum(obj_name, obj)\n elif obj.allOf:\n self.parse_all_of(obj_name, obj)\n else:\n self.parse_root_type(obj_name, obj)\n\n result: str = ''\n if with_import:\n if self.target_python_version == PythonVersion.PY_37:\n self.imports.append(IMPORT_ANNOTATIONS)\n result += f'{self.imports.dump()}\\n\\n\\n'\n\n _, sorted_data_models, require_update_action_models = sort_data_models(\n self.results\n 
)\n\n result += dump_templates(list(sorted_data_models.values()))\n if self.dump_resolve_reference_action:\n result += f'\\n\\n{self.dump_resolve_reference_action(require_update_action_models)}'\n\n if format_:\n result = format_code(result, self.target_python_version)\n\n return result\n","sub_path":"datamodel_code_generator/parser/openapi.py","file_name":"openapi.py","file_ext":"py","file_size_in_byte":10352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"151090469","text":"import asyncio\nfrom asyncio import CancelledError\nimport json\nimport logging.config\nimport os\nimport pathlib\nfrom json import JSONDecodeError\n\nimport yaml\nimport time\nimport aioredis\nfrom aioredis import Redis, Channel\nimport dotenv\n\nfrom quart import Quart\nfrom quart_session import Session as QuartSession\n\n\nwith open(pathlib.Path(__file__).parent / \"logging.conf.yaml\") as logging_file:\n logging.config.dictConfig(yaml.load(logging_file, Loader=yaml.FullLoader))\n\nlogger = logging.getLogger(__name__)\n\napp = Quart(\"xyz\")\n\ndotenv.load_dotenv(verbose=True)\n\napp.secret_key = os.getenv(\"FLASK_SECRET\")\nredis_url = os.getenv(\"REDISTOGO_URL\")\napp.config[\"SESSION_TYPE\"] = \"redis\"\n\napp.config[\"request_number\"] = 1\n\n\n@app.route(\"/\")\nasync def _hello() -> dict:\n request_number = app.config[\"request_number\"]\n app.config[\"request_number\"] = request_number + 1\n request = {\n \"response\": {\"request_number\": request_number},\n \"response_channel\": f\"myreplychannel{request_number}\",\n }\n response = await redis_request_reply(\n redis=app.config[\"SESSION_REDIS\"],\n request=json.dumps(request),\n request_channel=\"mychannel\",\n reply_channel=f\"myreplychannel{request_number}\",\n )\n\n return {\"message\": \"Hello, World!\", \"response\": response}\n\n\n@app.before_serving\nasync def start_redis_listener():\n app.config[\"SESSION_REDIS\"] = await aioredis.create_redis_pool(redis_url)\n QuartSession(app)\n\n loop = asyncio.get_event_loop()\n\n async def task(redis: Redis):\n channel: [Channel, None] = None\n try:\n channel, *_ = await redis.subscribe(\"mychannel\")\n\n async for message in channel.iter(encoding=\"utf-8\"):\n try:\n message = json.loads(message)\n except JSONDecodeError:\n pass\n logger.debug(f\"request from: {str(channel.name)}: {message}\")\n # add some delay\n time.sleep(0.5)\n if isinstance(message, dict):\n response_channel, response = (\n message[\"response_channel\"],\n message[\"response\"],\n )\n logger.debug(\n f\"sending response: {response} to channel {response_channel}\"\n )\n received_count = await redis.publish_json(\n response_channel, response\n )\n logger.debug(f\"received by {received_count} consumers\")\n\n else:\n logger.debug(\"message isn't a dict\")\n\n except CancelledError:\n pass\n finally:\n if channel:\n await redis.unsubscribe(channel.name)\n\n loop.create_task(task(app.config[\"SESSION_REDIS\"]))\n\n\nasync def redis_request_reply(redis, request, request_channel, reply_channel):\n \"\"\"\n Sends a message to a Redis channel, then waits for a singular reply\n\n :param redis: the redis to use\n :param request: the message to send - if a dict, then it is first JSON serialised\n :param request_channel: the channel to send the request to\n :param reply_channel: the channel on which to wait for a reply\n :return: the response received, JSON decoded unless it's not valid JSON\n \"\"\"\n channel: Channel\n channel, *_ = await redis.subscribe(reply_channel)\n try:\n if 
isinstance(request, dict):\n request = json.dumps(request)\n received_count = await redis.publish(request_channel, request)\n if not received_count:\n logger.warning(\n f'message \"{request}\" sent to channel {request_channel} was not received by any subscriber'\n )\n # TODO should we bail at this point?\n # probably - or put in some retries\n else:\n logger.debug(\n f'message \"{request}\" was received by {received_count} consumers'\n )\n\n async def one_message():\n logger.debug(f\"waiting for one message on channel {channel.name}...\")\n async for message in channel.iter(encoding=\"utf-8\"):\n logger.debug(f\"received response {message}\")\n return json.loads(message)\n\n return await asyncio.wait_for(one_message(), timeout=5)\n finally:\n if channel:\n redis.unsubscribe(channel.name)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=\"0.0.0.0\")\n","sub_path":"xyz.py","file_name":"xyz.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"351950325","text":"\"\"\"\nGeoMACH design variable class\nJohn Hwang, July 2014\n\"\"\"\n# pylint: disable=E1101\nfrom __future__ import division\nimport numpy\n\nfrom GeoMACH.PGM.core.PGMobject import PGMobject\n\n\nclass PGMdv(PGMobject):\n\n def __init__(self, shape, val=None, lower=None, upper=None, scale=None):\n super(PGMdv, self).__init__()\n\n self._shape = shape\n self.val = val\n self.lower = lower\n self.upper = upper\n self.scale = scale\n","sub_path":"GeoMACH/PGM/core/PGMdv.py","file_name":"PGMdv.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"445739498","text":"import selenium.webdriver\nfrom selenium.webdriver.common.by import By\n\nclass SelPy:\n\n driver=None\n def __init__(self):\n try:\n dr=selenium.webdriver.Chrome(\"driver/chromedriver.exe\")\n self.driver=dr\n except:\n print(\"Error while starting chrome driver\")\n def navigate(self,url):\n self.driver.delete_all_cookies()\n self.driver.get(url)\n self.driver.find_element(By.XPATH, '//button[text()=\"OK\"]').click()\n \n \n def curURL(self):\n return self.driver.current_url\n","sub_path":"selpy/selpy.py","file_name":"selpy.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"298986501","text":"from rest_framework import serializers\nfrom specials.models import Special\n\nclass SpecialSerializer(serializers.ModelSerializer):\n class Meta:\n model = Special\n fields = ['id','title','description','locations','reoccuring_weekend','start_date','start_time','end_date','end_time','linenos','language','style']\n\n def create(self, validated_data):\n \"\"\"\n Create and return a new `Special` instance, given the validated data.\n \"\"\"\n return Special.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Special` instance, given the validated data.\n \"\"\"\n instance.title = validated_data.get('title', instance.title)\n instance.description = validated_data.get('description', instance.description)\n instance.locations = validated_data.get('locations', instance.locations)\n instance.reoccuring_weekend = validated_data.get('reoccuring_weekend', instance.reoccuring_weekend)\n instance.start_date = validated_data.get('start_date', instance.start_date)\n instance.start_time = validated_data.get('start_time', 
instance.start_time)\n instance.end_date = validated_data.get('end_date', instance.end_date)\n instance.end_time = validated_data.get('end_time', instance.end_time)\n instance.save()\n return instance","sub_path":"specials/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"78257663","text":"print(\"Welcome to my game!\")\n\nenemyhealth = 100\nwhile enemyhealth > 0:\n attack = input(\"What is your attack? \")\n if attack == \"heavy\":\n enemyhealth = enemyhealth - 50\n if attack == \"medium\":\n enemyhealth = enemyhealth - 25\nelse:\n print(\"You Won\")\n\n\n\n\n\n\n\n\n \n","sub_path":"My python game 1.py","file_name":"My python game 1.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"25050079","text":"import sys\nfrom pathlib import Path\nfrom LanguageHelpers import isValidLanguage, genClassByLanguage\n\nif len(sys.argv) > 1:\n\n if sys.argv[1][0] == '-':\n if sys.argv[1] == \"--help\":\n print(\"Give this command a language and a filename. See --options or -o for language options\")\n print(\"Like this:\", sys.argv[0], \"c ClassName\")\n print(\"Or this:\", sys.argv[0], \"c++ helpers/HelperClass\")\n elif sys.argv[1] == \"--options\" or sys.argv[1] == \"-o\":\n print(\"Language options are c, c++, cpp, and cxx\")\n else:\n print(\"Invalid argument, see --help for help\")\n else:\n if (isValidLanguage(sys.argv[1])):\n if len(sys.argv) > 2:\n for i in range(2, len(sys.argv)):\n # Guarantees the path does not contain any special characters or special dir paths\n genClassByLanguage(sys.argv[1], str(Path(sys.argv[i]).resolve()))\n print()\n else:\n print(\"You must provide a class name or class path\")\n\n else:\n print(\"Invalid language given, see --options for language options\")\n \nelse:\n print(\"Use --help for help, or --options for language options\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"251253345","text":"#!/usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n\r\n\"\"\"A tool to produce various statistics from files of BNB RDF data.\"\"\"\r\n\r\nimport bnb_rdf_stats\r\nimport sys\r\n\r\n__author__ = 'Victoria Morris'\r\n__license__ = 'MIT License'\r\n__version__ = '1.0.0'\r\n__status__ = '4 - Beta Development'\r\n\r\nbnb_rdf_stats.main(sys.argv[1:])\r\n","sub_path":"bin/bnb_rdf_stats.py","file_name":"bnb_rdf_stats.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"458536151","text":"# Game merge version 1.25\n# Including works of Wessel, Furkan, Onno, Niels and Ziggy\n# Importing code done by Ziggy\n\nimport pygame\nimport random\nimport time\n\n#pygame init ---\npygame.init()\n\n#database functies\nimport psycopg2\n\ndef interact_with_database(command):\n # Connect and set up cursor\n connection = psycopg2.connect(\"dbname=postgres user=postgres password=E2f3446d78xd\")\n cursor = connection.cursor()\n\n # Execute the command\n cursor.execute(command)\n connection.commit()\n\n # Save results\n results = None\n try:\n results = cursor.fetchall()\n except psycopg2.ProgrammingError:\n # Nothing to fetch\n pass\n\n # Close connection\n cursor.close()\n connection.close()\n\n return results\n\n\n# Uploads a 
score into the hiscore table\ndef upload_score(name, score):\n interact_with_database(\"UPDATE player SET score = {} WHERE name = '{}'\"\n .format(score, name))\n\n# Downloads score data from database\ndef download_scores():\n return interact_with_database(\"SELECT * FROM player\")\n\n\n# Downloads the top score from database\ndef download_top_score():\n result = interact_with_database(\"SELECT * FROM player ORDER BY score DESC\")\n return result\n\n\n\n#colors ---\nwhite = (255,255,255)\nblack = (0,0,0)\nyellow = (255,255,0)\nbright_yellow = (200,200,0)\nblue = (0,0,255)\nbright_blue = (0,0,200)\nred = (200,0,0)\nbright_red = (255,0,0)\ngreen = (0,200,0)\nbright_green = (0,255,0)\nbackground_color = (51, 204, 255)\n\n#board colors ---\ncolors = (red,green,yellow,blue)\ntowercolors = (blue,yellow,green,red)\n\n#text font definitions ---\nsmallText = pygame.font.Font(\"freesansbold.ttf\", 40)\nsmallText2 = pygame.font.Font('freesansbold.ttf', 15)\nsmallText3 = pygame.font.Font(\"freesansbold.ttf\", 50)\nsmallText4 = pygame.font.Font(\"freesansbold.ttf\", 20)\nlargeText = pygame.font.Font('freesansbold.ttf', 90)\nbuttonText = pygame.font.SysFont(\"monospace\", 20)\nhighscoreText = pygame.font.Font('freesansbold.ttf', 20)\n\n#background image definitions ---\nbackground_img = pygame.image.load('euromast-rotterdam.png')\n\n#dice img loading and other settings ---\ndice1_img = pygame.image.load('dice1.png')\ndice2_img = pygame.image.load('dice2.png')\ndice3_img = pygame.image.load('dice3.png')\ndice4_img = pygame.image.load('dice4.png')\ndice5_img = pygame.image.load('dice5.png')\ndice6_img = pygame.image.load('dice6.png')\n\ndice_img = [dice1_img, dice2_img, dice3_img, dice4_img, dice5_img, dice6_img]\n\n# list dice (not used)\n'''dice1 = 1\ndice2 = 2\ndice3 = 3\ndice4 = 4\ndice5 = 5\ndice6 = 6\ndice_list = [dice1,dice2,dice3,dice4,dice5,dice6]'''\n\ndice_choosed = 0\ndice_display = 0\n\n#game settings\ndisplay_width = 1024\ndisplay_height = 650\ndisplay_height_button = 75\ndisplay_x_menu = ((display_width / 2) + (display_width / 3))\ndisplay_x_menu_dice = ((display_width / 2) + (display_width / 3) + ((display_width / 3)/7))\ndisplay_y_menu_dice = ((display_height/4) + (display_height / 6))\ndisplay_height_menu = display_height\ndisplay_width_menu = display_width / 3\n\n#board settings\n#rijen zie Setup and Turn Layout.docx\nrowsx = 8\nrowsy = 16\n\nbeginwidth = (display_width/10)\nblocksizex = (display_width/2)/rowsx\nblocksizey = display_height/rowsy\nrowstower = 5 # hoeveel blokken krijgt de top van de toren?\nprint(\"blocksize is: width=\",blocksizex, \"height=\",blocksizey)\n\n#display window\ngameDisplay = pygame.display.set_mode((display_width,display_height))\npygame.display.set_caption(\"De Euromast\")\nclock = pygame.time.Clock()\n\n# gamestate ---\ngamestate = 0\n\n#buttonstate ---\nclickedbutton = 0\n\n# state 0 is start of game, state 1 is game state\n\n# quit function ---\ndef game_quit():\n pygame.quit()\n quit()\n\n# text display function ---\ndef text_objects(text, font):\n textSurface = font.render(text, True, black)\n return textSurface, textSurface.get_rect()\n\n#title display\ndef title_display(text):\n TextSurf, TextRect = text_objects(text, largeText)\n TextRect.center = ((display_width / 2), (display_height / 4))\n gameDisplay.blit(TextSurf, TextRect)\n\n# button draw function ---\ndef button(msg, x, y, w, h, ic, ac, action=None):\n global clickedbutton\n\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if x+w > mouse[0] > x and y+h > mouse[1] > 
y:\n pygame.draw.rect(gameDisplay, ac,(x,y,w,h))\n\n if click[0] == 1 and action != None:\n clickedbutton = 1\n action()\n pygame.display.flip()\n else:\n pygame.draw.rect(gameDisplay, ic,(x,y,w,h))\n\n textSurf, textRect = text_objects(msg, buttonText)\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\n gameDisplay.blit(textSurf, textRect)\n\n#dirty trick to roll dices\ndef dicebutton(msg, x, y, w, h, ic, ac, action=None):\n\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n pygame.draw.rect(gameDisplay, ac,(x,y,w,h))\n\n if click[0] == 1 and action != None:\n action()\n else:\n pygame.draw.rect(gameDisplay, ic,(x,y,w,h))\n\n textSurf, textRect = text_objects(msg, buttonText)\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\n gameDisplay.blit(textSurf, textRect)\n\n\n#start menu buttons\ndef beginbutton(msg, posx, posy, sizex, sizey, ic, ac, action=None):\n global clickedbutton\n\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if posx + sizex > mouse[0] > posx and posy + sizey > mouse[1] > posy:\n pygame.draw.rect(gameDisplay, ac, (posx, posy, sizex, sizey))\n\n if click[0] == 1 and action != None:\n action()\n return True\n else:\n pygame.draw.rect(gameDisplay, ic, (posx, posy, sizex, sizey))\n textSurf, textRect = text_objects(msg, smallText3)\n textRect.center = ((posx + (sizex / 2)), (posy + (sizey / 2)))\n gameDisplay.blit(textSurf, textRect)\n\n# a button delay function so you dont click another underlying button, put in every function above the while code\ndef clickedButtonDelay():\n global clickedbutton\n if(clickedbutton == 1):\n pygame.time.delay(160)\n clickedbutton = 0\n\n# board draw function ---\ndef drawgameboard(display_width, display_height, towercolors, colors, beginwidth = (display_width/10), blocksizex = ((display_width/2)/rowsx), blocksizey = (display_height/rowsy), rowsx = 8, rowsy = 16, rowstower = 5):\n color = 0\n towercolor = 0\n for i in range(rowsx):\n for j in range(rowsy):\n if (j > (rowstower - 1)): # toren (dikker) 2 rij blokken\n pygame.draw.rect(gameDisplay, colors[color], (beginwidth + (i * blocksizex), j * blocksizey, blocksizex, blocksizey), 2)\n # cat[color] #this is how you see the body category (prints when block is red: \"Entertainment\")\n else:\n if (i % 2 == 0): # toren 1 rij blokken\n pygame.draw.rect(gameDisplay, towercolors[towercolor], (beginwidth + (i * blocksizex) + (blocksizex / 2), j * blocksizey, blocksizex, blocksizey), 2)\n # cat[towercolor] #this is how you see the body category (prints when block is yellow: \"History\")\n\n #adding colors to each row including the towers top\n if (i % 2 == 1):\n if (color < (len(colors) - 1)):\n color += 1\n else:\n color = 0\n\n if (towercolor < (len(towercolors) - 1)):\n towercolor += 1\n else:\n towercolor = 0\n\n#player functions ---\nclass Player:\n def __init__(self, name, score):\n self.Name = name\n self.Score = score\n\n def score_increase(self):\n self.Score += 1\n\nplayer_1 = Player(\"player 1\", 0)\nplayer_2 = Player(\"player 2\", 0)\nplayer_3 = Player(\"player 3\", 0)\nplayer_4 = Player(\"player 4\", 0)\n\nplayers = download_top_score()\n\n# dice functions ---\ndef dice_roll():\n global dice_choosed\n choice = 0\n click = pygame.mouse.get_pressed()\n if (click != 0):\n choice = random.randint(1, 6)\n if (choice >= 0):\n gameDisplay.blit(pygame.transform.scale(dice_img[choice - 1], (75, 75)),(display_x_menu_dice, display_y_menu_dice))\n dice_choosed = choice\n\n pygame.time.delay(40)\n return 
dice_choosed\n\ndef final_dice(): # final dice stays visible\n global dice_choosed\n global dice_display\n if (dice_choosed > 0):\n dice_display = dice_choosed # dice_display is de waarde die altijd blijft dice_choosed NIET!\n\n dice_choosed = 0\n\n if (dice_display > 0):\n gameDisplay.blit(pygame.transform.scale(dice_img[dice_display - 1], (75, 75)),(display_x_menu_dice, display_y_menu_dice))\n\n# game instructions draw function ---\ndef game_instructions():\n clickedButtonDelay()\n print(\"Start instructions\")\n\n pygame.draw.rect(gameDisplay, white, (display_width / 18.5, display_height / 8, ((display_width / 2) + (display_width / 2.5)), ((display_height / 2) + (display_height / 4))))\n TextSurf, TextRect = text_objects(\"Instructions\", smallText)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 10)))\n gameDisplay.blit(TextSurf, TextRect)\n\n #regel 1\n TextSurf, TextRect = text_objects(\"1. Elke Player kiest een eigen categorie. Let op! 1 speler per categorie.\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 5.5)))\n gameDisplay.blit(TextSurf, TextRect)\n\n\n #regel 2\n TextSurf, TextRect = text_objects(\"2 Player 1 begint het spel. Vervolgens is Player 2 enz.\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 4.3)))\n gameDisplay.blit(TextSurf, TextRect)\n\n\n #regel 3\n TextSurf, TextRect = text_objects(\"3 De Player die aan de beurt is krijgt een vraag die hij/zij moet beantwoorden\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 3.5)))\n gameDisplay.blit(TextSurf, TextRect)\n\n TextSurf, TextRect = text_objects(\"door middel van klikken op het antwoord.\",smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 3.0)))\n gameDisplay.blit(TextSurf, TextRect)\n\n\n #regel 4\n TextSurf, TextRect = text_objects(\"4 Als de Player de vraag goed heeft beantwoord mag hij/zij een\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 2.6)))\n gameDisplay.blit(TextSurf, TextRect)\n\n TextSurf, TextRect = text_objects(\"richting kiezen en vervolgens dobbelen.\",smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 2.3)))\n gameDisplay.blit(TextSurf, TextRect)\n\n #regel 5\n TextSurf, TextRect = text_objects(\"5 De player verplaatst het aantal stappen dat is gedobbelt.\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 12) + (display_height / 2.0)))\n gameDisplay.blit(TextSurf, TextRect)\n\n #regel 6\n TextSurf, TextRect = text_objects(\"6 Einde Beurt. 
Volgende Player.\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 12) + (display_height / 1.8)))\n gameDisplay.blit(TextSurf, TextRect)\n\n while game_instructions:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n button(\"Start\", ((display_width / 20) * 12), ((display_height / 8.5) * 6),((display_width / 6) + (display_width / 6)), 75, green, bright_green, start_game)\n button(\"Main Menu\",(((display_width / 20)* 2) - (display_width/40)), ((display_height / 8.5) * 6),((display_width / 6) + (display_width / 6)), 75, green, bright_green, startmenu_game)\n\n pygame.display.update()\n\ndef start_game():\n print(\"Start playing game\")\n game_loop()\n\ndef startmenu_game():\n print(\"Start startmenu\")\n startmenu_loop()\n\n\n# pause draw function ---\ndef pause_menu():\n #mouse = pygame.mouse.get_pos()\n clickedButtonDelay()\n pygame.draw.rect(gameDisplay, white, (display_width / 6, display_height / 8, ((display_width / 2) + (display_width /6)), ((display_height / 2) + (display_height /4))))\n smallText = pygame.font.Font(\"freesansbold.ttf\", 40)\n TextSurf, TextRect = text_objects(\"Paused\", smallText)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 10)))\n gameDisplay.blit(TextSurf, TextRect)\n\n print(\"Start pausemenu\")\n\n while pause_menu:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n button(\"Continue\", ((display_width / 6) + (display_width / 6)), ((display_height/20) * 5), ((display_width / 6) + (display_width / 6)), 75, green, bright_green, pause_menu_quit)\n button(\"Options\", ((display_width / 6) + (display_width / 6)), ((display_height/20) * 8), ((display_width / 6) + (display_width / 6)), 75, blue, bright_blue, pause_menu_options)\n button(\"Restart\", ((display_width / 6) + (display_width / 6)), ((display_height/20) * 11), ((display_width / 6) + (display_width / 6)), 75, yellow, bright_yellow, startmenu_game)\n button(\"Quit\", ((display_width / 6) + (display_width / 6)), ((display_height/20) * 14), ((display_width / 6) + (display_width / 6)), 75,red, bright_red, game_quit)\n pygame.display.update()\n\ndef pause_options():\n #mouse = pygame.mouse.get_pos()\n clickedButtonDelay()\n pygame.draw.rect(gameDisplay, white, (\n display_width / 6, display_height / 8, ((display_width / 2) + (display_width / 6)),\n ((display_height / 2) + (display_height / 4))))\n TextSurf, TextRect = text_objects(\"Options\", smallText)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 10)))\n gameDisplay.blit(TextSurf, TextRect)\n\n print(\"Start pause options\")\n\n while pause_menu:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n button(\"Continue\", ((display_width / 6) + (display_width / 6)), ((display_height / 20) * 6),((display_width / 6) + (display_width / 6)), 75, green, bright_green, pause_menu_quit)\n\n pygame.display.update()\n\ndef pause_menu_quit():\n game_loop()\n\ndef pause_menu_options():\n pause_options()\n\n# temporary fix\ndef font_temp():\n return pygame.font.Font(None, 30)\n#score functions ---\ndef current_scores(player):\n n = 0\n highscoreTitleSurf, highscoreTitleRect = text_objects(\"Scores\", highscoreText)\n highscoreTitleRect.center = ((display_width / 1.1), (display_height / 1.4))\n gameDisplay.blit(highscoreTitleSurf, highscoreTitleRect)\n # deze for loop gaat door de spelerslijst heen, en plaatst naam en score op het scherm.\n 
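# each entry 'a' is expected to be a (name, score) pair from download_top_score(); n steps by -25, so each row is drawn 25 px lower\n    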
for a in player:\n score_text = font_temp().render(\"{}: {}\".format(a[0], a[1]),1, (0, 0, 0))\n gameDisplay.blit(score_text, (display_width - 140, (display_height - (160 + n))))\n n += -25\n\n# startmenu state function ---\ndef startmenu_loop():\n clickedButtonDelay()\n running = True\n\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n gameDisplay.fill(background_color)\n gameDisplay.blit(background_img, (-250, -40))\n\n title_display(\"De Euromast\")\n\n # START button\n beginbutton(\"Start\", (display_width / 4), ((display_height / 4) + (display_height / 4)), (display_width / 2), (display_height_button), green, bright_green, game_instructions)\n\n # EXIT button\n beginbutton(\"Quit\", (display_width / 4), ((display_height / 4) + (display_height / 4) + (display_height / 4)), (display_width / 2), (display_height_button), red, bright_red, game_quit)\n\n pygame.display.update()\n\n\n# playing state function ---\ndef game_loop():\n clickedButtonDelay()\n #loop for window + window.QUIT\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n\n #print(event)\n #background color\n gameDisplay.fill(background_color)\n\n # drawgameboard(width, height, towercolors, colors) # here we draw the gameboard with all the default values\n # or\n drawgameboard(display_width, display_height, towercolors, colors, beginwidth, blocksizex, blocksizey, rowsx, rowsy,rowstower) # here we draw the gameboard WITH non-deafult values\n\n #right menu dice\n pygame.draw.rect(gameDisplay, bright_green, (display_x_menu, 0, (display_width_menu),(display_height_menu)))\n # on window color positon (x) menu position (y) menu menu width menu height\n\n # get mouse pos\n mouse = pygame.mouse.get_pos()\n\n # final dice shows latest dice rolled\n final_dice()\n # PAUSE button\n button(\"Pause\", (display_width + 1 - (display_width_menu / 4)), 0, (display_width_menu / 4), 50, white, white, pause_menu)\n\n #Roll Button\n dice_button = dicebutton(\"Roll!\", (display_x_menu), (display_height / 6), (display_width_menu / 2),display_height_button, bright_red, red, dice_roll)\n\n # dit laat de huidige scores zien rechtsonder het scherm\n current_scores(players)\n # dit zorgt ervoor dat als spelers winnen, ze punten krijgen.\n addScore()\n #test score\n if pygame.mouse.get_pressed()[0]:\n player_1.score_increase()\n\n\n # window update\n pygame.display.update()\n clock.tick(15)\n\n# deze functie is een win conditie: als de speler de laatste tegel behaald.\ndef addScore():\n if player_1.startY > ((display_height * 0.95) + 15 * .063):\n player_1.Score += 100\n return player_1.Score\n elif player_2.startY > ((display_height * 0.95) + 15 * .063):\n player_2.Score += 100\n return player_2.Score\n elif player_3.startY > ((display_height * 0.95) + 15 * .063):\n player_3.Score += 100\n return player_3.Score\n elif player_4.startY > ((display_height * 0.95) + 15 * .063):\n player_4.Score += 100\n return player_4.Score\n\n# winnersmenu function ---\ndef winnersmenu_game():\n clickedButtonDelay()\n print(\"Finished game, winnersmenu here\")\n\n#gamestate calling ---\n#start game by state (default 0)\nif gamestate == 0:\n startmenu_loop()\nelif gamestate == 1:\n game_loop()","sub_path":"merging_24-1-2017/game + database functies furkan.py","file_name":"game + database functies furkan.py","file_ext":"py","file_size_in_byte":18363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"328509652","text":"import 
muda\nfrom muda.deformers import BackgroundNoise\nimport jams\n\nfrom pydub.generators import WhiteNoise\n\n#%%\nduration = 5000\nwn = WhiteNoise().to_audio_segment(duration=duration)\nwn.export('noise.wav', format='wav')\n\n#%%\nj_ori = muda.load_jam_audio(jams.JAMS(), 'original.wav')\ndeformer = BackgroundNoise(files='noise.wav', n_samples=1, weight_max=0.05, weight_min=0.01)\njam_out = deformer.transform(j_ori)\n\nfor i, jam_out in enumerate(jam_out):\n muda.save('output_{:02d}.wav'.format(i),\n 'output_{:02d}.jams'.format(i),\n jam_out)","sub_path":"MudaTest.py","file_name":"MudaTest.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"509769792","text":"from __future__ import print_function\nimport sys, os, atexit\nimport pprint as pp\nfrom json import JSONEncoder, loads, dumps\nfrom flask import request, send_from_directory, make_response, Response, Blueprint\n# from flask.json import json_encoder, json_decoder\nfrom flask import json\nfrom werkzeug import secure_filename, FileStorage\nfrom urllib3.exceptions import MaxRetryError\n# from PDB2PQR_web import app\n# import storage_utils\nfrom . import storage_utils\nfrom io import BytesIO\n\nstorage_app = Blueprint('storage_app', __name__)\n\n''' \n Below is the endpoint to interact with the storage container.\n Ideally, this will run within its own container via main.py\n'''\n\nMINIO_URL = os.environ.get('MINIO_URL', 'localhost:9000')\nMINIO_CACHE_DIR = os.environ.get('STORAGE_CACHE_DIR', '/apbs-rest/.minio_cache')\nMINIO_ACCESS_KEY = os.environ.get('MINIO_ACCESS_KEY')\nMINIO_SECRET_KEY = os.environ.get('MINIO_SECRET_KEY')\nJOB_BUCKET_NAME = os.environ.get('MINIO_JOB_BUCKET', 'jobs')\n\nminioClient = storage_utils.get_minio_client(MINIO_URL, MINIO_ACCESS_KEY, MINIO_SECRET_KEY)\nstorageClient = storage_utils.StorageClient(MINIO_URL, MINIO_CACHE_DIR, MINIO_ACCESS_KEY, MINIO_SECRET_KEY)\natexit.register(storageClient.clear_cache)\n\n@storage_app.route('/', methods=['GET'])\n@storage_app.route('/check', methods=['GET'])\ndef is_Alive():\n return '', 200\n\n@storage_app.route('/api/storage//', methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS'])\n@storage_app.route('/api/storage/', methods=['DELETE'])\ndef storage_service(job_id, file_name=None):\n # def storage_service(job_id, file_name=None):\n \"\"\"Endpoint serving as the gateway to storage bucket\"\"\"\n \n if file_name:\n object_name = os.path.join(job_id, file_name)\n # print('%s %s' % (request.method, object_name))\n\n if request.method == 'GET':\n return_json = False\n if 'json' in request.args.keys():\n if request.args['json'].lower() == 'true':\n return_json = True\n\n if not return_json:\n '''send_file_from_directory'''\n # file_path_in_cache = storageClient.fget_object(JOB_BUCKET_NAME, object_name)\n # file_dir = os.path.dirname(file_path_in_cache)\n # return send_from_directory(file_dir, file_path_in_cache.split('/')[-1])\n\n try:\n file_path_in_cache = storageClient.fget_object(JOB_BUCKET_NAME, object_name)\n file_dir = os.path.dirname(file_path_in_cache)\n return send_from_directory(file_dir, file_path_in_cache.split('/')[-1])\n except MaxRetryError:\n return 'Error in retrieving file\\n', 500\n except:\n return 'File %s does not exist\\n' % file_name, 404\n else:\n try:\n file_str = storageClient.get_object(JOB_BUCKET_NAME, object_name)\n file_str_json = { object_name: file_str.decode('utf-8') }\n # response = make_response(JSONEncoder().encode(file_str_json))\n response = 
make_response( dumps(file_str_json) )\n response.headers['Content-Type'] = 'application/json'\n http_response_code = 200\n # return response, http_response_code\n except MaxRetryError:\n json_string = {object_name: None}\n response = make_response(dumps(json_string))\n response.headers['Content-Type'] = 'application/json'\n http_response_code = 500\n # return response, 500\n except Exception as e:\n # import traceback\n # json_string = {object_name: None, 'error': str(e), 'traceback': traceback.format_exc()}\n json_string = {object_name: None}\n response = make_response(dumps(json_string))\n response.headers['Content-Type'] = 'application/json'\n http_response_code = 500\n # return response, 500\n finally:\n return response, http_response_code\n\n elif request.method == 'PUT':\n try:\n payload = loads(request.data)\n except:\n payload = request.data\n\n elif request.method == 'POST':\n EXTENSION_WHITELIST = set(['pqr', 'pdb', 'in', 'p'])\n # pp.pprint(dict(request.files))\n # pp.pprint(request.form['job_id'])\n\n # pp.pprint(request.files.keys())\n print('request.files keys:')\n for key in request.files.keys():\n print(' ', key)\n try:\n file_data = request.files['file_data']\n # print(type(file_data), flush=True)\n except:\n # file_data = BytesIO(request.data)\n # print(request.data.decode('utf-8'))\n\n file_data = FileStorage(\n stream=BytesIO(request.data),\n filename=file_name,\n )\n # print(type(file_data))\n\n if file_data.filename:\n file_name = secure_filename(file_data.filename)\n if file_data.filename and file_name:\n storageClient.put_object(JOB_BUCKET_NAME, object_name, file_data)\n # if file_data.filename and allowed_file(file_name, EXTENSION_WHITELIST):\n # # print('uploading to bucket')\n # storageClient.put_object(JOB_BUCKET_NAME, object_name, file_data)\n # elif not allowed_file(file_name, EXTENSION_WHITELIST):\n # return 'Unsupported media type', 415\n\n # time.sleep(1)\n return 'Success', 201\n\n elif request.method == 'DELETE':\n object_list = []\n if file_name is None:\n # get list of objects with prefix\n # for each object, delete from bucket\n job_objects = storageClient.list_objects(JOB_BUCKET_NAME, prefix=job_id+'/')\n for obj in job_objects:\n object_list.append(obj.object_name)\n\n else:\n # delete single object from bucket\n object_list.append(object_name)\n\n storageClient.remove_objects(JOB_BUCKET_NAME, object_list)\n\n return 'Success', 204\n\n elif request.method == 'OPTIONS':\n options = ['GET', 'PUT', 'POST', 'DELETE']\n response = make_response()\n response = storage_utils.get_request_options(response, options)\n http_response_code = 204\n \n return response, http_response_code","sub_path":"src/storage/service/storage_service.py","file_name":"storage_service.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"512733634","text":"class Solution:\n def asteroidCollision(self, asteroids: List[int]) -> List[int]:\n stack = []\n i = 0\n \n # astroid + ile stack'e at\n # değilse collision var mı\n # varsa durum ne\n while i < len(asteroids):\n if asteroids[i] > 0:\n stack.append(asteroids[i])\n else:\n while len(stack) > 0 and stack[-1] > 0 and stack[-1] < abs(asteroids[i]):\n stack.pop()\n \n if len(stack) <= 0 or stack[-1] < 0:\n stack.append(asteroids[i])\n elif stack[-1] == abs(asteroids[i]):\n stack.pop()\n i += 1\n \n return 
stack\n","sub_path":"asteroidCollision.py","file_name":"asteroidCollision.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"108269562","text":"import os\nimport sys\nimport subprocess as sp\n\nfrom genomepy.plugin import Plugin\nfrom genomepy.utils import mkdir_p, cmd_ok, run_index_cmd\n\nclass Hisat2Plugin(Plugin):\n def after_genome_download(self, genome):\n if not cmd_ok(\"hisat2-build\"):\n return\n\n # Create index dir\n index_dir = genome.props[\"hisat2\"][\"index_dir\"]\n index_name = genome.props[\"hisat2\"][\"index_name\"] \n mkdir_p(index_dir)\n\n # Create index\n cmd = \"hisat2-build {} {}\".format(genome.filename, index_name)\n run_index_cmd(\"hisat2\", cmd)\n \n def get_properties(self, genome):\n props = {\n \"index_dir\": os.path.join(\n os.path.dirname(genome.filename), \"index\", \"hisat2\"\n ),\n \"index_name\": os.path.join(\n os.path.dirname(genome.filename), \"index\", \"hisat2\", genome.name\n ),\n }\n return props\n","sub_path":"genomepy/plugins/hisat2.py","file_name":"hisat2.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"64746175","text":"\"\"\" Field module. \"\"\"\n\n# ISC License\n#\n# Copyright (c) 2020, Paul Wilhelm, M. Sc. \n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nimport numpy as np\nfrom numba import jit, prange, set_num_threads\nfrom magneticalc.Assert_Dialog import Assert_Dialog\nfrom magneticalc.BiotSavart_CUDA import BiotSavart_CUDA\nfrom magneticalc.BiotSavart_JIT import BiotSavart_JIT\nfrom magneticalc.Debug import Debug\nfrom magneticalc.Theme import Theme\n\n\nclass Field:\n \"\"\" Field class. 
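Computes and stores sampled A-/B-field vectors via the Biot-Savart JIT or CUDA backend. 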
\"\"\"\n\n def __init__(self, backend: int, _type: int, distance_limit: float, length_scale: float):\n \"\"\"\n Initializes an empty field.\n\n @param backend: Backend index (0: JIT; 1: JIT + CUDA)\n @param _type: Field type to display (0: A-field; 1: B-field)\n @param distance_limit: Distance limit (mitigating divisions by zero)\n @param length_scale: Length scale (m)\n \"\"\"\n Debug(self, \": Init\")\n\n self._backend = backend\n self._type = _type\n self._distance_limit = distance_limit\n self._length_scale = length_scale\n\n self._total_limited = None\n self._vectors = None\n\n def is_valid(self) -> bool:\n \"\"\"\n Indicates valid data for display.\n\n @return: True if data is valid for display, False otherwise\n \"\"\"\n return \\\n self._total_limited is not None and \\\n self._vectors is not None\n\n def invalidate(self):\n \"\"\"\n Resets data, hiding from display.\n \"\"\"\n Debug(self, \".invalidate()\", color=(128, 0, 0))\n\n self._total_limited = None\n self._vectors = None\n\n def get_type(self) -> int:\n \"\"\"\n Gets field type.\n\n @return: Field type (0: A-field; 1: B-field)\n \"\"\"\n return self._type\n\n def get_units(self) -> str:\n \"\"\"\n Gets field units.\n\n @return: Field units\n \"\"\"\n return [\n \"Tm\", # A-field: Tesla · meter\n \"T\" # B-field: Tesla\n ][self._type]\n\n def get_vectors(self):\n \"\"\"\n Gets field vectors. (The selected field type determined which field was calculated.)\n\n @return: Ordered list of 3D vectors (field vectors & corresponding sampling volume points have the same indices)\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated field\")\n\n return self._vectors\n\n def get_total_limited(self) -> int:\n \"\"\"\n Gets total number of distance limited points.\n\n @return: Total number of distance limited points\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated field\")\n\n return self._total_limited\n\n # ------------------------------------------------------------------------------------------------------------------\n\n def recalculate(self, wire, sampling_volume, progress_callback, num_cores: int) -> bool:\n \"\"\"\n Recalculates field vectors.\n\n @param wire: Wire\n @param sampling_volume: Sampling volume\n @param progress_callback: Progress callback\n @param num_cores: Number of cores to use for multiprocessing\n @return: True if successful, False if interrupted (CUDA backend currently not interruptable)\n \"\"\"\n\n # Default to JIT backend if CUDA backend is selected but not available\n if self._backend == 1:\n if not BiotSavart_CUDA.is_available():\n Debug(\n self,\n f\".recalculate(): WARNING: CUDA backend not available, defaulting to JIT backend\",\n color=Theme.WarningColor,\n force=True\n )\n self._backend = 0\n\n if self._backend == 0:\n\n # Initialize Biot-Savart JIT backend\n biot_savart = BiotSavart_JIT(\n self._type,\n self._distance_limit,\n self._length_scale,\n wire.get_dc(),\n wire.get_elements(),\n sampling_volume.get_points(),\n sampling_volume.get_permeabilities(),\n progress_callback\n )\n\n # Fetch result using Biot-Savart JIT backend\n set_num_threads(num_cores)\n tup = biot_savart.get_result()\n\n elif self._backend == 1:\n\n # Initialize Biot-Savart CUDA backend\n biot_savart = BiotSavart_CUDA(\n self._type,\n self._distance_limit,\n self._length_scale,\n wire.get_dc(),\n wire.get_elements(),\n sampling_volume.get_points(),\n sampling_volume.get_permeabilities(),\n progress_callback\n )\n\n # Fetch result using Biot-Savart JIT backend\n set_num_threads(num_cores)\n tup = 
biot_savart.get_result()\n\n else:\n\n Debug(self, f\".recalculate(): No such backend: {self._backend}\", color=Theme.WarningColor, force=True)\n return False\n\n # Handle interrupt\n if tup is None:\n return False\n\n self._total_limited = tup[0]\n self._vectors = tup[1]\n\n # Prints the sampling volume points, current elements and field vectors; may be used for debugging:\n \"\"\"\n def print_array(array): return \"np.array([\" + \",\".join([f\"[{p[0]},{p[1]},{p[2]}]\" for p in array]) + \"])\"\n\n element_centers = [element[0] for element in wire.get_elements()]\n element_directions = [element[1] for element in wire.get_elements()]\n\n import sys\n import numpy\n numpy.set_printoptions(threshold=sys.maxsize)\n\n print(\"sampling_volume_points =\", print_array(sampling_volume.get_points()))\n print(\"element_centers =\", print_array(element_centers))\n print(\"element_directions =\", print_array(element_directions))\n print(\"vectors =\", print_array(self._vectors))\n \"\"\"\n\n return True\n\n # ------------------------------------------------------------------------------------------------------------------\n\n @staticmethod\n @jit(nopython=True, parallel=True)\n def get_arrows(\n sampling_volume_points,\n field_vectors,\n line_pairs,\n head_points,\n arrow_scale: float,\n magnitude_limit: float\n ):\n \"\"\"\n Returns the field arrow parameters needed by L{VispyCanvas}.\n\n @param sampling_volume_points: Sampling volume points\n @param field_vectors: Field vectors\n @param line_pairs: Arrow line pairs (ordered list of arrow start/stop 3D points)\n @param head_points: Arrow head points (ordered list of arrow stop 3D points)\n @param arrow_scale: Arrow scale\n @param magnitude_limit: Magnitude limit (mitigating divisions by zero)\n \"\"\"\n for i in prange(len(field_vectors)):\n\n # Calculate field vector magnitude (mitigating divisions by zero)\n field_vector_length = np.sqrt(\n field_vectors[i][0] ** 2 + field_vectors[i][1] ** 2 + field_vectors[i][2] ** 2\n )\n if field_vector_length < magnitude_limit:\n field_vector_length = magnitude_limit\n\n # Calculate normalized field direction\n field_direction_norm = field_vectors[i] / field_vector_length\n\n # Calculate arrow start & end coordinates\n p_start = sampling_volume_points[i] + field_direction_norm / 2 / 2 * arrow_scale\n p_end = sampling_volume_points[i] - field_direction_norm / 2 / 2 * arrow_scale\n\n # Populate arrow line & head coordinates\n line_pairs[2 * i + 0] = p_start\n line_pairs[2 * i + 1] = p_end\n head_points[i] = p_end\n\n return line_pairs, head_points\n","sub_path":"magneticalc/Field.py","file_name":"Field.py","file_ext":"py","file_size_in_byte":8507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"40492549","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# @author: Manuel Guenther \n# @date: Wed May 1 11:33:00 CEST 2013\n#\n# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, version 3 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\nfrom __future__ import print_function\n\n# import required bob modules\nimport bob.db.atnt\nimport bob.io.base\nimport bob.io.image\nimport bob.ip.base\nimport bob.ip.gabor\nimport bob.measure\n\nimport os, sys\nimport numpy, math\nimport matplotlib\nmatplotlib.use('pdf')\n# enable LaTeX interpreter\nmatplotlib.rc('text', usetex=True)\nmatplotlib.rc('font', family='serif')\nmatplotlib.rc('lines', linewidth = 4)\nfrom matplotlib import pyplot\n\nfrom .utils import atnt_database_directory, load_images\n\n\n# define Gabor wavelet transform class globally since it is reused for all images\ngabor_wavelet_transform = bob.ip.gabor.Transform(k_max = 0.25 * math.pi)\n# pre-allocate Gabor wavelet transform image in the desired size\ntrafo_image = numpy.ndarray((gabor_wavelet_transform.number_of_wavelets, 112, 92), numpy.complex128)\n\ndef extract_feature(image, extractor):\n \"\"\"Extracts the Gabor graphs from the given image\"\"\"\n\n # perform Gabor wavelet transform on the image\n gabor_wavelet_transform.transform(image, trafo_image)\n\n # extract the Gabor graphs from the feature image\n gabor_graph = extractor.extract(trafo_image)\n\n # return the extracted graph\n return gabor_graph\n\n\n# define a certain Gabor jet similarity function that should be used\nSIMILARITY_FUNCTION = bob.ip.gabor.Similarity('PhaseDiffPlusCanberra', gabor_wavelet_transform)\n\ndef main():\n \"\"\"This function will perform Gabor graph comparison test on the AT&T database.\"\"\"\n\n # Check the existence of the AT&T database and download it if not\n # Also check if the AT&T database directory is overwritten by the command line\n image_directory = atnt_database_directory(sys.argv[1] if len(sys.argv) > 1 else None)\n\n # use the bob.db interface to retrieve information about the Database\n db = bob.db.atnt.Database(original_directory = image_directory)\n\n # The protocol does not exist for AT&T, but to be able to exchange the database, we define it here.\n protocol = None\n\n # The group is 'dev', throughout\n group = 'dev'\n\n # The images of the AT&T database are already cropped, so we don't need to specify a face cropper.\n face_cropper = None\n # For other databases you might want to use:\n# face_cropper = bob.ip.base.FaceEyesNorm(crop_size = (80,64), right_eye = (16,15), left_eye = (16,48))\n\n # After image cropping, we apply an image preprocessor\n preprocessor = bob.ip.base.TanTriggs()\n \n # The image resolution of the (cropped) images, which might change with the database\n image_resolution = (112, 92)\n\n #####################################################################\n ### Training\n\n # for Gabor graphs, no training is required.\n print(\"Creating Gabor graph machine\")\n # create a machine that will produce Gabor graphs with inter-node distance (4,4)\n graph_extractor = bob.ip.gabor.Graph(first=(8,6), last=(image_resolution[0]-8, image_resolution[1]-6), step=(4,4))\n\n #####################################################################\n ### extract Gabor graph features for all model and probe images\n\n #####################################################################\n ### extract eigenface features of model and probe images\n\n model_ids = db.model_ids(groups = group)\n print(\"Extracting %d models\" % len(model_ids))\n # generate models for each model ID\n models = {}\n for model_id in model_ids:\n # load enroll images for the current model ID\n enroll_images = load_images(db, db.enroll_files(protocol = protocol, groups = group, model_id = model_id), face_cropper, 
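# 'preprocessor' here is the bob.ip.base.TanTriggs instance created above; per the comment earlier, it runs after the (optional) face cropper\n        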
preprocessor)\n # extract features for all enroll images and store all of them\n models[model_id] = [extract_feature(enroll_image, graph_extractor) for enroll_image in enroll_images]\n\n probe_files = db.probe_files(protocol = protocol, groups = group)\n print(\"Extracting %d probes\" % len(probe_files))\n probe_images = load_images(db, probe_files, face_cropper, preprocessor)\n # extract probe features and store them by probe ID (which is the File.id)\n probes = {}\n for i in range(len(probe_files)):\n probe_id = probe_files[i].id\n probes[probe_id] = extract_feature(probe_images[i], graph_extractor)\n\n #####################################################################\n ### compute scores, we here choose a simple Euclidean distance measure\n positive_scores = []\n negative_scores = []\n\n print(\"Computing scores\")\n\n # iterate through models and probes and compute scores\n model_count = 1\n for model_id, model in models.items():\n # provide status information\n print(\"\\rModel\", model_count, \"of\", len(models), end='')\n sys.stdout.flush()\n model_count += 1\n\n # the client ID that is attached to the model\n model_client_id = db.get_client_id_from_model_id(model_id)\n # get the probe files, which should be compared with this model\n model_probe_files = db.probe_files(protocol = protocol, groups = group, model_id = model_id)\n for probe_file in model_probe_files:\n # get the according probe feature using the File.id of the probe file\n probe_feature = probes[probe_file.id]\n # compute local scores for each model gabor jet and each probe jet\n score = 0.\n for gabor_jet_index in range(len(probe_feature)):\n scores = []\n # compute the similarity to all model jets\n for model_feature_index in range(len(model)):\n scores.append(SIMILARITY_FUNCTION(model[model_feature_index][gabor_jet_index], probe_feature[gabor_jet_index]))\n # .. and take the most similar one\n score += max(scores)\n # the final score is computed as the average over all positions, taking the most similar model jet\n score /= len(probe_feature)\n\n # check if this is a positive score\n if model_client_id == probe_file.client_id:\n positive_scores.append(score)\n else:\n negative_scores.append(score)\n\n print(\"\\nEvaluation\")\n # convert list of scores to numpy arrays\n positives = numpy.array(positive_scores)\n negatives = numpy.array(negative_scores)\n\n # compute equal error rate\n threshold = bob.measure.eer_threshold(negatives, positives)\n FAR, FRR = bob.measure.farfrr(negatives, positives, threshold)\n\n print(\"Result: FAR\", FAR, \"and FRR\", FRR, \"at threshold\", threshold)\n\n # plot ROC curve\n bob.measure.plot.roc(negatives, positives, CAR=True)\n pyplot.xlabel(\"False Acceptance Rate (\\%)\")\n pyplot.ylabel(\"Correct Acceptance Rate (\\%)\")\n pyplot.title(\"ROC Curve for Gabor phase based AT\\&T Verification Experiment\")\n pyplot.grid()\n pyplot.axis([0.1, 100, 0, 100]) #xmin, xmax, ymin, ymax\n\n # save plot to file\n pyplot.savefig(\"gabor_graph.pdf\")\n print(\"Saved figure 'gabor_graph.pdf'\")\n\n # show ROC curve.\n # enable it if you like. 
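(To use it, switch matplotlib.use('pdf') above to an interactive backend first.) 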
This will open a window and display the ROC curve\n# pyplot.show()\n\n","sub_path":"bob/example/faceverify/gabor_graph.py","file_name":"gabor_graph.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"16272779","text":"'''\njson_to_csv.py\n'''\n\nimport json\nimport io\nimport csv\n\nfrom newdoc2vec.settings import DATASET_LOCATION, DATASET_NAME\n\n#Settings\nBASE_LOC = DATASET_LOCATION+'/json/'+DATASET_NAME+'/'\nOUT_LOC = DATASET_LOCATION\n# OUT_LOC = '../'+DATASET_LOCATION\n\nREPORT_FILE = 'reports'\nREPORT_ATTRBS = ['opening','current_status','current_resolution']\nATTRBS = ['product','component','severity','op_sys','short_desc','resolution']\n\nT_ATTRBS = REPORT_ATTRBS+['resolved_at','resolution_time']+ATTRBS\nN_AN_ATTRBS = ['short_desc','resolution']\n\n\n\ndata = {}\nanom_atrb_map = {}\n\nnorm_file = open(OUT_LOC+'normal.csv','w',newline='',encoding='utf-8')\nanom_file = open(OUT_LOC+'anom.csv','w',newline='',encoding='utf-8')\ncorr_anom_file = open(OUT_LOC+'corr_anom.csv','w',newline='',encoding='utf-8') #correct values for anomalour\n\nnorm_writer = csv.DictWriter(norm_file,quoting=csv.QUOTE_ALL, fieldnames=T_ATTRBS)\nanom_writer = csv.DictWriter(anom_file,quoting=csv.QUOTE_ALL, fieldnames=T_ATTRBS)\ncorr_anom_writer = csv.DictWriter(corr_anom_file,quoting=csv.QUOTE_ALL, fieldnames=T_ATTRBS)\n\nnorm_writer.writeheader()\nanom_writer.writeheader()\ncorr_anom_writer.writeheader()\n\n## Helpers ##\ndef save_bug(bug_id,anomaly):\n '''\n Convert, Filter & Save Bug-id\n :param bug_id:\n :param anomaly:\n :return: Nrows written if bug is saved else False\n '''\n\n row = {}\n corr_row = {}\n for atrb in ATTRBS:\n if anomaly and atrb not in N_AN_ATTRBS:\n row[atrb] = data[atrb][bug_id][0]['what']\n else:\n row[atrb] = data[atrb][bug_id][-1]['what']\n corr_row[atrb] = data[atrb][bug_id][-1]['what']\n\n for atrb in REPORT_ATTRBS:\n corr_row[atrb] = row[atrb] = data[REPORT_FILE][bug_id][atrb]\n corr_row['resolved_at'] = row['resolved_at'] = data['resolution'][bug_id][-1]['when']\n corr_row['resolution_time']= row['resolution_time'] = row['resolved_at'] - row['opening'] if row['resolved_at'] else None\n\n #Filter & Convert rows\n row = filt_conv_component(conv_op_sys_other(filt_current_status(filt_not_eclipse(row))))\n corr_row = filt_conv_component(conv_op_sys_other(filt_current_status(filt_not_eclipse(corr_row))))\n if not row or not corr_row:\n return False\n\n if anomaly:\n corr_anom_writer.writerow(corr_row)\n return anom_writer.writerow(row)\n return norm_writer.writerow(row)\n\n## Convert ##\ndef conv_op_sys_other(row):\n if not row:\n return False\n CONV_OP_SYS = ['windows 95','linux QT','symbianos s60',\n 'windows server 2008','windows mobile 5.0',\n 'windows mobile 2003','windows me',\n 'Other']\n if row['op_sys'].lower() in CONV_OP_SYS:\n row['op_sys'] = 'other'\n return row\n\ndef filt_conv_component(row):\n if not row:\n return False\n if row['component'] == 'PMC':\n return False\n if row['component'] == 'Incubator':\n row['component'] = 'Incubators'\n return row\n\n## Filters ##\ndef filt_current_status(row):\n if not row:\n return False\n if row['current_status'] == 'NEW':\n return False\n return row\n\ndef filt_not_eclipse(row):\n if not row:\n return False\n if row['current_resolution'] == 'NOT_ECLIPSE':\n return False\n return row\n\n## Main ##\n#Populating Bug Details\nwith io.open(BASE_LOC+REPORT_FILE+'.json','r',encoding='utf-8') as data_file:\n data[REPORT_FILE] = 
json.load(data_file)[REPORT_FILE]\n data_file.close()\n\n#Populating Attributes\nfor atrb in ATTRBS:\n with io.open(BASE_LOC+atrb+'.json','r',encoding='utf-8') as data_file:\n data[atrb] = json.load(data_file)[atrb]\n data_file.close()\n\n#Saving Data\nn_bugs = 0\nanom_bugs = 0\nfor bug_id in data['short_desc'].keys():\n anomaly = False\n for atrb in ATTRBS:\n if atrb not in N_AN_ATTRBS and len(data[atrb][bug_id]) > 1:\n anomaly = True\n break\n\n if save_bug(bug_id, anomaly):\n n_bugs += 1\n if anomaly:\n anom_bugs += 1\n\nprint(\"Total Bugs\", n_bugs)\nprint(\"Anom Bugs\",anom_bugs)\nnorm_file.close()\nanom_file.close()\ncorr_anom_file.close()\n","sub_path":"tools/json_to_csv.py","file_name":"json_to_csv.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"402645434","text":"import argparse\nfrom nilearn import surface\nimport pickle as pkl\nimport numpy as np\nimport os.path as op\nimport os\nimport pandas as pd\nfrom scipy import interpolate\nfrom itertools import product\nfrom tqdm import tqdm\nfrom skimage.filters import gabor_kernel\nfrom skimage.io import imsave\nimport scipy.ndimage as ndi\nimport sharedmem\n\n\ndef main(derivatives,\n subject,\n session):\n\n grid_resolution = .35\n max_frequency = 1.\n min_frequency = 1/10.\n n_frequencies = 50\n n_orientations = 16\n scale_factor = (min_frequency/max_frequency)**(-1/(n_frequencies-1))\n frequencies_mm = scale_factor**-np.arange(0, n_frequencies) * max_frequency\n orientations = np.linspace(0, np.pi, n_orientations, endpoint=False)\n\n frequencies_pix = frequencies_mm * grid_resolution\n\n for hemi in ['lh', 'rh']:\n\n xy = pd.read_pickle(op.join(derivatives, 'coordinate_patches', 'sub-{subject}',\n 'anat', 'sub-{subject}_hemi-{hemi}_coordinatepatch.pkl').format(**locals()))\n\n depths = np.round(np.linspace(0, 1, 8), 3)[1:-1]\n\n pb = tqdm(total=len(frequencies_pix) * len(orientations) * len(depths))\n\n mask_name = 'V1'+hemi[:1]\n\n results = []\n\n for depth in depths:\n zmap = op.join(derivatives, 'sampled_giis', 'sub-{subject}', 'ses-{session}',\n 'func',\n 'sub-{subject}_ses-{session}_left_over_right_desc-zmap-depth-{depth}_hemi-{hemi}.gii'). 
\\\n format(**locals())\n zmap_v = surface.load_surf_data(zmap)\n zmap = pd.DataFrame(zmap_v, columns=['z_value'])\n df = zmap.merge(xy, left_index=True, right_index=True)\n df = df.loc[(df.z == 0) & df[mask_name]]\n\n x_grid, y_grid = np.meshgrid(np.arange(df['x'].min(), df['x'].max(), grid_resolution),\n np.arange(df['y'].min(), df['y'].max(), grid_resolution))\n\n\n data = interpolate.griddata(df[['x', 'y']],\n df['z_value'],\n np.vstack((x_grid.ravel(), y_grid.ravel())).T,\n fill_value=0,\n method='linear').reshape(x_grid.shape)\n\n results_dir = op.join(derivatives, 'zmap_spatfreq',\n 'sub-{subject}',\n 'ses-{session}',\n 'func').format(**locals())\n\n if not op.exists(results_dir):\n os.makedirs(results_dir)\n\n print('Writing zmap...')\n imsave(op.join(results_dir, f'sub-{subject}_ses-{session}_hemi-{hemi}_depth-{depth}_desc-zmap2d_image.png'), data)\n\n pars = [(freq, ori) for freq, ori in product(frequencies_pix, orientations)]\n\n n_jobs = 16\n\n with sharedmem.MapReduce(np=n_jobs) as pool:\n def reduce(r):\n pb.update()\n return r\n\n def get_power(pars):\n freq, ori = pars\n kernel = gabor_kernel(freq, ori, n_stds=3)\n filtered_real = ndi.convolve(data, np.real(kernel), mode='wrap')\n filtered_imag = ndi.convolve(data, np.imag(kernel), mode='wrap')\n power = np.sqrt(filtered_real**2 + filtered_imag**2)\n power[data == 0] = np.nan\n\n power = pd.DataFrame([power.ravel()],\n index=pd.MultiIndex.from_tuples([(depth, freq/grid_resolution, ori)], names=['depth', 'frequency', 'orientation']))\n\n return power\n\n results += pool.map(get_power, pars, reduce)\n\n results = pd.concat(results, axis=0)\n\n results_vertex = interpolate.griddata(np.vstack((x_grid.ravel(),\n y_grid.ravel())).T,\n results.T,\n df[['x', 'y']])\n\n results_vertex = pd.DataFrame(results_vertex.T,\n index=results.index,\n columns=df.index)\n results_vertex.loc[:, df[df['z_value'] == 0].index] = np.nan\n\n\n results_vertex.to_pickle(op.join(results_dir,\n 'sub-{subject}_ses-{session}_hemi-{hemi}_energies.pkl').format(**locals()))\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"subject\", \n type=str,\n help=\"subject to process\")\n parser.add_argument(\"session\", \n type=str,\n help=\"subject to process\")\n parser.add_argument(\"--derivatives\", \n type=str,\n default='/derivatives',\n help=\"Folder where derivatives reside\")\n args = parser.parse_args()\n\n main(derivatives=args.derivatives,\n subject=args.subject,\n session=args.session)\n","sub_path":"analysis/surface_convolutions/gabor_filtering.py","file_name":"gabor_filtering.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"290924609","text":"from decimal import Decimal\n\nfrom cryptofeed.defines import BUY, LIQUIDATIONS, SELL, TRADES\nfrom cryptofeed.exchanges import Deribit\nfrom cryptofeed.standards import timestamp_normalize\n\n\nclass DeribitBlotter(Deribit):\n async def _trade(self, msg: dict, timestamp: float):\n \"\"\"\n {\n \"params\":\n {\n \"data\":\n [\n {\n \"trade_seq\": 933,\n \"trade_id\": \"9178\",\n \"timestamp\": 1550736299753,\n \"tick_direction\": 3,\n \"price\": 3948.69,\n \"instrument_name\": \"BTC-PERPETUAL\",\n \"index_price\": 3930.73,\n \"direction\": \"sell\",\n \"amount\": 10\n }\n ],\n \"channel\": \"trades.BTC-PERPETUAL.raw\"\n },\n \"method\": \"subscription\",\n \"jsonrpc\": \"2.0\"\n }\n \"\"\"\n for trade in msg[\"params\"][\"data\"]:\n price = Decimal(trade[\"price\"])\n 
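# 'amount' is the contract size (quoted in USD for Deribit inverse contracts), so the notional below converts it to base currency\n            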
volume = Decimal(trade[\"amount\"])\n notional = volume / price\n await self.callback(\n TRADES,\n feed=self.id,\n uid=trade[\"trade_id\"],\n symbol=trade[\"instrument_name\"], # Do not normalize\n timestamp=timestamp_normalize(self.id, trade[\"timestamp\"]),\n price=price,\n volume=volume,\n notional=notional,\n tickRule=1 if trade[\"direction\"] == \"buy\" else -1,\n )\n if \"liquidation\" in trade:\n await self.callback(\n LIQUIDATIONS,\n feed=self.id,\n symbol=trade[\"instrument_name\"],\n side=BUY if trade[\"direction\"] == \"buy\" else SELL,\n leaves_qty=Decimal(trade[\"amount\"]),\n price=Decimal(trade[\"price\"]),\n order_id=trade[\"trade_id\"],\n timestamp=timestamp_normalize(self.id, trade[\"timestamp\"]),\n receipt_timestamp=timestamp,\n )\n","sub_path":"cryptoblotter/exchanges/deribit.py","file_name":"deribit.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"17403823","text":"#!/usr/bin/env python\n\n# Advent of Code 2019 - Day 1 - Puzzle 1\n\ndef calculate_fuel(mass):\n divthree = mass/3\n return divthree - 2\n\ntotal_fuel = 0\nwith open('p1_input.txt', 'r') as fin:\n for line in fin:\n total_fuel += calculate_fuel(int(line))\n\nprint(\"Total Fuel: {}\".format(total_fuel))\n","sub_path":"Day1/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"202928381","text":"#\n# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed \n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either \n# express or implied. 
See the License for the specific language governing \n# permissions and limitations under the License.\n#\n\n#\n# Data Protection\n# kms_cmk_rotation_enabled\n#\n\nimport json\nimport boto3\nimport sys\nimport time\nfrom datetime import datetime\n\nSTS_SESSION = ''\n\ndef get_sts_session(event, region_name=False):\n sts = boto3.client(\"sts\")\n RoleArn = event[\"executionRoleArn\"]\n if not region_name:\n region_name = event['configRuleArn'].split(\":\")[3]\n response = sts.assume_role(\n RoleArn=RoleArn,\n RoleSessionName='ComplianceAudit',\n DurationSeconds=900)\n sts_session = boto3.Session(\n aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken'],\n region_name=region_name,\n botocore_session=None,\n profile_name=None)\n return sts_session\n\ndef kms_cmk_rotation_enabled(event):\n configuration_item = {}\n\n regions = STS_SESSION.client(\"ec2\").describe_regions()['Regions']\n for region in regions:\n region_session = get_sts_session(event, region['RegionName'])\n kms_client = region_session.client('kms')\n keys = kms_client.list_keys()\n if len(keys['Keys']) == 0:\n continue\n else:\n for key in keys['Keys']:\n eval = {}\n eval[\"ComplianceResourceType\"] = \"AWS::KMS::Key\"\n eval[\"ComplianceResourceId\"] = key['KeyArn']\n if kms_client.describe_key(KeyId=key['KeyId'])[\"KeyMetadata\"][\"KeyManager\"] == \"AWS\":\n continue\n if kms_client.get_key_rotation_status(KeyId=key['KeyId'])['KeyRotationEnabled'] == True:\n response = {\n \"ComplianceType\": \"COMPLIANT\",\n \"Annotation\": \"The yearly rotation is activated for this key.\"\n }\n else:\n response = {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The yearly rotation is not activated for this key.\"\n }\n eval[\"ComplianceType\"]=response[\"ComplianceType\"]\n eval[\"Annotation\"]=response[\"Annotation\"]\n eval[\"OrderingTimestamp\"]=json.loads(event[\"invokingEvent\"])['notificationCreationTime']\n put_eval(eval, result_token) \n\ndef put_eval(eval, token):\n config = STS_SESSION.client(\"config\")\n config.put_evaluations(\n Evaluations=[\n {\n \"ComplianceResourceType\": eval[\"ComplianceResourceType\"],\n \"ComplianceResourceId\": eval[\"ComplianceResourceId\"],\n \"ComplianceType\": eval[\"ComplianceType\"],\n \"Annotation\": eval[\"Annotation\"],\n \"OrderingTimestamp\": eval[\"OrderingTimestamp\"]\n },\n ],\n ResultToken=token\n )\n \n# This is the handler that's invoked by Lambda\ndef lambda_handler(event, context):\n global STS_SESSION\n global result_token\n if \"resultToken\" in event:\n result_token = event[\"resultToken\"]\n\n rule_parameters={}\n if 'ruleParameters' in event:\n rule_parameters = json.loads(event['ruleParameters'])\n\n STS_SESSION = get_sts_session(event)\n \n kms_cmk_rotation_enabled(event)\n","sub_path":"compliance-account-rulesets-setup/rule-code/KMS_CMK_ROTATION_ENABLED.py","file_name":"KMS_CMK_ROTATION_ENABLED.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"487589164","text":"import os\nimport requests\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nimport argparse\nimport re\n\ndef argument_choice():\n \"\"\"\n Function dedicated to being able to take the input from the user, whether be from the command line or from the python program directly.\n \"\"\"\n parse = argparse.ArgumentParser(description=\"Script that 
allows a user to download all the possible chapters in a manga from Mangadex.org.\")\n parse.add_argument('url', help=\"Enter the Mangadex URL\", nargs=\"?\", const=None, default=None)\n arguments = parse.parse_args()\n\n if arguments.url != None: #If the input is from command line\n answer = arguments.url\n else: #If the input is via python itself\n py_input = input(\"Enter the full URL from Mangadex: \")\n answer = py_input\n \n if re.match(r\"(https:)//(mangadex).(com|org)/(chapter)/(\\d{6})/(\\d)\", answer) is not None: #There is a valid URL\n return answer\n else:\n print(\"The page is not valid for scraping, please make sure the text is from mangadex's URL.\")\n try:\n input(\"Press Enter to continue\")\n except SyntaxError:\n pass\n exit()\n\ndef browser_shutdown(web_browser): # Closing Everything\n web_browser.close()\n web_browser.quit()\n\nmangadex_url = argument_choice() # URL source for the manga \n\ndriver = webdriver.Firefox() # Utilizing Firefox as the main driver\ndriver.get(mangadex_url) # Opening the URL of the manga\nstart_time = time.perf_counter() # Timer to find out how long this takes\n\n# Fixing the settings to make it Long Scroll\ndriver.find_element_by_id('settings-button').click() # Goes to the Settings\ndriver.find_element_by_xpath('/html/body/div[1]/div[4]/div/div/div[2]/div[1]/div[4]/div/div/button[3]').click() # Clicks the Long Page\ndriver.find_element_by_xpath('/html/body/div[1]/div[4]/div/div/div[1]/button').click() # Closes the settings\n\nchapter_num = 1\nmulti_page_flag = True\n\nwhile multi_page_flag == True:\n try:\n time.sleep(2)\n \n # Page Loading and Jumping\n total_manga_pages = int(driver.find_element_by_class_name('total-pages').text) # Get the total amount of pages.\n current_manga_pages = int(driver.find_element_by_class_name('current-page').text) # Get the current page from the jump\n while current_manga_pages < total_manga_pages: # Keeps scrolling down till it satisfies the condition\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight)\")\n time.sleep(1.5)\n current_manga_pages = int(driver.find_element_by_class_name('current-page').text) # Get the current page from the jump\n print(f\"Loaded {current_manga_pages} pages out of {total_manga_pages}.\\n\")\n time.sleep(1)\n\n # The Beautiful Soup Scraping Section\n html_source = driver.page_source\n soup = BeautifulSoup(html_source, 'html.parser')\n\n # Laddering Down Pt 1: For the images\n body = soup.body\n content = body.find('div', id='content')\n main_row = content.find('div', class_='reader-main col row no-gutters flex-column flex-nowrap noselect')\n manga_scraping_zone = main_row.find('div', class_='reader-images col-auto row no-gutters flex-nowrap m-auto text-center cursor-pointer directional')\n\n # Laddering Down Pt 2: For the title\n content_2 = content.find('div', class_='container reader-controls-container p-0')\n content_3 = content_2.find('div', class_='reader-controls-wrapper bg-reader-controls row no-gutters flex-nowrap')\n content_4 = content_3.find('div', class_='reader-controls col row no-gutters flex-column flex-nowrap')\n content_5 = content_4.find('div', class_='reader-controls-title col-auto text-center p-2')\n content_6 = content_5.find('div', style='font-size:1.25em')\n title = content_6.find('a', class_='manga-link')['title']\n print(title)\n\n # Folder Creating for the Images to be saved\n character_source = (title)\n try:\n os.mkdir(character_source) # Makes the directory of the title, the directory is placed where the script was exectuted.\n if 
chapter_num == 1:\n print(\"Directory \" , character_source , \" has been created.\\n\")\n except FileExistsError:\n if chapter_num == 1:\n print(\"Directory \" , character_source , \" already exists.\\n\")\n\n try:\n os.mkdir(character_source + f\"\\\\Chapter_{chapter_num}\")\n print(f\"Chapter {chapter_num} has been created.\\n\")\n except FileExistsError:\n print(f\"Chapter {chapter_num} already exists.\\n\")\n\n for manga_images in manga_scraping_zone.find_all('div', class_='reader-image-wrapper col-auto my-auto justify-content-center align-items-center noselect nodrag row no-gutters'):\n x = manga_images.find('img', class_='noselect nodrag cursor-pointer')['src']\n print(x)\n filename = x.split('/')\n if os.path.exists(f\"{character_source}\\\\Chapter_{chapter_num}\\\\\" + filename[5]):\n print(\"File already Exist.\\n\")\n else:\n # The downloading segment.\n print(\"Downloading...\\n\")\n second_request = requests.get(x)\n with open(f\"{character_source}\\\\Chapter_{chapter_num}\\\\\" + filename[5], 'wb') as f:\n f.write(second_request.content)\n\n # Find if there are multiple pages for the manga\n driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div[2]/div[2]/a[2]').click()\n multi_page_flag = True\n chapter_num += 1\n except:\n if chapter_num == 1:\n print(\"An error occurred while attempting to access page, try again later or in another page\")\n multi_page_flag = False\n \n\n# End of the line.\nbrowser_shutdown(driver) # Closes the browser completely.\nend_time = time.perf_counter()\nif chapter_num != 1:\n print(f\"All possible images have successfully been downloaded in {round(end_time-start_time, 2)} seconds.\")\nend = input(\"Press Enter to end the program \\n\")","sub_path":"Mangadex Image Scrapper Test (Full Manga).py","file_name":"Mangadex Image Scrapper Test (Full Manga).py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"407581421","text":"#!/usr/bin/env python3\n\n#-----------------------------------\n# @nagayosi 2018.2.11\n#\n# how to use: python3 main.py --train --iter 1000 --test\n# if you only train: python3 main.py --train --iter 1000\n# if you only test: python3 main.py --test\n#-----------------------------------\n\nimport chainer\nfrom chainer.dataset import convert\nimport chainer.links as L\nimport chainer.functions as F\nfrom chainer.links import Convolution2D as Conv2D, Deconvolution2D as Deconv2D\n\nimport glob, argparse, random, os, time\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport _pickle as pickle\n\n\n## Training data directory path\nHOME = os.path.expanduser('~') + '/'\nTrain_file = 'pokemon_train.txt'\npokemon_file = 'pokemon.txt'\nwaza_file = 'pokemon_waza.txt'\nseikaku_file = 'pokemon_seikaku.txt'\n\n\n## Config containing hyper-parameters\ncf = {\n 'Iteration': 1000,\n 'Minibatch': 10,\n 'LearningRate': 0.01,\n 'WeightDecay':0.0005,\n 'FineTuning': False,\n 'SaveModel': 'MyNet.npz'\n}\n\n## Network model\nclass MyNet(chainer.Chain):\n def __init__(self):\n super(MyNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, 1024, nobias=True)\n self.fc2 = L.Linear(None, 1024, nobias=True)\n self.out_doryokuchi = L.Linear(None, 6, nobias=True)\n self.out_seikaku = L.Linear(None, 25, nobias=True)\n self.out_waza = L.Linear(None, 668, nobias=True)\n\n def acti(self, x):\n return F.tanh(x)\n \n def __call__(self, x):\n fc1 = self.acti(self.fc1(x))\n fc2 = self.acti(self.fc2(fc1))\n out1 = 
F.sigmoid(self.out_doryokuchi(fc2))\n out2 = self.out_seikaku(fc2)\n out3 = F.sigmoid(self.out_waza(fc2))\n return out1, out2, out3\n\n\n## Image Load function\ndef load_data(shuffle=True):\n\n datas = [x.strip() for x in open(Train_file, 'r').readlines()]\n pokemon = [x.strip() for x in open(pokemon_file, 'r').readlines()]\n waza = [x.strip() for x in open(waza_file, 'r').readlines()]\n seikaku = [x.strip() for x in open(seikaku_file, 'r').readlines()]\n \n\n for i, line in enumerate(datas):\n item = line.split(',')\n if len(item) < 12:\n continue\n\n name = np.zeros(len(pokemon), dtype=np.float32)\n name[pokemon.index(item[0])] = 1.\n d = np.array(list(map(int, item[1:7]))) / 4. / 63.\n s = seikaku.index(item[7])\n w = np.zeros(len(waza), dtype=np.float32)\n for j in item[8:]:\n w[waza.index(j)] = 1.\n \n x = np.array((name)).astype(np.float32)\n t = np.hstack((d, s, w)).astype(np.float32)\n\n if i == 0:\n data1 = x\n data2 = t\n else:\n data1 = np.vstack((data1, x))\n data2 = np.vstack((data2, t))\n\n if shuffle: \n inds = np.arange(len(datas))\n random.shuffle(inds)\n data1 = data1[inds]\n data2 = data2[inds]\n \n data = [data1, data2]\n\n return data\n\n\n## Fine-tuning function\ndef copy_model(src, dst):\n assert isinstance(src, chainer.Chain)\n assert isinstance(dst, chainer.Chain)\n for child in src.children():\n if child.name not in dst.__dict__: continue\n dst_child = dst[child.name]\n if type(child) != type(dst_child): continue\n if isinstance(child, chainer.Chain):\n copy_model(child, dst_child)\n if isinstance(child, chainer.Link):\n match = True\n for a, b in zip(child.namedparams(), dst_child.namedparams()):\n if a[0] != b[0]:\n match = False\n break\n if a[1].data.shape != b[1].data.shape:\n match = False\n break\n if not match:\n print('Ignore %s because of parameter mismatch' % child.name)\n continue\n for a, b in zip(child.namedparams(), dst_child.namedparams()):\n b[1].data = a[1].data\n print('Copy %s' % child.name)\n\n\ndef get_batch(data, batch, last):\n\n ins, gts = data\n\n data_num = len(ins)\n ind = last + batch\n\n if ind < data_num:\n in_data = ins[last : ind]\n gt = gts[last : ind]\n last = ind\n else:\n resi = ind - data_num\n in1, gt1 = ins[last:], gts[last:]\n\n inds = np.arange(len(ins))\n random.shuffle(inds)\n ins = ins[inds]\n gts = gts[inds]\n data = [ins, gts]\n\n in2, gt2 = ins[:resi], gts[:resi]\n in_data = np.vstack((in1, in2))\n gt = np.vstack((gt1, gt2))\n last = resi\n\n return in_data, gt, last, data\n\n\ndef parse(data):\n d = data[:, :6].astype(np.float32)\n s = data[:, 6].astype(np.int32)\n w = data[:, 7:].astype(np.float32)\n return d, s, w\n\n\n## Train function\ndef main_train(args):\n\n ## Prepare Images\n train = load_data()\n test = load_data()\n\n if len(train) < 1 or len(test) < 1:\n raise Exception('train num : {}, test num: {}'.format(len(train), len(test)))\n \n train_count = len(train)\n test_count = len(test)\n\n print('# train images: {}'.format(train_count))\n print('# test images: {}'.format(test_count))\n\n \n ## Prepare Network\n model = MyNet()\n if args.gpu_id >=0:\n model.to_gpu()\n\n if cf['FineTuning']:\n orig = pickle.load(open(\"../bvlc_alexnet.pkl\", \"rb\"))\n copy_model(orig, model)\n #serializers.load_npz(\"result/mynet_epoch_100.model\", model)\n\n ## Prepare Optimizer\n optimizer = chainer.optimizers.MomentumSGD(cf['LearningRate'])\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.WeightDecay(cf['WeightDecay']))\n\n\n ## Training start!!\n sum_accuracy_train = 0\n sum_loss_train = 0\n start = 
time.time()\n \n print('epoch train_loss train_accuracy test_loss test_accuracy Elapsed-Time')\n \n last = 0\n\n for i in range(cf['Iteration']):\n i += 1\n\n x, y, last, train = get_batch(train, cf['Minibatch'], last)\n d, s, w = parse(y)\n\n train_losses = []\n train_accuracies = []\n\n if args.gpu_id >= 0:\n x = chainer.cuda.to_gpu(x)\n d = chainer.cuda.to_gpu(d)\n s = chainer.cuda.to_gpu(s)\n w = chainer.cuda.to_gpu(w)\n\n x = chainer.Variable(x)\n t_d = chainer.Variable(d)\n t_s = chainer.Variable(s)\n t_w = chainer.Variable(w)\n\n y_d, y_s, y_w = model(x)\n\n loss_train1 = F.mean_squared_error(y_d, t_d)\n loss_train2 = F.softmax_cross_entropy(y_s, t_s)\n loss_train3 = F.mean_squared_error(y_w, t_w)\n\n model.cleargrads()\n loss_train1.backward()\n loss_train2.backward()\n loss_train3.backward()\n optimizer.update()\n\n train_losses.append(chainer.cuda.to_cpu(loss_train1.data))\n #acurracy_train.to_cpu()\n #train_accuracies.append(accuracy_train.data)\n train_accuracies.append(chainer.cuda.to_cpu(loss_train1.data))\n \n #sum_loss_train += float(model.loss.data) * len(t.data)\n #sum_accuracy_train += float(model.accuracy.data) * len(t.data)\n\n \"\"\"\n if train_iter.is_new_epoch and train_iter.epoch % 20 == 0:\n #print('epoch: ', train_iter.epoch)\n #print('train mean loss: {:.2f}, accuracy: {:.2f}'.format( sum_loss_train / train_count, sum_accuracy_train / train_count))\n # evaluation\n\n test_losses = []\n test_accuracies = []\n\n sum_accuracy_test = 0\n sum_loss_test = 0\n \n #model.predictor.train = False\n for batch in test_iter:\n x_array, t_array = convert.concat_examples(batch,args.gpu_id)\n x = chainer.Variable(x_array)\n t = chainer.Variable(t_array)\n\n y = model(x)\n\n loss_test = F.mean_squared_error(y, t)\n #accuracy_test = F.accuracy(y, t)\n \n test_losses.append(chainer.cuda.to_cpu(loss_test.data))\n #accuracy_test.to_cpu()\n #test_accuracies.append(accuracy_test.data)\n test_accuracies.append(chainer.cuda.to_cpu(loss_test.data))\n\n test_iter.reset()\n #model.predictor.train = True\n #print('test mean loss: {:.2f}, accuracy: {:.2f}'.format( sum_loss_test / test_count, sum_accuracy_test / test_count))\n \n print('{:>5} {:^10.4f} {:^14.4f} {:^9.4f} {:^13.4f} {:^12.2f}'.format(train_iter.epoch, np.mean(train_losses), np.mean(train_accuracies), np.mean(test_losses), np.mean(test_accuracies), time.time()-start))\n \"\"\"\n\n print('{:>5} {:^10.4f}'.format(i, np.mean(train_losses)))\n\n\n # Save the model and the optimizer\n print('\\nsave the model --> {}'.format(cf['SaveModel']) )\n chainer.serializers.save_npz(cf['SaveModel'], model)\n model_name = cf['SaveModel'].split('.')[-2]\n print('save the optimizer --> {}'.format(model_name + '.state'))\n chainer.serializers.save_npz(model_name + '.state', optimizer)\n print()\n\n## Test function \ndef main_test(args):\n \n ## Prepare Network\n model = MyNet()\n chainer.serializers.load_npz(cf['SaveModel'], model)\n\n if args.gpu_id >= 0:\n model.to_gpu()\n\n ## Test data\n td = ['ガブリアス', 'ボーマンダ']\n\n pokemon = [x.strip() for x in open(pokemon_file, 'r').readlines()]\n waza = [x.strip() for x in open(waza_file, 'r').readlines()]\n seikaku = [x.strip() for x in open(seikaku_file, 'r').readlines()]\n\n\n ## Test start!!\n print('-- test --')\n \n for i in td:\n x = np.zeros(len(pokemon), dtype=np.float32)\n x[pokemon.index(i)] = 1.\n \n # Reshape 1-dimention to [minibatch, data]\n x = x[None, ...]\n \n if args.gpu_id >= 0:\n x = chainer.cuda.to_gpu(x, 0)\n\n y_d = model(x)[0].data[0]\n y_s = model(x)[1].data[0]\n y_w = 
model(x)[2].data[0]\n\n if args.gpu_id >= 0:\n y_d = chainer.cuda.to_cpu(y_d)\n y_s = chainer.cuda.to_cpu(y_s)\n y_w = chainer.cuda.to_cpu(y_w)\n \n print()\n print(i)\n\n ## Doryokuchi\n d_sum = y_d.sum()\n d = np.round(y_d /d_sum * 126.).astype(np.int) * 4\n\n print(' H | A | B | C | D | S |')\n print('{:>4d}|{:>4d}|{:>4d}|{:>4d}|{:>4d}|{:>4d}|'.format(d[0], d[1], d[2], d[3], d[4], d[5]))\n\n ## Seikaku\n print('seikaku: {}'.format(seikaku[y_s.argmax()]))\n\n ## Waza\n for j, w in enumerate(y_w.argsort()[::-1].astype(int)[:4]):\n j += 1\n print('waza{} : {}'.format(j, waza[w]))\n #print(waza[w] for w in y_w[0].argsort()[::-1].astype(int)[:4])\n\n\n\ndef print_config(args):\n print('-- config parameters --')\n print('GPU ID : {}'.format(args.gpu_id))\n print('Train file : {}'.format(Train_file))\n print('Train pokemon file : {}'.format(pokemon_file))\n print('Train seikaku file : {}'.format(seikaku_file))\n print('Train waza file : {}'.format(waza_file))\n \n for k, v in cf.items():\n print('{} : {}'.format(k, v))\n print('----\\n')\n\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Faster R-CNN demo')\n parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',default=-1, type=int)\n parser.add_argument('--cpu', dest='cpu_mode',help='Use CPU (overrides --gpu)',action='store_true')\n parser.add_argument('--train', dest='train', help='train', action='store_true')\n parser.add_argument('--test', dest='test', help='test', action='store_true')\n args = parser.parse_args()\n return args\n\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n print_config(args)\n\n if args.gpu_id >= 0:\n chainer.cuda.get_device(args.gpu_id).use()\n \n if args.train:\n main_train(args)\n if args.test:\n main_test(args)\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"636808707","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom crawlers.tickers import getTickers\nfrom crawlers.info import getInfo\n\ndef getFundamentus(table='fundamentalista'):\n base_url = 'https://www.fundamentus.com.br/detalhes.php?papel='\n\n table_index = {\n 'fundamentalista': 2,\n 'balanco': 3,\n 'demonstrativo': 4\n }\n tickers = list(getTickers())\n\n data = []\n \n for ticker in tickers:\n \n print(f'Requisitando dados {ticker} ...')\n\n try:\n url = base_url + ticker\n response = requests.get(url)\n\n soup = BeautifulSoup(response.content, 'html.parser')\n\n alltables = soup.find_all('table', 'w728')\n t = alltables[table_index[table]]\n\n label = t.find_all('td', {'class': 'label'})\n content = t.find_all('td', {'class': 'data'})\n except:\n continue\n\n ticker_data = {}\n for l, c in zip(label, content):\n data_label = l.find('span', {'class': 'txt'}).getText()\n data_content = c.getText()\n if fix_string(data_label):\n ticker_data[data_label] = string_to_float(data_content)\n ticker_data['ticker'] = ticker\n\n data.append(ticker_data)\n\n return data\n\ndef string_to_float(string):\n is_percentage = True if '%' in string else False\n replaced_string = string.replace('\\n', '') \\\n .replace('%', '').replace(' ', '').replace('.', '').replace(',', '.')\n if replaced_string == '-':\n return 0.0\n if is_percentage:\n return float(replaced_string) / 100\n return float(replaced_string)\n\ndef fix_string(string):\n return string.replace(' ', '').replace('\\n', 
'')","sub_path":"crawlers/fundamentus.py","file_name":"fundamentus.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"34233404","text":"# Ugh - he doesn't even have the decency to provide this\nimport numpy as np\nimport pylab as plt\nimport os\n\npath = '/Users/jaguirre/PyModules/ionFR/'\n#path = os.path.expanduser('~/radionopy/')\n\nUT, TEC, B, RM, dRM = np.loadtxt(os.path.join(path, 'IonRM.txt'), unpack=True)\n\nplt.clf()\nplt.errorbar(UT[0:24], RM[0:24], yerr=dRM[0:24], marker='o', ls='None')\nplt.ylim([0, 3])\nplt.xlim([0, 25])\nplt.savefig('test.pdf')\nplt.show()\n","sub_path":"plot_IonRM.py","file_name":"plot_IonRM.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"225971109","text":"\n# coding: utf-8\n\n# TODO List:\n# - Reset/Scramble\n# - Move\n# - More elegent translate action (maybe store in array)\n# - More elegant move\n# - Render\n\n# In[7]:\n\n\nimport numpy as np\nfrom PIL import Image\nimport copy\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport matplotlib.pyplot as plt\n\nclass RubiksEnv(gym.Env):\n \"\"\"\n See cartpole on Github\n Description:\n \n Source:\n \n Observation:\n \n Actions:\n \n Reward:\n The reward\n \n Starting State:\n \n \n Episode Termination:\n Episode terminates when either a cube is in the solved state (i.e. each side only has tiles of one colour) or when the step limit is reached.\n \"\"\"\n \n def __init__(self, size=3, metric='quarter', pomdp=False, solved_reward=1.0, unsolved_reward=0.0, seed=None):\n self.size = size\n \n #Allocate space for Rubik's Cube sides. Each side get's a corresponding integer.\n self.U = (0*np.ones((self.size,self.size))).astype(int)\n self.L = (1*np.ones((self.size,self.size))).astype(int)\n self.F = (2*np.ones((self.size,self.size))).astype(int)\n self.R = (3*np.ones((self.size,self.size))).astype(int)\n self.B = (4*np.ones((self.size,self.size))).astype(int)\n self.D = (5*np.ones((self.size,self.size))).astype(int)\n \n self.orientation = (0,1,3)\n \n self.metric = metric\n self.pomdp = pomdp\n \n if self.metric is 'quarter':\n if self.pomdp:\n self.action_space = spaces.Discrete(16)\n self.observation_space = spaces.Box(low=0, high=5, dtype=np.uint8, shape=(3, self.size, self.size))\n else:\n self.action_space = spaces.Discrete(12)\n self.observation_space = spaces.Box(low=0, high=5, dtype=np.uint8, shape=(6, self.size, self.size))\n else:\n if self.pomdp:\n self.action_space = spaces.Discrete(23)\n self.observation_space = spaces.Box(low=0, high=5, dtype=np.uint8, shape=(3, self.size, self.size))\n else:\n self.action_space = spaces.Discrete(18)\n self.observation_space = spaces.Box(low=0, high=5, dtype=np.uint8, shape=(6, self.size, self.size))\n \n self._action_set = [i for i in range(self.action_space.n)] \n \n self.solved_reward = solved_reward\n self.unsolved_reward = unsolved_reward\n \n self.seed(seed)\n\n self.ACTION_MEANING_QUARTER_METRIC = {\n 0 : \"U\",\n 1 : \"L\",\n 2 : \"F\",\n 3 : \"R\",\n 4 : \"B\",\n 5 : \"D\",\n 6 : \"U'\",\n 7 : \"L'\",\n 8 : \"F'\",\n 9 : \"R'\",\n 10 : \"B'\",\n 11 : \"D'\"\n }\n\n self.ACTION_MEANING_QUARTER_METRIC_POMDP = {\n 0 : \"U\",\n 1 : \"L\",\n 2 : \"F\",\n 3 : \"R\",\n 4 : \"B\",\n 5 : \"D\",\n 6 : \"U'\",\n 7 : \"L'\",\n 8 : \"F'\",\n 9 : \"R'\",\n 10 : \"B'\",\n 11 : \"D'\",\n 12 : \"North\",\n 13 : \"West\",\n 14 : \"South\",\n 15 : \"East\"\n 
}\n\n self.ACTION_MEANING_HALF_METRIC = {\n 0 : \"U\",\n 1 : \"L\",\n 2 : \"F\",\n 3 : \"R\",\n 4 : \"B\",\n 5 : \"D\",\n 6 : \"U'\",\n 7 : \"L'\",\n 8 : \"F'\",\n 9 : \"R'\",\n 10 : \"B'\",\n 11 : \"D'\",\n 12 : \"U2\",\n 13 : \"L2\",\n 14 : \"F2\",\n 15 : \"R2\",\n 16 : \"B2\",\n 17 : \"D2\"\n }\n\n self.ACTION_MEANING_HALF_METRIC_POMDP = {\n 0 : \"U\",\n 1 : \"L\",\n 2 : \"F\",\n 3 : \"R\",\n 4 : \"B\",\n 5 : \"D\",\n 6 : \"U'\",\n 7 : \"L'\",\n 8 : \"F'\",\n 9 : \"R'\",\n 10 : \"B'\",\n 11 : \"D'\",\n 12 : \"U2\",\n 13 : \"L2\",\n 14 : \"F2\",\n 15 : \"R2\",\n 16 : \"B2\",\n 17 : \"D2\",\n 18 : \"North\",\n 19 : \"West\",\n 20 : \"South\",\n 21 : \"East\",\n 22 : \"Antipode\"\n }\n \n def seed(self, seed=None):\n \"\"\"\"\"\"\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n \n def reset(self, steps = 20, orientation = False):\n \"\"\"\"\"\"\n self.U = (0*np.ones((self.size, self.size))).astype(int)\n self.L = (1*np.ones((self.size, self.size))).astype(int)\n self.F = (2*np.ones((self.size, self.size))).astype(int)\n self.R = (3*np.ones((self.size, self.size))).astype(int)\n self.B = (4*np.ones((self.size, self.size))).astype(int)\n self.D = (5*np.ones((self.size, self.size))).astype(int)\n \n for step in range(steps):\n action = self.np_random.choice(self._action_set)\n self.step(int(action))\n if self.solved():\n self.reset(steps, orientation)\n observation = self.get_observation()\n return observation\n\n def reset_to_action(self, actions, orientation = False):\n \"\"\"\"\"\"\n self.U = (0*np.ones((self.size, self.size))).astype(int)\n self.L = (1*np.ones((self.size, self.size))).astype(int)\n self.F = (2*np.ones((self.size, self.size))).astype(int)\n self.R = (3*np.ones((self.size, self.size))).astype(int)\n self.B = (4*np.ones((self.size, self.size))).astype(int)\n self.D = (5*np.ones((self.size, self.size))).astype(int)\n\n for action in actions:\n self.step(int(action))\n\n observation = self.get_observation()\n return observation\n\n def curriculum_reset(self, level=12*20 - 1, orientation = False):\n \"\"\"\"\"\"\n self.U = (0*np.ones((self.size, self.size))).astype(int)\n self.L = (1*np.ones((self.size, self.size))).astype(int)\n self.F = (2*np.ones((self.size, self.size))).astype(int)\n self.R = (3*np.ones((self.size, self.size))).astype(int)\n self.B = (4*np.ones((self.size, self.size))).astype(int)\n self.D = (5*np.ones((self.size, self.size))).astype(int)\n\n for step in range((level // self._n_actions)):\n action = self.np_random.choice(self._action_set)\n self.step(int(action))\n\n action = self.np_random.choice(self._action_set[:(level % self._n_actions) + 1])\n self.step(int(action))\n\n if self.solved():\n self.curriculum_reset(level, orientation)\n\n observation = self.get_observation()\n\n return observation\n \n def move(self, side, sign, times, orientation):\n \"\"\"\"\"\"\n if orientation is None:\n if side is 0:\n self.U = np.rot90(self.U, times*-sign)\n if times < 2:\n if sign > 0:\n Ftmp = copy.copy(self.F[0,:])\n self.F[0,:] = self.R[0,:]\n Ltmp = copy.copy(self.L[0,:])\n self.L[0,:] = Ftmp\n Btmp = copy.copy(self.B[0,:])\n self.B[0,:] = Ltmp\n self.R[0,:] = Btmp\n if sign < 0:\n Ftmp = copy.copy(self.F[0,:])\n self.F[0,:] = self.L[0,:]\n Rtmp = copy.copy(self.R[0,:])\n self.R[0,:] = Ftmp\n Btmp = copy.copy(self.B[0,:])\n self.B[0,:] = Rtmp\n self.L[0,:] = Btmp\n else:\n Ftmp = copy.copy(self.F[0,:])\n self.F[0,:] = self.B[0,:]\n self.B[0,:] = Ftmp\n Rtmp = copy.copy(self.R[0,:])\n self.R[0,:] = self.L[0,:]\n self.L[0,:] = Rtmp\n \n if side 
is 1:\n self.L = np.rot90(self.L,times*-sign)\n if times < 2:\n if sign > 0:\n Ftmp = copy.copy(self.F[:,0])\n self.F[:,0] = self.U[:,0]\n Dtmp = copy.copy(self.D[:,0][::-1])\n self.D[:,0] = Ftmp\n Btmp = copy.copy(self.B[:,-1][::-1])\n self.B[:,-1] = Dtmp\n self.U[:,0] = Btmp\n if sign < 0:\n Ftmp = copy.copy(self.F[:,0])\n self.F[:,0] = self.D[:,0]\n Utmp = copy.copy(self.U[:,0][::-1])\n self.U[:,0] = Ftmp\n Btmp = copy.copy(self.B[:,-1][::-1])\n self.B[:,-1] = Utmp\n self.D[:,0] = Btmp\n else:\n Ftmp = copy.copy(self.F[:,0][::-1])\n self.F[:,0] = self.B[:,-1][::-1]\n self.B[:,-1] = Ftmp\n Utmp = copy.copy(self.U[:,0])\n self.U[:,0] = self.D[:,0]\n self.D[:,0] = Utmp\n \n \n if side is 2:\n self.F = np.rot90(self.F,times*-sign)\n if times < 2:\n if sign > 0:\n Utmp = copy.copy(self.U[-1,:])\n self.U[-1,:] = self.L[:,-1][::-1]\n Rtmp = copy.copy(self.R[:,0][::-1])\n self.R[:,0] = Utmp\n Dtmp = copy.copy(self.D[0,:])\n self.D[0,:] = Rtmp\n self.L[:,-1] = Dtmp\n if sign < 0:\n Utmp = copy.copy(self.U[-1,:][::-1])\n self.U[-1,:] = self.R[:,0]\n Ltmp = copy.copy(self.L[:,-1])\n self.L[:,-1] = Utmp\n Dtmp = copy.copy(self.D[0,:][::-1])\n self.D[0,:] = Ltmp\n self.R[:,0] = Dtmp\n else:\n Utmp = copy.copy(self.U[-1,:][::-1])\n self.U[-1,:] = self.D[0,:][::-1]\n self.D[0,:] = Utmp\n Rtmp = copy.copy(self.R[:,0][::-1])\n self.R[:,0] = self.L[:,2]\n self.L[:,-1] = Rtmp\n \n if side is 3:\n self.R = np.rot90(self.R,times*-sign)\n if times < 2:\n if sign > 0:\n Utmp = copy.copy(self.U[:,-1][::-1])\n self.U[:,-1] = self.F[:,-1]\n Btmp = copy.copy(self.B[:,0][::-1])\n self.B[:,0] = Utmp\n Dtmp = copy.copy(self.D[:,-1])\n self.D[:,-1] = Btmp\n self.F[:,-1] = Dtmp\n if sign < 0:\n Utmp = copy.copy(self.U[:,-1]) \n self.U[:,-1] = self.B[:,0][::-1]\n Ftmp = copy.copy(self.F[:,-1])\n self.F[:,-1] = Utmp\n Dtmp = copy.copy(self.D[:,-1][::-1])\n self.D[:,-1] = Ftmp\n self.B[:,0] = Dtmp\n else:\n Utmp = copy.copy(self.U[:,-1])\n self.U[:,-1] = self.D[:,-1]\n self.D[:,-1] = Utmp\n Ftmp = copy.copy(self.F[:,-1][::-1])\n self.F[:,-1] = self.B[:,0][::-1]\n self.B[:,0] = Ftmp\n \n \n if side is 4:\n self.B = np.rot90(self.B,times*-sign)\n if times < 2:\n if sign > 0:\n Utmp = copy.copy(self.U[0,:][::-1])\n self.U[0,:] = self.R[:,-1]\n Ltmp = copy.copy(self.L[:,0])\n self.L[:,0] = Utmp\n Dtmp = copy.copy(self.D[-1,:][::-1])\n self.D[-1,:] = Ltmp\n self.R[:,-1] = Dtmp\n \n if sign < 0:\n Utmp = copy.copy(self.U[0,:])\n self.U[0,:] = self.L[:,0][::-1]\n Rtmp = copy.copy(self.R[:,-1][::-1])\n self.R[:,-1] = Utmp\n Dtmp = copy.copy(self.D[-1,:])\n self.D[-1,:] = Rtmp\n self.L[:,0] = Dtmp\n else:\n Utmp = copy.copy(self.U[0,:][::-1])\n self.U[0,:] = self.D[-1,:][::-1]\n self.D[-1,:] = Utmp\n Rtmp = copy.copy(self.R[:,-1][::-1])\n self.R[:,-1] = self.L[:,0][::-1]\n self.L[:,0] = Rtmp\n \n \n if side is 5:\n self.D = np.rot90(self.D,times*-sign)\n if times < 2:\n if sign > 0:\n Ftmp = copy.copy(self.F[-1,:])\n self.F[-1,:] = self.L[-1,:]\n Rtmp = copy.copy(self.R[-1,:])\n self.R[-1,:] = Ftmp\n Btmp = copy.copy(self.B[-1,:])\n self.B[-1,:] = Rtmp\n self.L[-1,:] = Btmp\n if sign < 0:\n Ftmp = copy.copy(self.F[-1,:])\n self.F[-1,:] = self.R[-1,:]\n Ltmp = copy.copy(self.L[-1,:])\n self.L[-1,:] = Ftmp\n Btmp = copy.copy(self.B[-1,:])\n self.B[-1,:] = Ltmp\n self.R[-1,:] = Btmp\n else:\n Ftmp = copy.copy(self.F[-1,:])\n self.F[-1,:] = self.B[-1,:]\n self.B[-1,:] = Ftmp\n Ltmp = copy.copy(self.L[-1,:])\n self.L[-1,:] = self.R[-1,:]\n self.R[-1,:] = Ltmp\n else:\n raise NotImplementedError('Orientation')\n \n def 
translate_action(self, action):\n        \"\"\"Decode a discrete action id into (side, sign, times, orientation).\"\"\"\n        #TODO encode this in ACTION_MEANING_QUARTER_METRIC\n        side = None\n        sign = None\n        times = None\n        orientation = None\n        \n        if action in [6,7,8,9,10,11]:\n            sign = -1.0\n            times = 1.0\n        \n        if action in [0,1,2,3,4,5]:\n            sign = 1.0\n            times = 1.0\n        \n        if action == 0 or action == 6:\n            side = 0\n        if action == 1 or action == 7:\n            side = 1\n        if action == 2 or action == 8:\n            side = 2\n        if action == 3 or action == 9:\n            side = 3\n        if action == 4 or action == 10:\n            side = 4\n        if action == 5 or action == 11:\n            side = 5\n\n        # Half-turn actions 12-17 exist only in the half metric; guarding on the\n        # action id keeps them from clobbering sign/times of actions 0-11.\n        if self.metric == 'half' and 12 <= action <= 17:\n            sign = 1.0\n            times = 2.0\n            \n            if action == 12:\n                side = 0\n            if action == 13:\n                side = 1\n            if action == 14:\n                side = 2\n            if action == 15:\n                side = 3\n            if action == 16:\n                side = 4\n            if action == 17:\n                side = 5\n        \n        if self.pomdp:\n            # Orientation actions start at 12 in the quarter metric and at 18 in\n            # the half metric; only those leave side/sign/times unset.\n            first_orientation = 12 if self.metric == 'quarter' else 18\n            if action >= first_orientation:\n                assert side is None\n                assert sign is None\n                assert times is None\n                \n                if action in [12, 18]:\n                    orientation = \"North\"\n                if action in [13, 19]:\n                    orientation = \"West\"\n                if action in [14, 20]:\n                    orientation = \"South\"\n                if action in [15, 21]:\n                    orientation = \"East\"\n                if action == 22:\n                    orientation = \"Antipode\"\n        \n        return side, sign, times, orientation\n    \n    def step(self, action):\n        assert self.action_space.contains(action), \"Invalid action\"\n        side, sign, times, orientation = self.translate_action(action)\n        self.move(side, sign, times, orientation)\n        \n        observation = self.get_observation()\n        done = self.solved()\n        if done:\n            reward = self.solved_reward\n        else:\n            reward = self.unsolved_reward\n        \n        information = {}\n        \n        return observation, reward, done, information\n    \n    def solved(self):\n        \"\"\"\"\"\"\n        sides = [self.U, self.L, self.F, self.R, self.B, self.D]\n        \n        for index, side in enumerate(sides):\n            if not np.all(side == index):\n                return 0\n        \n        return 1\n    \n    def pretty_print(self):\n        \"\"\"\"\"\"\n        emptysymbol = np.chararray((self.size, self.size), unicode=True)\n        emptysymbol[:] = '-'\n        matrix = np.vstack((np.hstack((emptysymbol,self.U.astype(int),emptysymbol,emptysymbol)),\n                            np.hstack((self.L.astype(int),self.F.astype(int),self.R.astype(int),self.B.astype(int))),\n                            np.hstack((emptysymbol,self.D.astype(int),emptysymbol,emptysymbol))))\n        \n        print(matrix)\n    \n    @property\n    def _n_actions(self):\n        \"\"\"\"\"\"\n        return len(self._action_set)\n    \n    def render(self):\n        \"\"\"\"\"\"\n        colordict = {0: [255, 0, 0],\n                     1: [0, 0, 255],\n                     2: [255, 255, 255],\n                     3: [0, 255, 0],\n                     4: [255, 255, 0],\n                     5: [255, 127, 0]}\n\n        factor = 60\n        square = int(factor/self.size)\n        width = factor*4\n        height = factor*3\n\n        image = np.ones((height, width, 3), dtype='uint8')*127\n        for i in range(self.size):\n            for j in range(self.size):\n                # UP\n                image[i*square:(i+1)*square, factor + j*square:factor + (j+1)*square] = colordict[self.U[i, j]]\n\n                # LEFT\n                image[factor + i*square: factor + (i+1)*square, j*square:(j+1)*square] = colordict[self.L[i, j]]\n\n                # FRONT\n                image[factor + i*square: factor + (i+1)*square, factor + j*square: factor + (j+1)*square] = colordict[self.F[i, j]]\n\n                # RIGHT\n                image[factor + i*square: factor + (i+1)*square, 2*factor + j*square: 2*factor + (j+1)*square] = colordict[self.R[i, j]]\n\n                # BACK\n                image[factor + i*square: factor + (i+1)*square, 3*factor + j*square: 3*factor + (j+1)*square] = colordict[self.B[i, j]]\n\n                # DOWN\n                image[2*factor + i*square: 2*factor + (i+1)*square, factor + j*square: factor + (j+1)*square] = colordict[self.D[i, j]]\n        plt.imshow(image)\n        plt.show()\n    \n    def close(self):\n        \"\"\"\"\"\"\n        raise NotImplementedError('close not implemented')\n    \n    def 
get_action_meanings(self):\n \"\"\"\"\"\"\n if self.metric is 'quarter':\n if self.pomdp:\n return [self.ACTION_MEANING_QUARTER_METRIC_POMDP[i] for i in self._action_set]\n else:\n return [self.ACTION_MEANING_QUARTER_METRIC[i] for i in self._action_set]\n else:\n if self.pomdp:\n return [self.ACTION_MEANING_HALF_METRIC_POMDP[i] for i in self._action_set]\n else:\n return [self.ACTION_MEANING_HALF_METRIC[i] for i in self._action_set]\n \n \n def get_observation(self):\n \"\"\"\"\"\"\n sides = [self.U, self.L, self.F, self.R, self.B, self.D]\n if self.pomdp:\n raveled_cube = np.array([sides[self.orientation[0]],sides[self.orientation[1]],sides[self.orientation[2]]]).ravel()\n one_hot = np.eye(6)[raveled_cube]\n return one_hot.reshape(-1)\n else:\n raveled_cube = np.array(sides).ravel()\n one_hot = np.eye(6)[raveled_cube]\n \n return one_hot.reshape(-1)\n \n \n \n\n\nif __name__ == \"__main__\":\n env = RubiksEnv(size=3, metric='quarter', pomdp=False, solved_reward=1.0, unsolved_reward=0.0, seed=None)\n for x in range(12):\n env.curriculum_reset(level=0)\n\n env.render()\n\n","sub_path":"rubiks.py","file_name":"rubiks.py","file_ext":"py","file_size_in_byte":20198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"151196173","text":"# Processing Lists\n# ----------------\n# Two of the most common operations on a list are modifying\n# each element of a list, and excluding elements of a list.\n#\n# A function that accepts an element of a list and returns\n# a modified value for that element is called a map function.\n#\n# A function that accepts an element of a list and returns\n# whether the element should be kept or excluded is called\n# a filter function.\n#\n# For example, given a list of numbers, we want to produce\n# another list which contains the squares of only the even\n# numbers in the list.\n\ndef even_squares(t):\n result = []\n for e in t:\n if e % 2 == 0:\n result.append(e ** 2)\n return result\n\nnaturals = [1, 2, 3, 4, 5]\nes = even_squares(naturals)\nprint('naturals ==', naturals)\nprint('es ==', es)\n\n# A nested list is a list within a list.\n# Using the above idiom, we can create lists of lists as well.\n\n# For example, let's produce a list whose elements are lists\n# containing the number and its square.\n\ndef number_square_pairs(t):\n result = []\n for e in t:\n result.append([e, e ** 2])\n return result\n\nnsp = number_square_pairs(naturals)\nprint('naturals ==', naturals)\nprint('nsp ==', nsp)\n\nnames = ['alaa', 'fiha', 'amna', 'hamza', 'mahnoor', 'hajra', 'safina', 'dua']\n\ndef map_to_upper(t):\n result = []\n for e in t:\n result.append(e.upper())\n return result\n\nupper_names = map_to_upper(names)\nprint('names ==', names)\nprint('upper_names ==', upper_names)\n\n# In general, a function that both maps and filters a list has\n# the following pattern:\n\ndef map_filter_list(t):\n result = []\n for e in t:\n if filter_func(e):\n result.append(map_true_func(e))\n else:\n result.append(map_false_func(e))\n return result\n\n# where:\n# map_*_func takes one element as its argument and returns\n# its modified value; and\n# filter_func takes one element as its argument and returns\n# True if the element is kept, False if it is to be excluded.\n\n\n\n# Exercise 1a\n# Write a function which accepts a list of numbers and an\n# exponent as arguments and returns a list of numbers with each\n# element raised to the exponent.\n\ndef exponents(t, exp):\n result = []\n for e in t:\n result.append(e ** exp)\n return 
result\n\nprint('naturals ==', naturals)\nprint('exponents(naturals, 2) ==', exponents(naturals, 2))\n# -> [1, 4, 9, 16, 25]\n\n# Exercise 1b\n# Modify the above function such that it produces nested lists\n# which contain the number and its exponent.\n\ndef number_exponent_pairs(t, exp):\n result = []\n for e in t:\n result.append([e, e ** exp])\n return result\n\nprint('number_exponent_pairs(naturals, 2) ==', number_exponent_pairs(naturals, 2))\n# -> [[1, 1], [2, 4], [3, 9], [4, 16], [5, 25]]\n\n# Exercise 1c\n# Modify the above function such that it only return the odd\n# numbers in a given list and their exponent.\n\ndef odd_number_exponent_pairs(t, exp):\n result = []\n for e in t:\n if e % 2 == 1:\n result.append([e, e ** exp])\n return result\n\nprint('odd_number_exponent_pairs(naturals, 2) ==', odd_number_exponent_pairs(naturals, 2))\n# -> [[1, 1], [3, 9], [5, 25]]\n\n\n\n# Exercise 2\n# Write a function that returns the list of numbers that are\n# divisible by both x and y.\n\ndef multiples(t, x, y):\n result = []\n for e in t:\n if e % x == 0 and e % y == 0:\n result.append(e)\n return result\n\nprint('multiples(range(50), 2, 5) ==', multiples(range(50), 2, 5))\n# -> [0, 10, 20, 30, 40]\n\n# Note: the above pattern works with any sequence (e.g., range).\n\n\n\n# Exercise 3\n# Write a function that returns a nested list with each element\n# and its parity.\n\ndef number_parity_pairs(t):\n result = []\n for e in t:\n if e % 2 == 0:\n result.append([e, 'even'])\n else:\n result.append([e, 'odd'])\n return result\n\nprint('number_parity_pairs(naturals) ==', number_parity_pairs(naturals))\n# -> [[1, 'odd'], [2, 'even'], [3, 'odd'], [4, 'even'], [5, 'odd']]\n\n\n\n# Exercise 4a\n# Write a function that converts a list of masses in kilograms to\n# grams. Note: to convert from kg to g, multiply by one thousand.\n\ndef convert_kg_to_g(t):\n result = []\n for e in t:\n result.append(e * 1000)\n return result\n\nprint('convert_kg_to_g(naturals) ==', convert_kg_to_g(naturals))\n# -> [1000, 2000, 3000, 4000, 5000]\n\n# Exercise 4b\n# Write a function that converts a list of masses in kilograms to\n# pounds. 
Note: to convert from kg to lbs, multiply by 2.2046.\n\ndef convert_kg_to_lbs(t):\n    result = []\n    for e in t:\n        result.append(e * 2.2046)\n    return result\n\nprint('convert_kg_to_lbs(naturals) ==', convert_kg_to_lbs(naturals))\n# -> [2.2046, 4.4092, 6.6138, 8.8184, 11.023]\n\n# Exercise 4c\n# Modify the above function to accept the conversion unit as an\n# argument and make the appropriate conversion.\n# If the unit is unrecognized, return an unmodified list.\n\ndef convert_kg_to(t, unit):\n    result = []\n    for e in t:\n        if unit == 'g':\n            result.append(e * 1000)\n        elif unit == 'lbs':\n            result.append(e * 2.2046)\n        else:\n            result.append(e)\n    return result\n\nprint(\"convert_kg_to(naturals, 'g') ==\", convert_kg_to(naturals, 'g'))\n# -> [1000, 2000, 3000, 4000, 5000]\nprint(\"convert_kg_to(naturals, 'lbs') ==\", convert_kg_to(naturals, 'lbs'))\n# -> [2.2046, 4.4092, 6.6138, 8.8184, 11.023]\nprint(\"convert_kg_to(naturals, 'st') ==\", convert_kg_to(naturals, 'st'))\n# -> [1, 2, 3, 4, 5]\n\n# Alternative:\ndef convert_kg_to(t, unit):\n    if unit == 'g':\n        return convert_kg_to_g(t)\n    if unit == 'lbs':\n        return convert_kg_to_lbs(t)\n    # There is a potential bug if the list isn't copied.\n    return t.copy()\n\nprint(\"convert_kg_to(naturals, 'g') ==\", convert_kg_to(naturals, 'g'))\n# -> [1000, 2000, 3000, 4000, 5000]\nprint(\"convert_kg_to(naturals, 'lbs') ==\", convert_kg_to(naturals, 'lbs'))\n# -> [2.2046, 4.4092, 6.6138, 8.8184, 11.023]\nprint(\"convert_kg_to(naturals, 'st') ==\", convert_kg_to(naturals, 'st'))\n# -> [1, 2, 3, 4, 5]\n\n\n\n# Exercise 5\n# Read a file containing full names and form a nested list where\n# every element is a list containing first name and last name.\n\ndef first_last_name_pairs(filename):\n    f = open(filename)\n    names = f.readlines()\n    # Close the file handle once the lines have been read.\n    f.close()\n    result = []\n    for name in names:\n        result.append(name.split())\n    return result\n\nprint(\"first_last_name_pairs('mynames.txt') ==\", first_last_name_pairs('mynames.txt'))\n# -> [['Fiha', 'Ali'], ['Hamza', 'Jafri'], ['Mahnoor',\n# -> 'Mahboob'], ['Vaneeza', 'Iqbal'], ['Afifa', 'Bashir'],\n# -> ['Aala', 'Siddiqi'], ['Zunairah', 'Qureshi'],\n# -> ['Safina', 'Shalwani']]\n","sub_path":"Week 11/Lab11/week11_lab_sol.py","file_name":"week11_lab_sol.py","file_ext":"py","file_size_in_byte":6664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"562349123","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect, render, get_object_or_404\n\n\n# Create a basic object\ndef new_object(request, form, operation, template_name, redirect_success):\n    context = dict()\n\n    object_form = form(request.POST or None, prefix='object')\n\n    if request.method == 'POST':\n        if object_form.is_valid():\n            object_form.save()\n            return redirect(redirect_success)\n        else:\n            context['errors'] = object_form.errors\n\n    context['edit'] = False\n    context['form'] = object_form\n    context['operation'] = operation\n\n    return render(request=request, template_name=template_name, context=context)\n\n\n# Update a basic object\ndef update_object(request, pk, object_model, form, operation, template_name, redirect_success):\n    context = dict()\n\n    object_instance = get_object_or_404(object_model, pk=pk)\n    object_form = form(request.POST or None, instance=object_instance)\n\n    if request.method == 'POST':\n        if object_form.is_valid():\n            object_form.save()\n\n            return redirect(redirect_success)\n        else:\n            context['errors'] = object_form.errors\n    else:\n        
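# On a GET request, re-bind the form to the existing instance so its fields come pre-filled.\n        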
object_form = form(instance=object_instance)\n\n    context['form'] = object_form\n    context['operation'] = operation\n    context['edit'] = True\n    return render(request=request, template_name=template_name, context=context)\n\n\n# Delete ajax object\ndef delete_object(request, pk, object_model):\n    if request.method == 'POST':\n        try:\n            object_instance = object_model.objects.get(pk=pk)\n        except object_model.DoesNotExist:\n            return JsonResponse('Does Not Exist', status=404, safe=False)\n        object_instance.delete()\n        return JsonResponse('Success', status=200, safe=False)\n    else:\n        return JsonResponse('Bad Request', status=405, safe=False)\n","sub_path":"basic-crud/basic_crud.py","file_name":"basic_crud.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"646689401","text":"import sys\n#import math\nimport serial\nimport time\n#import pdb\nimport select\nimport struct\nimport termstatus\n\ndef find_prologix():\n    \"\"\"\n    Searches the ports for prologix devices to connect to.\n    \n    Return\n    ======\n    List of prologix ports available, list(serial.tools.list_ports_common.ListPortInfo)\n    \"\"\"\n    from serial.tools import list_ports\n\n    prologix = [p for p in list_ports.comports() if 'prologix' in p.description.lower()]\n\n    if len(prologix) == 0: raise Exception(\"Can't find a Prologix\")\n    \n    return prologix\n\n\nclass usbGPIB:\n    def __init__(self, device, gpibAddr, baud=9600, timeout=0.5, \n                 eot=b'\\004', debug=0, auto=False, log=False, tSleep=0.1):\n\n        #End of Transmission character\n        self.eot = eot\n        # EOT character number in the ASCII table\n        self.eotNum = struct.unpack('B',eot)[0]\n        self.debug = debug\n        self.auto = auto\n        self.tSleep = tSleep\n        self.log = log\n        self.gpibAddr = gpibAddr\n        self.device = device\n\n        #Connect to the GPIB-USB converter\n        self.ser = serial.Serial(device, baud, timeout=timeout)\n\n        self.refresh()\n    \n    def refresh(self):\n        \"\"\"\n        Sets up the GPIB connection\n        \"\"\"\n        self.command(\"++addr \"+str(self.gpibAddr)+\"\\n\", sleep=0.1)\n        self.command(\"++eos 3\\n\", sleep=0.1)\n        self.command(\"++mode 1\\n\", sleep=0.1)\n        \n        if self.auto:\n            self.command(\"++auto 1\\n\", sleep=0.1)\n        else:\n            self.command(\"++auto 0\\n\", sleep=0.1)\n        \n        self.command(\"++ifc\\n\",0.1)\n        self.command(\"++read_tmo_ms 3000\\n\",0.1)\n        self.command(\"++eot_char \"+str(self.eotNum)+\"\\n\",0.1)\n        self.command(\"++eot_enable 1\\n\",0.1)\n        \n    def getData(self, buf, sleep=None):\n        if sleep is None: sleep = self.tSleep + 0.1\n\n        data=b\"\"\n        dlen=0\n        if self.debug == True:\n            progressInfo = termstatus.statusTxt(\"0 bytes received\")\n        \n        while 1: # Repeat reading data until eot is found\n            while 1: # Read some data\n                readSock, writeSock, errSock = select.select([self.ser],[],[],3)\n                if len(readSock) == 1:\n                    data1 = readSock[0].read(buf)\n                    if self.debug == True:\n                        dlen=dlen+len(data1)\n                        progressInfo.update(str(dlen)+' bytes received')\n                    break\n            \n            if data1.endswith(self.eot): #if eot is found at the end\n                data = data + data1[:-1] #remove eot\n                break\n            else:\n                data = data + data1\n            time.sleep(sleep)\n\n        if self.debug == True:\n            progressInfo.end()\n        return data\n    \n    def query(self, string, buf=100, sleep=None):\n        \"\"\"Send a query to the device and return the result.\"\"\"\n        if sleep is None: sleep=self.tSleep\n        if self.log: print(\"?? %s\" % string, file=sys.stderr)\n        \n        cmd = string.encode() + b'\\n'\n        \n        self.ser.write(cmd)\n        \n        if not self.auto:\n            self.ser.write(\"++read eoi\\n\".encode()) #Change to listening mode\n        \n        self.ser.flush()\n        time.sleep(sleep)\n        \n        ret = self.getData(buf)\n        \n        if self.log: print(\"== %s\" % ret.strip(), file=sys.stderr)\n        \n        return ret\n\n    def srq(self):\n        \"\"\"Poll the device's SRQ\"\"\"\n        self.command(\"++srq\")\n        \n        while True: # Read some data\n            readSock, writeSock, errSock = select.select([self.ser],[],[],3)\n            if len(readSock) == 1:\n                data = readSock[0].read(100)\n                break\n\n        return data[:-2]\n    \n    def command(self, string, sleep=None):\n        \"\"\"Send a command to the device.\"\"\"\n        if sleep is None: sleep = self.tSleep\n        if self.log: print(\">> %s\" % string, file=sys.stderr)\n        \n        cmd = string.encode() + b'\\n'\n        self.ser.write(cmd)\n        self.ser.flush()\n        time.sleep(sleep)\n\n    def spoll(self):\n        \"\"\"Perform a serial polling and return the result.\"\"\"\n        self.command(\"++spoll\")\n        while 1: # Read some data\n            readSock, writeSock, errSock = select.select([self.ser],[],[],3)\n            if len(readSock) == 1:\n                data = readSock[0].read(100)\n                break\n\n        return data[:-2]\n    \n    def close(self):\n        self.ser.close()\n    \n    def setDebugMode(self, debugFlag):\n        if debugFlag:\n            self.debug=1\n        else:\n            self.debug=0","sub_path":"usbgpib.py","file_name":"usbgpib.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"346006891","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport logging\nfrom collections import defaultdict\n\nfrom django.contrib.auth.decorators import permission_required\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect\nfrom django.views.decorators.http import last_modified as cache_last_modified\nfrom django.views.decorators.cache import never_cache as force_cache_validation\nfrom django.views.generic import View\nfrom django.utils.translation import ugettext as _\nfrom django.core.cache import caches\nfrom django.views.generic.detail import BaseDetailView\nfrom mapentity.serializers import GPXSerializer\nfrom mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,\n                             MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,\n                             MapEntityDelete, MapEntityFormat, HttpJSONResponse, LastModifiedMixin)\n\nfrom geotrek.authent.decorators import same_structure_required\nfrom geotrek.common.utils import classproperty\nfrom geotrek.common.views import PublicOrReadPermMixin\nfrom geotrek.core.models import AltimetryMixin\n\nfrom .models import Path, Trail, Topology\nfrom .forms import PathForm, TrailForm\nfrom .filters import PathFilterSet, TrailFilterSet\nfrom . 
import graph as graph_lib\nfrom django.http.response import HttpResponse\nfrom django.contrib import messages\nfrom django.db.models import Sum\nfrom django.db.models.functions import Coalesce\nfrom geotrek.api.v2.functions import Length\nfrom django.db.models.fields import FloatField\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CreateFromTopologyMixin(object):\n def on_topology(self):\n pk = self.request.GET.get('topology')\n if pk:\n try:\n return Topology.objects.existing().get(pk=pk)\n except Topology.DoesNotExist:\n logger.warning(\"Intervention on unknown topology %s\" % pk)\n return None\n\n def get_initial(self):\n initial = super(CreateFromTopologyMixin, self).get_initial()\n # Create intervention with an existing topology as initial data\n topology = self.on_topology()\n if topology:\n initial['topology'] = topology.serialize(with_pk=False)\n return initial\n\n\nclass PathLayer(MapEntityLayer):\n properties = ['name', 'draft']\n queryset = Path.objects.all()\n\n def get_queryset(self):\n qs = super(PathLayer, self).get_queryset()\n if self.request.GET.get('no_draft'):\n qs = qs.exclude(draft=True)\n return qs\n\n\nclass PathList(MapEntityList):\n queryset = Path.objects.prefetch_related('networks').select_related('stake')\n filterform = PathFilterSet\n\n @classproperty\n def columns(cls):\n columns = ['id', 'checkbox', 'name', 'networks', 'length', 'length_2d']\n if settings.TRAIL_MODEL_ENABLED:\n columns.append('trails')\n return columns\n\n def get_template_names(self):\n return (u\"core/path_list.html\",)\n\n def get_queryset(self):\n \"\"\"\n denormalize ``trail`` column from list.\n \"\"\"\n qs = super(PathList, self).get_queryset()\n denormalized = {}\n if settings.TRAIL_MODEL_ENABLED:\n paths_id = qs.values_list('id', flat=True)\n paths_trails = Trail.objects.filter(aggregations__path__id__in=paths_id)\n by_id = dict([(trail.id, trail) for trail in paths_trails])\n trails_paths_ids = paths_trails.values_list('id', 'aggregations__path__id')\n for trail_id, path_id in trails_paths_ids:\n denormalized.setdefault(path_id, []).append(by_id[trail_id])\n\n for path in qs:\n path_trails = denormalized.get(path.id, [])\n setattr(path, '_trails', path_trails)\n return qs\n\n\nclass PathJsonList(MapEntityJsonList, PathList):\n def get_context_data(self, **kwargs):\n context = super(PathJsonList, self).get_context_data(**kwargs)\n context[\"sumPath\"] = round(self.object_list.aggregate(sumPath=Coalesce(Sum(Length('geom'), output_field=FloatField()), 0))['sumPath'] / 1000, 1)\n return context\n\n\nclass PathFormatList(MapEntityFormat, PathList):\n columns = [\n 'id', 'valid', 'visible', 'name', 'comments', 'departure', 'arrival',\n 'comfort', 'source', 'stake', 'usages', 'networks',\n 'structure', 'date_insert', 'date_update',\n 'cities', 'districts', 'areas', 'length_2d'\n ] + AltimetryMixin.COLUMNS\n\n\nclass PathDetail(MapEntityDetail):\n model = Path\n\n def get_context_data(self, *args, **kwargs):\n context = super(PathDetail, self).get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n return context\n\n\nclass PathGPXDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n queryset = Path.objects.all()\n\n def render_to_response(self, context):\n gpx_serializer = GPXSerializer()\n response = HttpResponse(content_type='application/gpx+xml')\n response['Content-Disposition'] = 'attachment; filename=\"%s.gpx\"' % self.object\n gpx_serializer.serialize([self.object], stream=response, geom_field='geom')\n 
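# serialize() has already streamed the GPX document into the response, so it can be returned as-is.\n        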
return response\n\n\nclass PathKMLDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n queryset = Path.objects.all()\n\n def render_to_response(self, context):\n response = HttpResponse(self.object.kml(),\n content_type='application/vnd.google-earth.kml+xml')\n response['Content-Disposition'] = 'attachment; filename=\"%s.kml\"' % self.object\n return response\n\n\nclass PathDocument(MapEntityDocument):\n model = Path\n\n def get_context_data(self, *args, **kwargs):\n language = self.request.LANGUAGE_CODE\n self.get_object().prepare_elevation_chart(language, self.request.build_absolute_uri('/'))\n return super(PathDocument, self).get_context_data(*args, **kwargs)\n\n\nclass PathCreate(MapEntityCreate):\n model = Path\n form_class = PathForm\n\n def dispatch(self, *args, **kwargs):\n if self.request.user.has_perm('core.add_path') or self.request.user.has_perm('core.add_draft_path'):\n return super(MapEntityCreate, self).dispatch(*args, **kwargs)\n return super(PathCreate, self).dispatch(*args, **kwargs)\n\n\nclass PathUpdate(MapEntityUpdate):\n model = Path\n form_class = PathForm\n\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n path = self.get_object()\n if path.draft and not self.request.user.has_perm('core.change_draft_path'):\n messages.warning(self.request, _(\n u'Access to the requested resource is restricted. You have been redirected.'))\n return redirect('core:path_detail', **kwargs)\n if not path.draft and not self.request.user.has_perm('core.change_path'):\n messages.warning(self.request, _(\n u'Access to the requested resource is restricted. You have been redirected.'))\n return redirect('core:path_detail', **kwargs)\n if path.draft and self.request.user.has_perm('core.change_draft_path'):\n return super(MapEntityUpdate, self).dispatch(*args, **kwargs)\n return super(PathUpdate, self).dispatch(*args, **kwargs)\n\n\nclass PathDelete(MapEntityDelete):\n model = Path\n\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n path = self.get_object()\n if path.draft and not self.request.user.has_perm('core.delete_draft_path'):\n messages.warning(self.request, _(\n u'Access to the requested resource is restricted. You have been redirected.'))\n return redirect('core:path_detail', **kwargs)\n if not path.draft and not self.request.user.has_perm('core.delete_path'):\n messages.warning(self.request, _(\n u'Access to the requested resource is restricted. 
You have been redirected.'))\n return redirect('core:path_detail', **kwargs)\n if path.draft and self.request.user.has_perm('core.delete_draft_path'):\n return super(MapEntityDelete, self).dispatch(*args, **kwargs)\n return super(PathDelete, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(PathDelete, self).get_context_data(**kwargs)\n topologies_by_model = defaultdict(list)\n if 'geotrek.core' in settings.INSTALLED_APPS:\n for trail in self.object.trails:\n topologies_by_model[_('Trails')].append({'name': trail.name, 'url': trail.get_detail_url()})\n if 'geotrek.trekking' in settings.INSTALLED_APPS:\n for trek in self.object.treks:\n topologies_by_model[_('Treks')].append({'name': trek.name, 'url': trek.get_detail_url()})\n for service in self.object.services:\n topologies_by_model[_('Services')].append({'name': service.type.name, 'url': service.get_detail_url()})\n for poi in self.object.pois:\n topologies_by_model[_('Pois')].append({'name': poi.name, 'url': poi.get_detail_url()})\n if 'geotrek.infrastructure' in settings.INSTALLED_APPS:\n for signage in self.object.signages:\n topologies_by_model[_('Signages')].append({'name': signage.name, 'url': signage.get_detail_url()})\n for infrastructure in self.object.infrastructures:\n topologies_by_model[_('Infrastructures')].append({'name': infrastructure.name, 'url': infrastructure.get_detail_url()})\n if 'geotrek.maintenance' in settings.INSTALLED_APPS:\n for intervention in self.object.interventions:\n topologies_by_model[_('Interventions')].append({'name': intervention.name, 'url': intervention.get_detail_url()})\n context['topologies_by_model'] = dict(topologies_by_model)\n return context\n\n\n@login_required\n@cache_last_modified(lambda x: Path.latest_updated())\n@force_cache_validation\ndef get_graph_json(request):\n cache = caches['fat']\n key = 'path_graph_json'\n\n result = cache.get(key)\n latest = Path.latest_updated()\n\n if result and latest:\n cache_latest, json_graph = result\n # Not empty and still valid\n if cache_latest and cache_latest >= latest:\n return HttpJSONResponse(json_graph)\n\n # cache does not exist or is not up to date\n # rebuild the graph and cache the json\n graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.exclude(draft=True))\n json_graph = json.dumps(graph)\n\n cache.set(key, (latest, json_graph))\n return HttpJSONResponse(json_graph)\n\n\nclass TrailLayer(MapEntityLayer):\n queryset = Trail.objects.existing()\n properties = ['name']\n\n\nclass TrailList(MapEntityList):\n queryset = Trail.objects.existing()\n filterform = TrailFilterSet\n columns = ['id', 'name', 'departure', 'arrival', 'length']\n\n\nclass TrailJsonList(MapEntityJsonList, TrailList):\n pass\n\n\nclass TrailFormatList(MapEntityFormat, TrailList):\n columns = [\n 'id', 'name', 'comments', 'departure', 'arrival',\n 'structure', 'date_insert', 'date_update',\n 'cities', 'districts', 'areas',\n ] + AltimetryMixin.COLUMNS\n\n\nclass TrailDetail(MapEntityDetail):\n queryset = Trail.objects.existing()\n\n def get_context_data(self, *args, **kwargs):\n context = super(TrailDetail, self).get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n return context\n\n\nclass TrailGPXDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n queryset = Trail.objects.existing()\n\n def render_to_response(self, context):\n gpx_serializer = GPXSerializer()\n response = HttpResponse(content_type='application/gpx+xml')\n 
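# Mark the GPX document as a file download named after the trail object.\n        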
response['Content-Disposition'] = 'attachment; filename=\"%s.gpx\"' % self.object\n        gpx_serializer.serialize([self.object], stream=response, geom_field='geom')\n        return response\n\n\nclass TrailKMLDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n    queryset = Trail.objects.existing()\n\n    def render_to_response(self, context):\n        response = HttpResponse(self.object.kml(),\n                                content_type='application/vnd.google-earth.kml+xml')\n        response['Content-Disposition'] = 'attachment; filename=\"%s.kml\"' % self.object\n        return response\n\n\nclass TrailDocument(MapEntityDocument):\n    queryset = Trail.objects.existing()\n\n\nclass TrailCreate(CreateFromTopologyMixin, MapEntityCreate):\n    model = Trail\n    form_class = TrailForm\n\n\nclass TrailUpdate(MapEntityUpdate):\n    queryset = Trail.objects.existing()\n    form_class = TrailForm\n\n    @same_structure_required('core:trail_detail')\n    def dispatch(self, *args, **kwargs):\n        return super(TrailUpdate, self).dispatch(*args, **kwargs)\n\n\nclass TrailDelete(MapEntityDelete):\n    queryset = Trail.objects.existing()\n\n    @same_structure_required('core:trail_detail')\n    def dispatch(self, *args, **kwargs):\n        return super(TrailDelete, self).dispatch(*args, **kwargs)\n\n\n@permission_required('core.change_path')\ndef merge_path(request):\n    \"\"\"\n    Path merging view\n    \"\"\"\n    response = {}\n\n    if request.method == 'POST':\n        ids_path_merge = request.POST.getlist('path[]')\n\n        assert len(ids_path_merge) == 2\n\n        path_a = Path.objects.get(pk=ids_path_merge[0])\n        path_b = Path.objects.get(pk=ids_path_merge[1])\n\n        if not path_a.same_structure(request.user) or not path_b.same_structure(request.user):\n            response = {'error': _(u\"You don't have the right to change these paths\")}\n            return HttpJSONResponse(response)\n\n        if path_a.draft != path_b.draft:\n            response = {'error': _(u\"You can't merge 1 draft path with 1 normal path\")}\n            return HttpJSONResponse(response)\n\n        try:\n            result = path_a.merge_path(path_b)\n        except Exception as exc:\n            response = {'error': u'%s' % exc, }\n            return HttpJSONResponse(response)\n\n        if result == 2:\n            response = {'error': _(u\"You can't merge 2 paths with a 3rd path in the intersection\")}\n        elif result == 0:\n            response = {'error': _(u\"No matching points to merge paths found\")}\n        else:\n            response = {'success': _(u\"Paths merged successfully\")}\n            messages.success(request, _(u\"Paths merged successfully\"))\n\n    return HttpJSONResponse(response)\n\n\nclass ParametersView(View):\n    def get(self, request, *args, **kwargs):\n        response = {\n            'geotrek_admin_version': settings.VERSION,\n        }\n        return HttpResponse(json.dumps(response), content_type=\"application/json\")\n","sub_path":"geotrek/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"130631657","text":"import argparse\nimport torch\nimport torch.distributions as distributions\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utils\nimport networks\nfrom sklearn import datasets\nfrom matplotlib.colors import ListedColormap\nimport data_utils\ndevice = torch.device('cuda:' + str(0) if torch.cuda.is_available() else 'cpu')\n\n\n\n\n\ndef decision_boundary(net, X):\n    # Plot the decision boundary. For that, we will assign a color to each\n    # point in the mesh [x_min, x_max]x[y_min, y_max].\n    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n    xx, yy = np.meshgrid(np.arange(x_min, x_max, .02),\n                         np.arange(y_min, y_max, .02))\n    xxt = torch.from_numpy(xx.ravel()).float()\n    yyt = torch.from_numpy(yy.ravel()).float()\n    xxyy = torch.cat([xxt[:, None], yyt[:, None]], dim=1)\n    logits = net(xxyy)\n    Z = logits.argmax(1)\n    plt.pcolormesh(xx, yy, Z.numpy().reshape(xx.shape), cmap=ListedColormap(['r', 'b']), alpha=.1)\n\n\n\n\ndef main(args):\n    utils.makedirs(args.save)\n    net = networks.SmallMLP(2, 2, n_hid=args.hid)\n\n    if args.dataset == \"moons\":\n        Xf, Y = datasets.make_moons(1000, noise=.1)\n        Xfte, Yte = datasets.make_moons(1000, noise=.1)\n        Xoh, Xohte = [], []\n    elif args.dataset == \"circles\":\n        Xf, Y = datasets.make_circles(1000, noise=.03)\n        Xfte, Yte = datasets.make_circles(1000, noise=.03)\n        Xoh, Xohte = [], []\n    elif args.dataset == \"adult\":\n        with open(\"data/adult/adult.data\", 'r') as f:\n            Xf, Xoh, Y = data_utils.load_adult()\n        with open(\"data/adult/adult.test\", 'r') as f:\n            Xfte, Xohte, Yte = data_utils.load_adult()\n\n    else:\n        raise NotImplementedError\n\n    Xf = Xf.astype(np.float32)\n    Xfl, Xohl, Yl = [], [], []\n    if args.n_labels_per_class != -1:\n        Xfl.extend(Xf[Y == 0][:args.n_labels_per_class])\n        Xfl.extend(Xf[Y == 1][:args.n_labels_per_class])\n        Yl.extend([0] * args.n_labels_per_class)\n        Yl.extend([1] * args.n_labels_per_class)\n        if Xoh is not None:\n            Xohl.extend(Xf[Y == 0][:args.n_labels_per_class])\n            Xohl.extend(Xf[Y == 1][:args.n_labels_per_class])\n    else:\n        Xfl, Xohl, Yl = Xf, Xoh, Y\n\n    # The labelled subset may still be a Python list here; make it an array so\n    # it can be indexed and plotted below.\n    Xfl = np.asarray(Xfl, dtype=np.float32)\n\n    def plot_data(fname=\"data.png\"):\n        plt.clf()\n        decision_boundary(net, Xf)\n        plt.scatter(Xf[:, 0], Xf[:, 1], c='grey')\n        plt.scatter(Xfl[:args.n_labels_per_class, 0], Xfl[:args.n_labels_per_class, 1], c='r')\n        plt.scatter(Xfl[args.n_labels_per_class:, 0], Xfl[args.n_labels_per_class:, 1], c='b')\n        plt.savefig(\"{}/{}\".format(args.save, fname))\n\n    optim = torch.optim.Adam(params=net.parameters(), lr=args.lr)\n\n    xl = torch.from_numpy(Xfl).to(device)\n    yl = torch.from_numpy(np.array(Yl)).to(device)\n    x_te, y_te = torch.from_numpy(Xfte).float(), torch.from_numpy(Yte)\n    inds = list(range(Xf.shape[0]))\n    for i in range(args.n_iters):\n        batch_inds = np.random.choice(inds, args.batch_size, replace=False)\n        x = Xf[batch_inds]\n        x = torch.from_numpy(x).to(device).requires_grad_()\n\n        logits = net(xl)\n        clf_loss = nn.CrossEntropyLoss(reduction='none')(logits, yl).mean()\n\n        logits_u = net(x)\n        logpx_plus_Z = logits_u.logsumexp(1)\n        sp = utils.keep_grad(logpx_plus_Z.sum(), x)\n        e = torch.randn_like(sp)\n        eH = utils.keep_grad(sp, x, grad_outputs=e)\n        trH = (eH * e).sum(-1)\n\n        sm_loss = trH + .5 * (sp ** 2).sum(-1)\n        sm_loss = sm_loss.mean()\n\n        loss = (1 - args.sm_lam) * clf_loss + args.sm_lam * sm_loss\n        optim.zero_grad()\n        loss.backward()\n        optim.step()\n\n        if i % 100 == 0:\n            if args.dataset in (\"circles\", \"moons\"):\n                plot_data(\"data_{}.png\".format(i))\n            te_logits = net(x_te.float())\n            te_preds = torch.argmax(te_logits, 1)\n            te_acc = (te_preds == y_te).float().mean()\n            print(\"Iter {}: Clf Loss = {}, SM Loss = {} | Test Accuracy = {}\".format(i,\n                                                                                     clf_loss.item(), sm_loss.item(),\n                                                                                     te_acc.item()))\n\n\n\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\"Energy Based Models and Shit\")\n    # logging + evaluation\n    parser.add_argument(\"--save\", type=str, default='.')\n    parser.add_argument(\"--seed\", type=int, 
default=1234)\n parser.add_argument(\"--dim\", type=int, default=10)\n parser.add_argument(\"--hid\", type=int, default=100)\n parser.add_argument(\"--n_labels_per_class\", type=int, default=3)\n parser.add_argument(\"--n_iters\", type=int, default=10000)\n parser.add_argument(\"--sm_lam\", type=float, default=.8)\n parser.add_argument(\"--dist\", type=str, default=\"gaussian\")\n parser.add_argument(\"--posterior\", type=str, default=\"gaussian-1\")\n parser.add_argument(\"--dataset\", type=str, default=\"moons\")\n parser.add_argument(\"--std_sample\", type=str, default=\"log\", choices=[\"log\", \"linear\"])\n # optimization\n parser.add_argument(\"--lr\", type=float, default=1e-3)\n parser.add_argument(\"--decay_epochs\", nargs=\"+\", type=int, default=[])\n parser.add_argument(\"--decay_rate\", type=float, default=.1)\n parser.add_argument(\"--optimizer\", choices=[\"adam\", \"sgd\"], default=\"adam\")\n parser.add_argument(\"--batch_size\", type=int, default=500)\n # regularization\n parser.add_argument(\"--weight_decay\", type=float, default=0.0)\n # network\n parser.add_argument(\"--network\", type=str, default=\"mlp\", choices=[\"mlp\", \"resnet\"])\n # EBM specific\n parser.add_argument(\"--n_steps\", type=int, default=10000)\n parser.add_argument(\"--sgld_lr\", type=float, default=1.0)\n parser.add_argument(\"--sgld_std\", type=float, default=1e-2)\n parser.add_argument(\"--ckpt_every\", type=int, default=10)\n parser.add_argument(\"--eval_every\", type=int, default=1)\n parser.add_argument(\"--print_every\", type=int, default=100)\n parser.add_argument(\"--sample_every\", type=int, default=100)\n parser.add_argument(\"--load_path\", type=str, default=None)\n parser.add_argument(\"--print_to_log\", action=\"store_true\")\n parser.add_argument(\"--form\", type=str, default=\"critic\")\n parser.add_argument(\"--direct_loss\", action=\"store_true\")\n parser.add_argument(\"--logit\", action=\"store_true\")\n\n args = parser.parse_args()\n main(args)\n","sub_path":"jemi_supervised_toy.py","file_name":"jemi_supervised_toy.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"184778087","text":"from typing import TYPE_CHECKING\n\nfrom vk_api.keyboard import VkKeyboard, VkKeyboardColor\nfrom vk_api.longpoll import Event\n\nfrom vkinder.helpers import write_msg\nfrom vkinder.models import User\nfrom vkinder.state._base import State\n\nif TYPE_CHECKING:\n from vkinder.bot import Bot\n from vkinder.state import StateName\n\n\nclass HelloState(State):\n key = \"hello\"\n\n text = (\n \"Привет, {first_name}! \"\n \"Я бот-сваха, прямо как Роза Сябитова, только со мной не страшно. \"\n \"Я помогу тебе найти идеальную пару! \"\n \"Ну, или хотя бы какую-нибудь. Приступим? 
\"\n \"Жми на кнопку!\"\n )\n\n @classmethod\n def enter(cls, bot: \"Bot\", event: Event) -> None:\n user = bot.storage.get(User, event.user_id)\n\n keyboard = VkKeyboard(one_time=True)\n keyboard.add_button(\"Новый поиск\", color=VkKeyboardColor.PRIMARY)\n\n write_msg(\n bot.group_session,\n event.user_id,\n cls.text.format(first_name=user.first_name),\n keyboard=keyboard.get_keyboard(),\n )\n\n @classmethod\n def leave(cls, bot: \"Bot\", event: Event) -> \"StateName\":\n from vkinder.state import StateName\n\n if event.text == \"Новый поиск\":\n return StateName.SELECT_COUNTRY\n else:\n return StateName.HELLO_ERROR\n\n\nclass HelloErrorState(HelloState):\n key = \"hello_error\"\n\n text = (\n \"Извини, {first_name}, я не знаю такой команды. \"\n \"Используй, пожалуйста, кнопки, чтобы мне было проще тебя понимать. \"\n \"Нажми на кнопку ниже, чтобы начать новый поиск.\"\n )\n\n\nclass HelloAgainState(HelloState):\n key = \"hello_again\"\n\n text = (\n \"Ты находишься в главном меню, {first_name}. Начнём новый поиск? \"\n \"Если ты уже искал людей раньше, то можно просмотреть результаты \"\n \"предыдущих поисков.\"\n )\n","sub_path":"vkinder/state/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"410479687","text":"# Словари для выплывающих неменяемых списков\n# Способ проверки @then('\"{sl_block}\" has default \"{table}\" items')\n#\n# Метод списания (по умолчанию)\nrouteWithdrawType = [\n 'По тех. карте',\n 'По продукту',\n 'Сначала по продукту, затем по тех. карте',\n 'Не списывать'\n]\n#\n# (Базовая) Единица измерения по умолчанию (для новых модификаторов (ингредиентов, полуфабрикатов))\nnewMeasureUnit = [\n 'шт',\n 'кг',\n 'л',\n 'пц'\n]\n#\n# Ед. 
изм.\nmeasureUnit = [\n 'шт',\n 'кг',\n 'л',\n 'пц'\n]\n#\n# Способ реализации (по умолчанию)\nsellingType = [\n 'Не может продаваться частями',\n 'Может продаваться частями',\n 'Требовать ввод веса при продаже'\n]\n#\n# НДС\nvat = [\n '<без НДС>',\n '0 %',\n '10 %',\n '18 %'\n]\n#\n# Тип покупателя\ncustomerType = [\n 'Гость',\n 'Сотрудник',\n 'Контрагент'\n]\n#\n# Тип корреспондента\ncorrespondentType = [\n 'Клиент',\n 'Сотрудник',\n 'Контрагент',\n 'Списание'\n]\n#\n# Алкоголь/Остатки по ЕГАИС\nunitType = [\n 'Фасованная, шт.',\n 'Нефасованная, дал'\n]\n#\n# CRM/Клиенты\n# Пол\nsexType = [\n 'Мужской',\n 'Женский'\n]\n#\n# Специальное предложение\nbonusConditionType = [\n 'Полное списание',\n 'Кратное количеству блюд по акции'\n]\n# Финанся/Статьи\n# Тип статьи\naccountType = [\n 'Расходная',\n 'Приходная',\n 'Приходно-расходная'\n]\n#\n# Справочники/Типы оплат\n# Тип операции\noperationType = [\n 'Фискальный',\n 'Нефискальный',\n 'За счет заведения'\n]\n#\n# Механизм платежа\npaymentMechanism = [\n 'Налчные',\n 'POS',\n 'Бонусы'\n]\n#\n# Тип контрагента\npartnerType = [\n 'Сотрудник',\n 'Клиент',\n 'Контрагент',\n 'Не определен'\n]\n#\n# Устройства / Банковские терминалы\nposType = [\n 'Не определен',\n 'iBox',\n 'POS'\n]\n# Предприятие/Настройки\n# Вклад отрицательных остатков\nnegativeBalanceContribution = [\n 'Отсутствует',\n '1 единица',\n 'Абсолютная величина'\n]\n#\n# Вклад нулевых остатков\nzeroBalanceContribution = [\n 'Отсутствует',\n '1 единица'\n]\n#\n# Себестоимость для отрицательных и нулевых остатков\nbalancePrimeCost = [\n 'Отображать только для положительных остатков',\n 'Отображать только для положительных и нулевых остатков',\n 'Отображать только для положительных и отрицательных остатков',\n 'Отображать всегда'\n]\n#\n# Себестоимость продуктов\nrouteWithdrawMode = [\n 'По типу списания',\n 'По продукту',\n 'По тех. карте рекурсивно',\n 'По тех. 
карте нерекурсивно'\n]\n#\n# Текущие остатки\nactualBalancePrimeCost = [\n 'На текущий момент',\n 'Вместе с будущими операциями']\n#\n# Исторические остатки\nsnapshotBalancePrimeCost = [\n 'До проведения документа',\n 'После проведения документа',\n 'На текущий момент',\n 'Вместе с будущими операциями'\n]\n#\n# Метод расчета себестоимости\nprimeCostCalculationMethod = [\n 'Среднее арифметическое для положительных остатков',\n 'Среднее арифметическое',\n 'По последнему приходу'\n]\n#\n# Наложение скидок (надбавок)\noverlayType = [\n 'Наложение',\n 'Максимальное значение',\n 'Минимальное значение'\n]\n#\n# Режим расчета операционной прибыли\noperatingRevenueCalculationMode = [\n 'Себестоимость продаж минус приходные накладные',\n 'Отображать только себестоимость продаж'\n]\n# Создание номенклатурной единицы из тех карты\nadd_nomenclature = [\n 'Блюдо',\n 'Модификатор',\n 'Ингредиент',\n 'П/ф',\n]\n","sub_path":"resources/rus/dropdown.py","file_name":"dropdown.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"437283806","text":"from bs4 import BeautifulSoup\nimport urllib3\n\nclass LyricsSearcher():\n def __init__(self):\n self.sites = ['azlyrics'] \n \n \n def _get_next_site(self):\n \"\"\"\n generator to fetch the next website\n \"\"\"\n for site in self.sites:\n yield(site)\n\n \n def _az_format(self,link):\n \"\"\"\n Function that creates formatting specifically for azlyrics.com\n \"\"\"\n link = link.lower()\n \n #azlyrics currently strips single quotes out of names\n link = link.replace(\"\\'\",\"\")\n \n #this removes all spaces in the title\n link = link.split()\n\n return ''.join(link)\n \n #creates a properly formatted url for the Lyrics Searcher to use\n def _construct_link(self,link,artist,title):\n if link == 'azlyrics':\n artist = self._az_format(artist)\n title = self._az_format(title)\n return \"http://www.\" + link + \".com/lyrics/\" + artist + \"/\" + title + \".html\"\n else:\n return NotImplemented\n \n def _fetch_site_data(self,artist,title): \n \"\"\" \n \n Sends a GET request to obtain the requested lyrics\n returns both a HTTPResponse object as well as the correct url\n \"\"\"\n http = urllib3.PoolManager()\n \n for site in self.sites: \n \n url = self._construct_link(site,artist,title)\n \n #this heuristic attempts to find a url from a given site\n #and will start the next one if it does not \n try:\n requested_data = http.request(\"GET\",url)\n \n #if r exists but is not an OK from the website, continue\n if requested_data.status != 200:\n continue\n \n break\n\n # if url is not found, continue\n except urllib3.exceptions.LocationValueError:\n continue\n \n if not requested_data:\n return None \n \n return [requested_data,url]\n\n def _az_fetch(self,artist,title,requested_data): \n \"\"\"\n Function fetches lyrics from azlyrics specifically\n \"\"\"\n soup = BeautifulSoup(requested_data.data, \"lxml\") \n #fetches data\n if not requested_data:\n return None\n #fetches the lyrics\n lyrics = soup.find_all(\"div\", attrs={\"class\": None, \"id\": None})\n \n if not lyrics:\n return None\n \n lyrics = [line.getText() for line in lyrics]\n lyrics = [x.rstrip() for x in lyrics[0].split(\"\\n\")]\n lyrics = [\">\" + x for x in lyrics]\n\n lyrics.insert(0,title.upper())\n lyrics.insert(0,artist.upper())\n \n \n #reddit requires TWO newlines in order to be interpreted \n return \"\\n\\n\".join(lyrics)\n\n def get_lyrics(self,artist,title):\n \"\"\"\n Function returns lyrics 
from a requested artist and title\n \"\"\"\n site_data = self._fetch_site_data(artist,title)\n\n # _fetch_site_data can return None when no site yields a usable response\n if not site_data:\n return None\n\n requested_data = site_data[0]\n url = site_data[1] \n \n #implementation works for az_lyrics currently\n if \"azlyrics\" in url: \n return self._az_fetch(artist,title,requested_data) \n \n return None\n","sub_path":"Bots/LyricsSearcher/LyricsSearcher.py","file_name":"LyricsSearcher.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"323130336","text":"import sys\nfrom estimator import Estimator\nfrom classifier import Classifier\n\nfrom lib import db\nfrom lib import cli\nfrom csv import DictWriter\nfrom lib import logger\nfrom lib import aggregator as ag\nfrom lib.node import nodegen\nfrom collections import namedtuple\nfrom configparser import ConfigParser\nfrom multiprocessing import Pool\n\nResults = namedtuple('Results', [ 'keys', 'values', ])\nclass ResultsWriter:\n def __init__(self, header):\n self.header = header\n self.writer = None\n\n def write(self, results):\n if not results.values:\n return\n \n if not self.writer:\n self.writer = DictWriter(sys.stdout, results.keys, delimiter=';')\n if self.header:\n self.writer.writeheader()\n \n self.writer.writerows(results.values)\n\n#\n# Mappings between configuration options and learning\n# interfaces. Dictionary keys should have a corresponding key in the\n# .ini file!\n#\nmachine_ = {\n 'classification': Classifier,\n 'estimation': Estimator,\n}\n\naggregator_ = {\n 'simple': ag.simple,\n 'change': ag.change,\n 'average': ag.average,\n 'difference': ag.difference,\n}\n\n#\n# Run the prediction!\n#\ndef run(args):\n (index, node, (config,)) = args\n\n log = logger.getlogger()\n log.info('node: {0}'.format(node))\n\n opts = config['machine']\n machine = machine_[opts['model']]\n aggregator = aggregator_[opts['feature-transform']]\n model = machine(node, config, aggregator)\n\n keys = model.header()\n values = []\n try:\n values = model.predict(model.classify())\n except ValueError as v:\n log.error(v)\n\n return Results(keys, values)\n\n#\n# Setup\n#\n\nlog = logger.getlogger(True)\nlog.info('phase 1')\nlog.info('db version: {0}'.format(db.mark()))\n\ncargs = cli.CommandLine(cli.optsfile('prediction')) # /etc/opts/prediction\n\nconfig = ConfigParser()\nconfig.read(cargs.args.config) # --config\n\nparams = config['parameters']\nwriter = ResultsWriter(config['output'].getboolean('print-header'))\n\n# Establish the database credentials. 
Passing an empty dict uses the\n# defaults.\ndbinfo = dict(config['database']) if 'database' in config else {}\ndb.EstablishCredentials(**dbinfo)\n\n#\n# Processing\n#\nlog.info('phase 2')\n\nif 'node' in params:\n results = run((0, int(params['node']), (config,)))\n writer.write(results)\nelse:\n with Pool() as pool:\n for i in pool.imap_unordered(run, nodegen(config), 1):\n writer.write(i)\n\n#\n# Tear down\n#\nlog.info('phase 3')\n","sub_path":"src/prediction/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"85847965","text":"\"\"\"Save Humans!\n\nUsage:\n\tsimulator <simulation>\n\nOptions:\n -h --help Show this screen.\n --version Show version.\n\n\"\"\"\n\nfrom os import kill\nfrom sys import stderr\nfrom typing import Any, Callable, Dict, List, Tuple, Type, Union\n\nfrom docopt import docopt\n\nfrom codeingame import Ash, Human, Point, Zombie, Game, Field\n\nimport sys\nimport os\n\ntry:\n\timport pygame\nexcept ImportError:\n\tpass\n\ndef main(simulation: str = None, enable_graphics: bool = True):\n\tif not simulation:\n\t\targuments = docopt(__doc__)\n\t\tsimulation = arguments['<simulation>']\n\n\tash = None\n\thumans = []\n\tzombies = []\n\n\tfile = os.path.join(os.getcwd(), 'simulations', f'{simulation}.siml')\n\n\tif not os.path.exists(file):\n\t\tprint(f'Simulation {file} does not exist')\n\t\tsys.exit(1)\n\n\twith open(file) as f:\n\t\tfor entity in f.read().split('\\n'):\n\t\t\tprint(f'parsing entity {entity}', entity)\n\n\t\t\tif entity.startswith('A'):\n\t\t\t\tash = Ash(*[int(i) for i in entity.split()[1:]])\n\n\t\t\telif entity.startswith('H'):\n\t\t\t\thumans.append(Human(*[int(i) for i in entity.split()[1:]]))\n\n\t\t\telif entity.startswith('Z'):\n\t\t\t\tzombies.append(Zombie(*[int(i) for i in entity.split()[1:]]))\n\n\t\t\telse:\n\t\t\t\tif entity:\n\t\t\t\t\tprint(f'unparsable entity: {entity}', file=sys.stderr, flush=True)\n\n\tif not ash or not humans or not zombies:\n\t\tprint('missing something')\n\t\tprint('ash', ash)\n\t\tprint('humans', humans)\n\t\tprint('zombies', zombies)\n\t\tsys.exit(1)\n\n\treturn GameController(ash, humans, zombies, graphic_engine=enable_graphics).run_game()\n\n# === GameController === ===================================================== #\n\nBACKGROUND = (255, 255, 255)\nTEXT = (0, 0, 0)\nASH = (0, 0, 255)\nASH_RANGE = (255, 255, 0)\nHUMAN = (0, 255, 0)\nZOMBIE = (255, 0, 0)\nZOMBIE_RANGE = (0, 255, 255)\n\ndef make_interpolater(left_min, left_max, right_min, right_max):\n\t# Figure out how 'wide' each range is\n\tleft = left_max - left_min\n\tright = right_max - right_min\n\n\t# Compute the scale factor between left and right values\n\tscale = float(right) / float(left) if left != 0 else float(left)\n\n\t# create interpolation function using pre-calculated scaleFactor\n\tdef interp_fn(value):\n\t\t\treturn right_min + (value - left_min) * scale\n\n\treturn interp_fn\n\n\ndef animate(fn):\n\tdef wrapper(game_controller):\n\t\tif not game_controller.graphic_engine:\n\t\t\treturn fn(game_controller)\n\n\t\t# before = game_controller.entities.copy()\n\t\t# retr = fn(game_controller)\n\t\t# after = game_controller.entities\n\n\t\t# print('equals', before == after)\n\n\t\t# return retr\n\t\treturn fn(game_controller)\n\n\treturn wrapper\n\n\nclass GameController(object):\n\tENGINE: Type[Game] = Game\n\n\tash: Ash\n\thumans: List[Human]\n\tzombies: List[Zombie]\n\n\tTICK: int = 60\n\n\tSCALE: int = 10\n\tWIDTH: int = int(Field.WIDTH / SCALE)\n\tHEIGHT: int = 
int(Field.HEIGHT / SCALE)\n\n\tentities: Dict[Union[Ash, Human, Zombie], Tuple[int, int]]\n\told_entities: Dict[Union[Ash, Human, Zombie], Tuple[int, int]]\n\n\tinterpolator_w: Callable\n\tinterpolator_h: Callable\n\n\tscreen: Any\n\tfont: Any\n\tclock: Any\n\n\tgraphic_engine: bool\n\n\tdef __init__(self, ash: Ash, humans: List[Human], zombies: List[Zombie], graphic_engine: bool = True):\n\t\tself.graphic_engine = graphic_engine\n\n\t\tif self.graphic_engine:\n\t\t\tpygame.init()\n\n\t\t\tself.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT))\n\t\t\tself.clock = pygame.time.Clock()\n\n\t\t\tself.interpolator_w = make_interpolater(0, Field.WIDTH, 0, self.WIDTH)\n\t\t\tself.interpolator_h = make_interpolater(0, Field.HEIGHT, 0, self.HEIGHT)\n\t\t\tself.entities = {}\n\n\t\t\tself.font = pygame.font.SysFont(None, 48)\n\n\n\t\tself.ash = ash\n\t\tself.humans = humans\n\t\tself.zombies = zombies\n\t\tself.score = 0\n\n\tdef run_game(self):\n\n\t\tif self.graphic_engine:\n\t\t\tself.entities[self.ash] = self.translate(self.ash)\n\n\t\t\tfor human in self.humans:\n\t\t\t\tself.entities[human] = self.translate(human)\n\n\t\t\tfor zombie in self.zombies:\n\t\t\t\tself.entities[zombie] = self.translate(zombie)\n\n\t\t\tprint(self.entities)\n\n\t\twhile self.humans and self.zombies:\n\t\t\tif self.graphic_engine:\n\t\t\t\tfor event in pygame.event.get():\n\t\t\t\t# Did the user hit a key?\n\t\t\t\t\tif event.type == pygame.constants.KEYDOWN:\n\t\t\t\t\t\t# Was it the Escape key? If so, stop the loop.\n\t\t\t\t\t\tif event.key == pygame.constants.K_ESCAPE:\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t# Did the user click the window close button? If so, stop the loop.\n\t\t\t\t\telif event.type == pygame.constants.QUIT:\n\t\t\t\t\t\tbreak\n\n\t\t\tself.move_zombies()\n\t\t\tself.ash_move()\n\t\t\tself.ash_attak()\n\t\t\tself.zombie_attak()\n\n\t\t\tif self.graphic_engine:\n\t\t\t\tself.update()\n\t\t\t\tself.clock.tick(1)\n\n\t\tif self.graphic_engine:\n\t\t\tif self.humans:\n\t\t\t\ttext = self.font.render(\"You Win\", True, TEXT)\n\t\t\t# pygame.quit()\n\n\t\t\telse:\n\t\t\t\ttext = self.font.render(\"You Lost\", True, TEXT)\n\n\t\telse:\n\n\t\t\tif self.humans:\n\t\t\t\t# Win\n\t\t\t\treturn True\n\n\t\t\telse:\n\t\t\t\t# Lose\n\t\t\t\treturn False\n\n\t@animate\n\tdef move_zombies(self):\n\t\tfor zombie in self.zombies:\n\t\t\tzombie.move_to(zombie.nearest(self.humans + [self.ash]))\n\n\t\t\tif self.graphic_engine:\n\t\t\t\tself.entities[zombie] = self.translate(zombie.point())\n\n\t@animate\n\tdef ash_move(self):\n\n\t\ttry:\n\t\t\tgame = self.ENGINE(self.ash, self.humans, self.zombies)\n\t\t\tpoint = Point(*[int(i) for i in str(game.play()).split()])\n\n\t\texcept BaseException as e:\n\t\t\tprint(e)\n\t\t\tsys.exit(1)\n\n\t\tself.ash.move_to(point)\n\n\t\tif self.graphic_engine:\n\t\t\tself.entities[self.ash] = self.translate(self.ash.point())\n\n\t@animate\n\tdef ash_attak(self):\n\t\tkills = [zombie for zombie in self.zombies if self.ash.reach(zombie)]\n\n\t\tif kills:\n\t\t\tfor kill in kills:\n\t\t\t\tself.zombies.remove(kill)\n\n\t\t\t\tif self.graphic_engine:\n\t\t\t\t\tdel self.entities[kill]\n\n\t@animate\n\tdef zombie_attak(self):\n\t\tfor zombie in self.zombies:\n\t\t\tkillables = [human for human in self.humans if zombie.reach(human)]\n\n\t\t\tif killables:\n\t\t\t\tnearest = zombie.nearest(killables)\n\t\t\t\tzombie.move_to(nearest)\n\t\t\t\tself.humans.remove(nearest)\n\n\t\t\t\tif self.graphic_engine:\n\t\t\t\t\tdel self.entities[nearest]\n\n\tdef 
update(self):\n\t\tself.screen.fill(BACKGROUND)\n\n\t\tfor entity, position in self.entities.items():\n\t\t\tif isinstance(entity, Ash):\n\t\t\t\tpygame.draw.circle(self.screen, ASH_RANGE, position, entity.RANGE / self.SCALE)\n\t\t\t\tpygame.draw.circle(self.screen, ASH, self.translate(entity), self.SCALE)\n\n\t\t\telif isinstance(entity, Human):\n\t\t\t\tpygame.draw.circle(self.screen, HUMAN, position, self.SCALE)\n\n\t\t\telif isinstance(entity, Zombie):\n\t\t\t\tpygame.draw.circle(self.screen, ZOMBIE_RANGE, position, entity.RANGE / self.SCALE)\n\t\t\t\tpygame.draw.circle(self.screen, ZOMBIE, position, self.SCALE)\n\n\t\tpygame.display.flip()\n\n\tdef animate(self, fn):\n\t\tpass\n\n\tdef translate(self, point: Point) -> Tuple[int, int]:\n\t\treturn (self.interpolator_w(point.x),\n\t\t\t\t\t\tself.interpolator_h(point.y))\n","sub_path":"simulator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"434651343","text":"import os\nimport sys\nimport django\nimport logging\n\n\nSITE_ROOT = os.path.dirname(os.path.realpath(__file__))\nBASE_ROOT = os.path.abspath(os.path.join(SITE_ROOT, os.pardir))\n# Django settings for conserte_me project.\n\nDEBUG = os.environ['DEBUG']\nTEMPLATE_DEBUG = DEBUG\n\nRUNNING_DEVSERVER = (sys.argv[1] == 'runserver')\n\nADMINS = (\n ('Leonardo Korndorfer', 'leokorndorfer@gmail.com'),\n)\n\nMANAGERS = ['leokorndorfer@gmail.com', 'cristianobfernandes@gmail.com']\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': os.environ['DATABASE_ENGINE'], # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.environ['DATABASE_NAME'], # Or path to database file if using sqlite3.\n 'USER': os.environ['DATABASE_USER'], # Not used with sqlite3.\n 'PASSWORD': os.environ['DATABASE_PASSWORD'], # Not used with sqlite3.\n 'HOST': os.environ['DATABASE_HOST'], # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': os.environ['DATABASE_PORT'], # Set to empty string for default. Not used with sqlite3.\n }\n}\n\n\n# email settings\n# These are the smtp settings for gmail\nEMAIL_HOST = 'mail.conserte.me'\nEMAIL_HOST_USER = 'avisos@conserte.me'\nEMAIL_HOST_PASSWORD = 'Mj?*j!6m'\nDEFAULT_FROM_EMAIL = 'avisos@conserte.me'\nSERVER_EMAIL = 'avisos@conserte.me'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\nAPPEND_SLASH = True\n\nLOGIN_REDIRECT_URL = '/'\nLOGIN_URL = '/login'\n\nLOGOUT_REDIRECT_URL = '/'\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = BASE_ROOT + '/media/'\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = 'media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = BASE_ROOT + '/assets/'\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/assets/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n BASE_ROOT + '/static/assets/',\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 's#_3ozw)&(h+2=^t26h1w&z47!pd#0ptsz!*+&nex7vsul_o&i'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'hamlpy.template.loaders.HamlPyFilesystemLoader',\n 'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'conserte_me.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'conserte_me.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n BASE_ROOT + '/templates',\n BASE_ROOT + '/profiles',\n '/home/conserte_me/website/conserte_me',\n '/home/conserte_me/website/issues',\n '/home/conserte_me/website/templates',\n)\n\n# TEMPLATE_CONTEXT_PROCESSORS = 'django.contrib.messages.context_processors.messages'\nMESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 
'django.contrib.contenttypes',\n 'django.contrib.sessions',\n # 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # 'django.contrib.comments',\n\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n 'django.contrib.staticfiles',\n\n # thumbs\n 'easy_thumbnails',\n\n # migrations\n 'south',\n\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n\n 'conserte_me',\n 'issues',\n 'profiles',\n\n # plugins\n 'rest_framework',\n\n # custom comments\n # 'issue_comments',\n)\n\n# Custom comment application\n# COMMENTS_APP = 'issue_comments'\n\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'square': {'size': (50, 50), 'crop': 'smart'},\n 'big_square': {'size': (100, 100), 'crop': 'smart'},\n 'medium': {'size': (250, 250), 'crop': False},\n 'big': {'size': (500, 500), 'crop': False},\n },\n}\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# setup logger\nimport logging\nPROJECT_DIR = os.path.dirname(__file__)\nPARENT_DIR = os.path.dirname(PROJECT_DIR)\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(message)s',\n filename=os.path.join(PARENT_DIR, 'django.log'),\n filemode='a+')\n","sub_path":"conserte_me/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":7424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"356308916","text":"from random import choice, randint\nimport string, re\nfrom time import sleep\n\ndef clean_data(file):\n with open (file) as f: \n data = f.read()\n clean = data.replace('\\n',' ')\n clean = clean.lower()\n clean = re.sub(r'[^\\w\\s]', '', clean)\n \n return clean\n\ndef generate_model(text, order): \n \n model = {}\n \n for i in range(0,len(text) - order): \n fragment = text[i:i + order]\n next_letter = text[i + order]\n \n if fragment not in model: \n model[fragment] = {}\n \n if next_letter not in model[fragment]:\n model[fragment][next_letter] = 1 \n \n else:\n model[fragment][next_letter] += 1\n\n return(model)\n \ndef get_next_character(model,fragment):\n \n letters = []\n \n for letter in model[fragment].keys():\n \n try:\n for times in range(0, model[fragment][letter]):\n letters.append(letter)\n \n except KeyError:\n print('key not present')\n continue\n \n return choice(letters)\n\ndef generate_text(text, text2, order, length):\n \n data = clean_data(text)\n data2 = clean_data(text2)\n\n # print(data, data2)\n\n data_final=' '.join([data,data2])\n \n model = generate_model(data_final,order)\n \n current_fragment = data[0:order]\n \n output = \"\"\n \n for i in range(0, length - order):\n try :\n new_character = get_next_character(model,current_fragment)\n output += new_character\n current_fragment = current_fragment[1:] + new_character\n \n except KeyError:\n continue \n \n return 
output\n\ndef loop():\n rand_num = randint(100, 1000)\n resultat = generate_text(\"data.txt\", \"data2.txt\", 4, rand_num)\n return resultat\n \n# déclaration fonction pour enregister\n\nif __name__ == \"__main__\":\n while True:\n print(loop(), '\\n')\n # appel fonction enregister\n sleep(2)","sub_path":"markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"558054780","text":"import re as re # regular expressions\nimport pprint # pretty printing\nimport os, fnmatch # to retrieve file information from path\nimport pickle # serialize the data structure\nimport copy\n\ndef processDirectoryOfBreakthroughFiles(path, playerList):\n for playerGameHistoryData in findFiles(path, '*.txt'):\n playerList.append(processBreakthroughFile(path, playerGameHistoryData))\n\ndef processBreakthroughFile(path, playerGameHistoryData):\n fileName = playerGameHistoryData[\n len(path):len(playerGameHistoryData) - len('.txt')] # trim path & extension\n fileName = fileName.split('間') # user間2000間687687 -> ['user',2000, 687687]\n playerName = str(fileName[0])\n playerID = int(fileName[2])\n rank = int(fileName[1])\n gamesList, numWins, numLosses = formatGameList(playerGameHistoryData, playerName)\n return {'Player': playerName, 'PlayerID': playerID, 'Rank': rank, 'Games': gamesList, 'Wins': numWins, 'Losses': numLosses}\n\ndef writeToDisk(input, path):\n outputFile = open(path + r'PlayerDataPythonDataset.p', 'wb')\n pickle.dump(input, outputFile)\n\ndef findFiles(path, filter): # recursively find files at path with filter extension; pulled from StackOverflow\n for root, dirs, files in os.walk(path):\n for file in fnmatch.filter(files, filter):\n yield os.path.join(root, file)\n\ndef preprocessGamesList(playerGameHistoryData): #normalized regex/iterable friendly list\n gamesList = [y[1] for y in list(\n enumerate([x.strip() for x in open(playerGameHistoryData, \"r\")]))] # read in file and convert to list\n gamesList = filter(None, gamesList) # remove empty strings from list\n gamesList = list(filter(lambda a: a != \"[Site \\\"www.littlegolem.net\\\"]\", gamesList)) # remove site from list\n return gamesList\n\ndef formatGameList(playerGameHistoryData, playerName):\n quotesRegex = re.compile(r'\"(.*)\"')\n eventEntry = 0\n whiteEntry = 1\n blackEntry = 2\n resultEntry = 3\n moveEntry = 4\n games = []\n gamesList = preprocessGamesList(playerGameHistoryData)\n numWins = 0\n numLosses = 0\n # flags to indicate if something wasn't set properly\n opponentName = None\n event = None\n playerColor = None\n opponentColor = None\n win = None\n # format game list\n for j in range(0, len(gamesList)):\n thisRow = j % 5\n if thisRow != moveEntry:\n rowData = quotesRegex.search(gamesList[j]).group(1)\n if thisRow == eventEntry:\n # [Event \"Tournament null\"] -> Event: 'Tournament null'\n event = rowData\n elif thisRow == whiteEntry:\n if playerName.lower() == rowData.lower(): # ignore case just in case (no pun intended)\n playerColor = 'White'\n opponentColor = 'Black'\n else:\n opponentName = rowData\n playerColor = 'Black'\n opponentColor = 'White'\n elif thisRow == blackEntry:\n # assignment case handled above\n if playerName.lower() != rowData.lower():\n opponentName = rowData\n elif thisRow == resultEntry:\n #\n if playerColor == 'White':\n if rowData[0] == '1':\n win = True\n elif rowData[0] == '0':\n win = False\n elif rowData[0] == '*':\n win = \"Game In Progress\"\n elif playerColor == 'Black':\n 
if rowData[0] == '0':\n win = True\n elif rowData[0] == '1':\n win = False\n elif rowData[0] == '*':\n win = \"Game In Progress\"\n else:\n print(\"UNEXPECTED DATA FORMAT\")\n win = \"Undefined at line \" + str(j)\n elif thisRow == moveEntry:\n # format move list\n moveList = formatMoveList(gamesList[j])\n boardStates = generateBoardStates(moveList, playerColor, win) # generate board states from moveList\n assert (playerColor != opponentColor and opponentName != playerName)\n if len(moveList) > 3 and boardStates['Win'] != \"Game In Progress\":\n #non-spurrious games, remove if statement for all games.\n if win == True:\n numWins += 1\n elif win == False:\n numLosses += 1\n\n games.append({'Event': event, 'PlayerColor': playerColor, 'OpponentColor': opponentColor,\n 'OpponentName': opponentName, 'Win': win,\n 'Moves': moveList, 'BoardStates': boardStates}) # append new game after formatting move list\n return games, numWins, numLosses\n\n\ndef generateBoardStates(moveList, playerColor, win):\n if win == \"Game In Progress\":\n return {'Win': win, 'States': []}\n #for human readability version\n empty = 'e'\n white = 'w'\n black = 'b'\n\n # win/loss 'value' symmetrical\n if win == True:\n win = 1\n elif win == False:\n win = -1\n state = [\n {\n 9: 0,\n 8: {'a': black, 'b': black, 'c': black, 'd': black, 'e': black, 'f': black, 'g': black, 'h': black},\n 7: {'a': black, 'b': black, 'c': black, 'd': black, 'e': black, 'f': black, 'g': black, 'h': black},\n 6: {'a': empty, 'b': empty, 'c': empty, 'd': empty, 'e': empty, 'f': empty, 'g': empty, 'h': empty},\n 5: {'a': empty, 'b': empty, 'c': empty, 'd': empty, 'e': empty, 'f': empty, 'g': empty, 'h': empty},\n 4: {'a': empty, 'b': empty, 'c': empty, 'd': empty, 'e': empty, 'f': empty, 'g': empty, 'h': empty},\n 3: {'a': empty, 'b': empty, 'c': empty, 'd': empty, 'e': empty, 'f': empty, 'g': empty, 'h': empty},\n 2: {'a': white, 'b': white, 'c': white, 'd': white, 'e': white, 'f': white, 'g': white, 'h': white},\n 1: {'a': white, 'b': white, 'c': white, 'd': white, 'e': white, 'f': white, 'g': white, 'h': white}\n }, win]\n mirrorState = mirrorBoardState(state)\n boardStates = {'Win': win, 'States': [state], 'MirrorStates': [mirrorState]}\n for i in range(0, len(moveList)):\n assert (moveList[i]['#'] == i + 1)\n if isinstance(moveList[i]['White'], dict): # if string, then == resign or NIL\n whoseMove = 'White'\n state = [movePiece(state[0], moveList[i]['White']['To'], moveList[i]['White']['From'], playerColor, 'White'), win]\n boardStates['States'].append(state)\n mirrorState = mirrorBoardState(state)\n boardStates['MirrorStates'].append(mirrorState)\n if isinstance(moveList[i]['Black'], dict): # if string, then == resign or NIL\n whoseMove = 'Black'\n state= [movePiece(state[0], moveList[i]['Black']['To'], moveList[i]['Black']['From'], playerColor, 'Black'), win]\n boardStates['States'].append(state)\n mirrorState = mirrorBoardState(state)\n boardStates['MirrorStates'].append(mirrorState)\n # for data transformation; inefficient to essentially compute board states twice, but more error-proof\n boardStates = convertBoardStatesToArrays(boardStates, playerColor)\n return boardStates\n\ndef mirrorBoardState(state):#since a mirror image has the same strategic value\n mirrorStateWithWin = copy.deepcopy(state) # edit copy of boardState\n mirrorState = mirrorStateWithWin[0]\n state = state[0] #the board state; state[1] is the win or loss value\n for row in sorted(state):\n whoseMoveIndex = 9\n if row != whoseMoveIndex: #don't touch the index that shows 
whose move generated this state\n for column in sorted(state[row]):\n if column == 'a':\n mirrorState[row]['h'] = state[row][column]\n elif column == 'b':\n mirrorState[row]['g'] = state[row][column]\n elif column == 'c':\n mirrorState[row]['f'] = state[row][column]\n elif column == 'd':\n mirrorState[row]['e'] = state[row][column]\n elif column == 'e':\n mirrorState[row]['d'] = state[row][column]\n elif column == 'f':\n mirrorState[row]['c'] = state[row][column]\n elif column == 'g':\n mirrorState[row]['b'] = state[row][column]\n elif column == 'h':\n mirrorState[row]['a'] = state[row][column]\n return mirrorStateWithWin\n\n\n\ndef convertBoardStatesToArrays(boardStates, playerColor):\n newBoardStates = boardStates\n states = boardStates['States']\n mirrorStates = boardStates['MirrorStates']\n assert len(states) == len(mirrorStates)\n newBoardStates['States'] = []\n newBoardStates['MirrorStates'] = []\n for i in range (0, len (states)):\n newBoardStates['States'].append(convertBoardTo1DArray(states[i], playerColor))\n newBoardStates['MirrorStates'].append(convertBoardTo1DArray(mirrorStates[i], playerColor))\n return newBoardStates\n\n\ndef convertBoardTo1DArray(boardState, playerColor):\n state = boardState[0]\n whoseMoveIndex = 9\n oneDArray = []\n for row in sorted(state):\n if row != whoseMoveIndex: #don't touch the index that shows whose move generated this state\n for column in sorted(state[row]):\n #needs to be sorted to traverse dictionary in lexicographical order\n value = -5\n if state[row][column] == 'e':\n value = 0\n elif state[row][column] == 'w':\n if playerColor == 'White':\n value = 1\n else:\n value = -1\n elif state[row][column] == 'b':\n if playerColor == 'Black':\n value = 1\n else:\n value = -1\n else:\n print(\"error in convertBoard\")\n exit(-190)\n oneDArray.append(value)\n oneDArray.append(state[whoseMoveIndex])#65th element is a flag indicating who generated this state\n newBoardState = [oneDArray, boardState[1]] # [x vector, y scalar]\n return newBoardState\n\n\ndef movePiece(boardState, To, From, playerColor, whoseMove):\n empty = 'e'\n whoseMoveIndex = 9\n nextBoardState = copy.deepcopy(boardState) # edit copy of boardState\n nextBoardState[int(To[1])][To[0]] = nextBoardState[int(From[1])][From[0]]\n nextBoardState[int(From[1])][From[0]] = empty\n if (playerColor == 'White' and whoseMove == 'White') or (playerColor == 'Black' and whoseMove == 'Black'):\n nextBoardState[whoseMoveIndex] = 1 #player made move\n else:\n nextBoardState[whoseMoveIndex] = -1 #opponent made move\n return nextBoardState\n\n\ndef formatMoveList(moveListString):\n moveRegex = re.compile(r'(\\d+)\\.\\s(resign|[a-h]\\d.[a-h]\\d)\\s(resign|[a-h]\\d.[a-h]\\d|\\d-\\d)',\n re.IGNORECASE) # fix last group to solve for resign first or second\n moveList = moveRegex.findall(moveListString)\n for i in range(0, len(moveList)):\n move = list(moveList[i])\n move[0] = int(move[0])\n assert (move[0] == i + 1)\n if move[1] == \"resign\":\n move[2] = \"NIL\"\n else:\n move[1] = {'From': move[1][0:2], 'To': move[1][3:len(move[1])]} # set White's moves\n if move[2] != \"resign\" and move[2] != \"NIL\": # set Black's moves\n if len(move[2]) > 3:\n move[2] = {'From': move[2][0:2], 'To': move[2][3:len(move[2])]}\n else:\n move[2] = \"NIL\"\n moveList[i] = {'#': move[0], 'White': move[1], 'Black': move[2]}\n return moveList\n\n\n #main script\nplayerList = []\npathToCheck = r'/Users/TeofiloZosa/BreakthroughData/AutomatedData/'\nprocessDirectoryOfBreakthroughFiles(pathToCheck, playerList)\n# for i in range(0, 
len(playerList)):\n# pprint.pprint(\"Player # \" + str(i + 1) + \": \" + playerList[i]['Player'])\n\nwriteToDisk(playerList, pathToCheck)\n\n# Verified Working.\n# #double check\n#pathToCheck2 = r'/Users/TeofiloZosa/BreakthroughData/'\n# newList = pickle.load(open(pathToCheck+r'PlayerDataPython.p', 'rb'))\n# oldList = pickle.load(open(pathToCheck2+r'PlayerDataPython.p', 'rb'))\n# assert (playerList == newList == oldList)\n\n\n","sub_path":"PlayerDataDirectoryToAnalysisFormat.py","file_name":"PlayerDataDirectoryToAnalysisFormat.py","file_ext":"py","file_size_in_byte":12383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"190384505","text":"# -*- coding:utf-8 -*-\n# @lc app=leetcode.cn id=387 lang=python\n#\n# [387] 字符串中的第一个唯一字符\n#\n\n# @lc code=start\nfrom collections import Counter\n\nclass Solution(object):\n def firstUniqChar(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n # cnt = Counter(s)\n # minVal = float('inf')\n # for x,i in cnt.items():\n # if i == 1:\n # minVal = min(minVal, s.index(x)) \n # return minVal if minVal != float('inf') else -1\n for c in s:\n if s.find(c) == s.rfind(c):\n return s.find(c)\n return -1\n \n# @lc code=end\nif __name__ == \"__main__\":\n obj = Solution()\n ret = obj.firstUniqChar(\"loveleetcode\")\n print(ret)\n ret = obj.firstUniqChar(\"cc\")\n print(ret)\n ret = obj.firstUniqChar(\"\")\n print(ret)\n ret = obj.firstUniqChar(\"leetcode\")\n print(ret)","sub_path":"Week_08/G20200343030585/LeetCode_387_585.py","file_name":"LeetCode_387_585.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"366317141","text":"#encoding:utf-8\nfrom StringIO import StringIO\nimport gzip,urllib\n#压缩\ndef compress(raw): \n out_file=StringIO()\n try:\n with gzip.GzipFile(fileobj=out_file,mode=\"wb\") as gzip_out:\n gzip_out.write(raw)\n gzip_out.flush()\n gzip_out.close()\n out_file.seek(0)\n return out_file.read()\n finally:\n out_file.close()\n#解压\ndef uncompress(raw):\n file_in=StringIO(raw)\n try:\n gzip_in=gzip.GzipFile(fileobj=file_in,mode=\"r\")\n return gzip_in.read()\n finally:\n file_in.close()\n\ndef urldecode(query,charset=\"utf-8\"):\n d = {}\n a = query.split('&')\n for s in a:\n if s.find('='):\n k,v = map(urllib.unquote, s.split('='))\n try:\n d[k]=v.decode(charset)\n except KeyError:\n d[k] = [v]\n return d\n","sub_path":"deb-src/uucin-zabbix-longlive/etc/zabbix/zabbix_plugin.conf.d/longlive_checker/longlive_checker_env/local/lib/python2.7/site-packages/tornado-3.1.1.alpha.0-py2.7.egg/tornado/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"7638616","text":"from collections import deque\nimport copy\nimport math\nimport random\n\n'''\n IMPORTANT: Read through the code before beginning implementation!\n Your solution should fill in the various \"TODO\" items within this starter code.\n'''\ng_CYCLE_TIME = .100\n\n\n# Default parameters will create a 4x4 grid to test with\ng_MAP_SIZE_X = 2. 
# 2m wide\ng_MAP_SIZE_Y = 1.5 # 1.5m tall\ng_MAP_RESOLUTION_X = 0.5 # Each col represents 50cm\ng_MAP_RESOLUTION_Y = 0.375 # Each row represents 37.5cm\ng_NUM_X_CELLS = int(g_MAP_SIZE_X // g_MAP_RESOLUTION_X) # Number of columns in the grid map\ng_NUM_Y_CELLS = int(g_MAP_SIZE_Y // g_MAP_RESOLUTION_Y) # Number of rows in the grid map\n\n# Map from Lab 4: values of 0 indicate free space, 1 indicates occupied space\ng_WORLD_MAP = [0] * g_NUM_Y_CELLS*g_NUM_X_CELLS # Initialize graph (grid) as array\n\n# Source and Destination (I,J) grid coordinates\ng_dest_coordinates = (2,2)\ng_src_coordinates = (0,0)\n\ncost_matrix = []\n\n\ndef create_test_map(map_array):\n # Takes an array representing a map of the world, copies it, and adds simulated obstacles\n num_cells = len(map_array)\n map_matrix = copy.copy(map_array)\n # Add obstacles to up to sqrt(n) vertices of the map\n for i in range(int(math.sqrt(len(map_array)))):\n random_cell = random.randint(0, num_cells-1)\n map_matrix[random_cell] = 1\n\n return map_matrix\n\ndef vertex_index_to_ij(vertex_index):\n '''\n vertex_index: unique ID of graph vertex to be converted into grid coordinates\n Returns COL, ROW coordinates in 2D grid\n '''\n global g_NUM_X_CELLS\n return (vertex_index % g_NUM_X_CELLS, vertex_index // g_NUM_X_CELLS)\n\ndef ij_to_vertex_index(i,j):\n '''\n i: Column of grid map\n j: Row of grid map\n\n returns integer 'vertex index'\n '''\n global g_NUM_X_CELLS\n return j*g_NUM_X_CELLS + i\n\n\ndef ij_coordinates_to_xy_coordinates(i,j):\n '''\n i: Column of grid map\n j: Row of grid map\n\n returns (X, Y) coordinates in meters at the center of grid cell (i,j)\n '''\n global g_MAP_RESOLUTION_X, g_MAP_RESOLUTION_Y\n return (i+0.5)*g_MAP_RESOLUTION_X, (j+0.5)*g_MAP_RESOLUTION_Y\n\ndef xy_coordinates_to_ij_coordinates(x,y):\n '''\n x: X coordinate in meters\n y: Y coordinate in meters\n\n returns (I, J) coordinates of the grid cell containing the point (x,y)\n '''\n global g_MAP_RESOLUTION_X, g_MAP_RESOLUTION_Y\n return int(x // g_MAP_RESOLUTION_X), int(y // g_MAP_RESOLUTION_Y)\n\n# **********************************\n# * Core Dijkstra Functions *\n# **********************************\n\ndef get_travel_cost(vertex_source, vertex_dest):\n # Returns the cost of moving from vertex_source (int) to vertex_dest (int)\n # INSTRUCTIONS:\n '''\n This function should return 1 if:\n vertex_source and vertex_dest are neighbors in a 4-connected grid (i.e., N,E,S,W of each other but not diagonal) and neither is occupied in g_WORLD_MAP (i.e., g_WORLD_MAP isn't 1 for either)\n\n This function should return 1000 if:\n vertex_source corresponds to (i,j) coordinates outside the map\n vertex_dest corresponds to (i,j) coordinates outside the map\n vertex_source and vertex_dest are not adjacent to each other (i.e., more than 1 move away from each other)\n '''\n if 0 <= vertex_source < len(g_WORLD_MAP) and 0 <= vertex_dest < len(g_WORLD_MAP):\n start_i, start_j = vertex_index_to_ij(vertex_source)\n dest_i, dest_j = vertex_index_to_ij(vertex_dest)\n manDist = abs(start_i - dest_i) + abs(start_j - dest_j)\n\n if manDist == 1 and g_WORLD_MAP[vertex_source] != 1 and g_WORLD_MAP[vertex_dest] != 1:\n return 1\n\n return 1000\n\n\ndef run_dijkstra(source_vertex):\n '''\n source_vertex: vertex index to find all paths back to\n returns: 'prev' array from a completed Dijkstra's algorithm run\n\n Function to return an array of ints corresponding to the 'prev' variable in Dijkstra's algorithm\n The 'prev' array stores the next vertex on the best path back to source_vertex.\n Thus, the returned 
array prev can be treated as a lookup table: prev[vertex_index] = next vertex index on the path back to source_vertex\n '''\n global g_NUM_X_CELLS, g_NUM_Y_CELLS, cost_matrix\n\n\n source_index = source_vertex\n # Array mapping vertex_index to distance of shortest path from vertex_index to source_vertex.\n dist = [1000] * g_NUM_X_CELLS * g_NUM_Y_CELLS\n dist[source_index] = 0\n # Queue for identifying which vertices are up to still be explored:\n # Will contain tuples of (vertex_index, cost), sorted such that the min cost is first to be extracted (explore cheapest/most promising vertices first)\n Q_cost = [1000] * g_NUM_X_CELLS * g_NUM_Y_CELLS\n\n Q_cost[source_index] = 0\n\n # Array of ints for storing the next step (vertex_index) on the shortest path back to source_vertex for each vertex in the graph\n prev = [-1] * g_NUM_X_CELLS * g_NUM_Y_CELLS\n\n # Insert your Dijkstra's code here. Don't forget to initialize Q_cost properly!\n\n for i in range(0, g_NUM_X_CELLS * g_NUM_Y_CELLS):\n for j in range(0, g_NUM_X_CELLS * g_NUM_Y_CELLS):\n j_x, j_y = vertex_index_to_ij(j)\n alt = dist[i] + get_travel_cost(i, j)\n\n if(alt < dist[j]):\n # print(\"Q_Cost[i] (%s) + cost (%s) = %s\" % (dist[i], get_travel_cost(j_x, j_y), alt))\n # print(\"Update %s to %s\" % (j, alt))\n Q_cost[j] = alt\n dist[j] = alt\n prev[j] = i\n\n # print(\"Q_Cost:\", dist)\n # print(\"Prevl:\", prev)\n # Return results of algorithm run\n cost_matrix = Q_cost\n return prev\n\n\ndef reconstruct_path(prev, source_vertex, dest_vertex):\n '''\n Given a populated 'prev' array, a source vertex_index, and destination vertex_index,\n allocate and return an integer array populated with the path from source to destination.\n The first entry of your path should be source_vertex and the last entry should be the dest_vertex.\n If there is no path between source_vertex and dest_vertex, as indicated by hitting a '-1' on the\n path from dest to source, return an empty list.\n '''\n final_path = deque()\n vertex = dest_vertex\n\n while vertex != source_vertex:\n # print(vertex)\n if vertex == -1:\n return []\n\n final_path.append(vertex)\n vertex = prev[vertex]\n\n final_path.append(source_vertex)\n\n return final_path\n\n\ndef render_map(map_array):\n '''\n DONE-\n Display the map in the following format:\n Use \" . \" for free grid cells\n Use \"[ ]\" for occupied grid cells\n\n Example:\n For g_WORLD_MAP = [0, 0, 1, 0,\n 0, 1, 1, 0,\n 0, 0, 0, 0,\n 0, 0, 0, 0]\n There are obstacles at (I,J) coordinates: [ (2,0), (1,1), (2,1) ]\n The map should render as:\n . . . .\n . . . .\n . [ ][ ] .\n . . [ ] .\n\n\n Make sure to display your map so that I,J coordinate (0,0) is in the bottom left.\n (To do this, you'll probably want to iterate from row 'J-1' to '0')\n '''\n for i in range(len(map_array)-1,0,-g_NUM_X_CELLS):\n for j in range(i-g_NUM_X_CELLS+1, i+1):\n if map_array[j] == 0:\n print(' . 
'),\n else:\n print('[ ]'),\n\n print('\\n')\n\n\n\ndef main():\n global g_WORLD_MAP, g_NUM_X_CELLS, cost_matrix\n #Just a little test case for the reconstruction\n # test = [1,2,-1,0,-1,-1,3,-1,-1]\n # stack = reconstruct_path(test,2,6)\n # while stack:\n # print(stack.pop())\n\n # DONE: Initialize a grid map to use for your test -- you may use create_test_map for this, or manually set one up with obstacles\n g_WORLD_MAP = create_test_map(g_WORLD_MAP)\n # g_NUM_X_CELLS = 4\n # g_WORLD_MAP = [0, 0, 1, 0,\n # 0, 1, 1, 0,\n # 0, 0, 0, 0,\n # 0, 0, 0, 0]\n # g_NUM_X_CELLS = 3\n # g_WORLD_MAP = [0, 0, 0,\n # 0, 1, 1,\n # 0, 0, 0]\n\n # Use render_map to render your initialized obstacle map\n render_map(g_WORLD_MAP)\n\n # TODO: Find a path from the (I,J) coordinate pair in g_src_coordinates to the one in g_dest_coordinates using run_dijkstra and reconstruct_path\n prev = run_dijkstra(ij_to_vertex_index(g_src_coordinates[0], g_src_coordinates[1]))\n stack = reconstruct_path(prev,ij_to_vertex_index(g_src_coordinates[0], g_src_coordinates[1]),ij_to_vertex_index(g_dest_coordinates[0], g_dest_coordinates[1]))\n\n '''\n DONE-\n Display the final path in the following format:\n Source: (0,0)\n Goal: (3,1)\n 0 -> 1 -> 2 -> 6 -> 7\n '''\n\n print('Source: ', g_src_coordinates)\n print('Destination: ', g_dest_coordinates)\n if len(stack) > 0:\n while stack:\n print(stack.pop()),\n print(' -> '),\n print('\\n')\n else:\n print('There is no path from source to destination.')\n\n print('\\n\\n')\n print('Cost Matrix: ')\n for i in range(g_NUM_X_CELLS-1,-1,-1):\n for j in range(g_NUM_Y_CELLS):\n print(cost_matrix[j + i*g_NUM_X_CELLS]),\n\n print('\\n')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lab_6/lab6.py","file_name":"lab6.py","file_ext":"py","file_size_in_byte":8945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"299319153","text":"import cv2\nimport os\nimport json\nimport requests\nfrom collections import namedtuple\n\nheaders = {'Content-type': 'application/json'}\nurl='http://localhost:8080/api/camera'\nparams={'page':1,'size':5}\nr=requests.get(url, params=params, headers=headers)\nx=json.loads(r.text)\nresultCamera = x[\"data\"][\"cameraList\"]\ncamera={}\nprint(x[\"data\"][\"cameraList\"])\nfor i in range(0,5):\n camera.update({resultCamera[i]['id']:resultCamera[i][\"resource\"]})\n##print(camera)\nprint('camera',camera)\nfor keys,values in camera.items():\n print(keys)\n print(values)\n\n","sub_path":"Python Detection-NotUsed/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"247137706","text":"### https://www.projecteuler.net/problem=43\n\nimport sys\nsys.path.append(r'..\\euler')\nimport common as euler\nimport time\n\ndef has_duplicate_digits(n: int):\n ''' Returns true if the integer n has\n at least one repeated digit'''\n digits = [dig for dig in str(n)]\n digits.sort()\n\n for i in range(len(digits) - 1):\n if (digits[i] == digits[i + 1]):\n return True\n\n return False\n\ndef add_next_digit(n: list, div: list):\n ''' Given a number n and a list of divisors,\n adds the next digit so that the three left-most\n digits will divide the given divisor, as\n stated by the problem.'''\n cumulative_sum = 0\n digits = [str(i) for i in range(10) if str(i) not in n]\n for i in digits:\n # Check if adding i as a digit satisfy the divisibility condition\n if ((100 * int(i) + 10 * int(n[0]) + int(n[1])) % 
div[0] == 0):\n # If we have more than one remaining divisor to satisfy, call the function recursively\n if (len(div) > 1):\n cumulative_sum += add_next_digit([i] + n, div[1:])\n # If we are left with only one divisor to deal with (which will be 2), and two digits to fit, then they can be fit both ways\n else:\n new_number = [int(dig) * (10 ** ((len(n) + 1) - ndx)) for ndx, dig in enumerate([dig for dig in digits if dig is not i] + [i] + n)]\n print ('Found new number: %i' % sum(new_number))\n cumulative_sum += sum(new_number)\n\n return cumulative_sum\n\ndef Problem43():\n ''' Returns the sum of all pandigital numbers\n satistying the problem conditions'''\n\n # Initialise time\n start_time = time.time()\n res = 0\n\n i17 = 1\n while (i17 * 17 < 1000):\n # Starting by defining the number as a multiple of 17\n num = i17 * 17\n if (not has_duplicate_digits(num)):\n digits = [dig for dig in str(num)]\n if (num < 100):\n digits.insert(0, '0')\n \n # For each multiple of 17 with no repeated digits, loop through each divisor adding digits\n res = res + add_next_digit(digits, [13, 11, 7, 5, 3, 2])\n \n i17 = i17 + 1\n\n # Return result and time elapsed\n return res, '%.3f s' % (time.time() - start_time)","sub_path":"Page1/problem43.py","file_name":"problem43.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"479596805","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport rsa\nimport base64 #编码库\nfrom OpenSSL.crypto import PKey #处理公钥\nfrom OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM, FILETYPE_ASN1 #处理文件\nfrom OpenSSL.crypto import dump_privatekey, dump_publickey #key写入文件\n\n\n# pip install pyopenssl\n\npk = PKey() # 调用openssl加密标准\npk.generate_key(TYPE_RSA, 1024) # 1024位rsa类型\nprint(pk)\n\n# 秘钥写入文件\npubkey_file = dump_publickey(FILETYPE_PEM, pk)\nprivatekey_file = dump_privatekey(FILETYPE_ASN1, pk)\n\n# 秘钥读取\npubkey = rsa.PublicKey.load_pkcs1_openssl_pem(pubkey_file)\nprivatekey = rsa.PrivateKey.load_pkcs1(privatekey_file, \"DER\")\n# privatekey1 = rsa.PublicKey.load_pkcs1_openssl_pem(privatekey_file)\n\nprint(pubkey.save_pkcs1())\nprint(privatekey.save_pkcs1())\n# print(pubkey, privatekey, privatekey1)\n\ndata = rsa.encrypt(\"自古多情空余恨,此恨绵绵无绝期\".encode(\"utf-8\"), pubkey) # 加密\ndata = base64.b64encode(data)\nprint(data)\ndata_d = rsa.decrypt(base64.b64decode(data), privatekey) # 解密\nprint(data_d.decode(\"utf-8\"))\n","sub_path":"algorithm/OpenSSLTest.py","file_name":"OpenSSLTest.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"447334231","text":"class C2: ... 
# Make superclass objects\r\nclass C3: ...\r\nclass C1(C2, C3): # Make and link class C1\r\n def setname(self, who): # Assign name: C1.setname\r\n self.name = who # Self is either I1 or I2\r\nI1 = C1() # Make two instances\r\nI2 = C1()\r\nI1.setname('bob') # Sets I1.name to 'bob'\r\nI2.setname('sue') # Sets I2.name to 'sue'\r\nprint(I1.name)\r\nprint(I2.name)\r\n","sub_path":"VI/class_check.py","file_name":"class_check.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"422142425","text":"\"\"\"Common steps for Oneprovider.\n\"\"\"\n__author__ = \"Jakub Liput\"\n__copyright__ = \"Copyright (C) 2016 ACK CYFRONET AGH\"\n__license__ = \"This software is released under the MIT license cited in \" \\\n \"LICENSE.txt\"\n\nimport re\nimport pyperclip\n\nfrom tests.utils.acceptance_utils import list_parser\nfrom tests.gui.conftest import WAIT_BACKEND, WAIT_FRONTEND, MAX_REFRESH_COUNT, \\\n WAIT_REFRESH\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait as Wait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom pytest_bdd import given, parsers, when, then\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import TimeoutException\n\nfrom ..utils.inspect import is_active\nfrom ..utils.generic import refresh_and_call, click_on_element, parse_url\nfrom pytest_selenium_multi.pytest_selenium_multi import select_browser\n\n\nmain_menu_tab_to_url = {'spaces': 'spaces',\n 'groups': 'groups',\n 'data': 'data',\n 'shared': 'shares'}\n\n\ndef _click_on_tab_in_main_menu_sidebar(driver, tab):\n def _load_main_menu_tab_page():\n def _check_url():\n try:\n found = parse_url(driver.current_url).group('tab')\n except AttributeError:\n return False\n else:\n return main_menu_tab_to_url[tab] == found.lower()\n\n click_on_element(driver, item_name=tab,\n css_path='.primary-sidebar a#main-{:s}'\n ''.format(main_menu_tab_to_url[tab]),\n msg='clicking on {:s} tab in main menu')\n\n return Wait(driver, WAIT_FRONTEND).until(\n lambda _: _check_url(),\n message='waiting for url to change.'\n 'Current url: {:s}'.format(driver.current_url)\n )\n\n Wait(driver, WAIT_BACKEND).until(\n lambda _: _load_main_menu_tab_page(),\n message='waiting for {:s} main menu tab page to load'\n ''.format(tab)\n )\n\n\n@given(parsers.re('users? of (?P.*) clicked on the '\n '\"(?P.*)\" tab in main menu sidebar'))\ndef g_op_click_on_the_given_main_menu_tab(selenium, browser_id_list,\n main_menu_tab):\n for browser_id in list_parser(browser_id_list):\n driver = select_browser(selenium, browser_id)\n _click_on_tab_in_main_menu_sidebar(driver, main_menu_tab)\n\n\n@when(parsers.re('users? of (?P.*) clicks on the '\n '\"(?P.*)\" tab in main menu sidebar'))\n@then(parsers.re('users? 
of (?P.*) clicks on the '\n '\"(?P.*)\" tab in main menu sidebar'))\ndef wt_op_click_on_the_given_main_menu_tab(selenium, browser_id_list,\n main_menu_tab):\n for browser_id in list_parser(browser_id_list):\n driver = select_browser(selenium, browser_id)\n _click_on_tab_in_main_menu_sidebar(driver, main_menu_tab)\n\n\n@when(parsers.parse('user of {browser_id} refreshes Oneprovider site'))\n@then(parsers.parse('user of {browser_id} refreshes Oneprovider site'))\ndef op_refresh_op_site_by_rm_hashtag(selenium, browser_id):\n driver = select_browser(selenium, browser_id)\n op_url = parse_url(driver.current_url).group('base_url')\n driver.get(op_url)\n\n\n@when(parsers.parse('user of {browser_id} selects \"{item_name}\" '\n 'from {item_type} list'))\n@then(parsers.parse('user of {browser_id} selects \"{item_name}\" '\n 'from {item_type} list'))\ndef op_select_item_from_list(selenium, browser_id, item_name, item_type):\n driver = select_browser(selenium, browser_id)\n click_on_element(driver, item_name=item_name,\n ignore_case=False,\n css_path='ul.{:s}-list '\n '.secondary-sidebar-item'.format(item_type),\n msg='clicking on {{:s}} item in {type} '\n 'list'.format(type=item_type))\n\n\n@when(parsers.parse('user of {browser_id} clicks on copy button next to '\n 'input box to copy visible token'))\n@then(parsers.parse('user of {browser_id} clicks on copy button next to '\n 'input box to copy visible token'))\n@when(parsers.parse('user of {browser_id} clicks on copy button next to '\n 'input box to copy visible url'))\n@then(parsers.parse('user of {browser_id} clicks on copy button next to '\n 'input box to copy visible url'))\ndef op_copy_visible_token_to_clipboard(selenium, browser_id):\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_FRONTEND).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, '.input-with-button '\n 'button.copy-btn'))\n ).click()\n\n\n@when(parsers.parse('user of {browser_id} sends copied {item_type} '\n 'to users of {browser_list}'))\n@then(parsers.parse('user of {browser_id} sends copied {item_type} '\n 'to users of {browser_list}'))\ndef op_send_visible_token_to_other_users(selenium, browser_id, item_type,\n browser_list, tmp_memory):\n select_browser(selenium, browser_id)\n item = pyperclip.paste()\n for browser in list_parser(browser_list):\n if browser in tmp_memory:\n tmp_memory[browser][item_type] = item\n else:\n tmp_memory[browser] = {item_type: item}\n\n\n@when(parsers.parse('user of {browser_id} clicks on the \"{button_name}\" '\n 'button in {main_menu_tab} sidebar'))\n@then(parsers.parse('user of {browser_id} clicks on the \"{button_name}\" '\n 'button in {main_menu_tab} sidebar'))\ndef op_click_on_button_in_main_menu_tab_sidebar(selenium, browser_id,\n button_name,\n main_menu_tab):\n driver = select_browser(selenium, browser_id)\n assert main_menu_tab in ('spaces', 'groups')\n\n click_on_element(driver, item_name=button_name,\n css_path='.secondary-sidebar '\n 'figure.icon',\n msg='clicking on {{:s}} '\n 'in {tab}'.format(tab=main_menu_tab))\n\n\ndef _check_for_item_in_given_list(driver, name, elem_type):\n def _assert_one_item_in_list(s, item_name, item_type):\n items = s.find_elements_by_css_selector('.{:s}-list .secondary-'\n 'sidebar-item .item-label '\n '.truncate'.format(item_type))\n return sum(1 for li in items if li.text == item_name) == 1\n\n Wait(driver, MAX_REFRESH_COUNT * WAIT_BACKEND).until(\n lambda s: refresh_and_call(s, _assert_one_item_in_list,\n name, elem_type),\n message='searching for exactly one {item} '\n 'on {list} 
list'.format(item=name, list=elem_type)\n )\n\n\n@given(parsers.parse('that in {browser_id} there is an \"{item_name}\" '\n 'item on the {item_type} list'))\n@given(parsers.parse('that in {browser_id} there is a \"{item_name}\" '\n 'item on the {item_type} list'))\ndef op_check_if_there_is_an_item_on_the_list(selenium, browser_id,\n item_name, item_type):\n driver = select_browser(selenium, browser_id)\n _check_for_item_in_given_list(driver, item_name, item_type)\n\n\n@when(parsers.parse('user of {browser_id} sees that the new item has appeared '\n 'on the {item_type} list'))\n@then(parsers.parse('user of {browser_id} sees that the new item has appeared '\n 'on the {item_type} list'))\ndef op_check_if_new_item_appeared_in_list(selenium, browser_id,\n item_type, name_string):\n driver = select_browser(selenium, browser_id)\n _check_for_item_in_given_list(driver, name_string, item_type)\n\n\n@when(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n 'has appeared on the {item_type} list'))\n@then(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n 'has appeared on the {item_type} list'))\ndef op_check_if_item_of_name_appeared_in_list(selenium, browser_id,\n item_name, item_type):\n driver = select_browser(selenium, browser_id)\n _check_for_item_in_given_list(driver, item_name, item_type)\n\n\n# TODO uncomment when leave from group backend will be repaired\n# @then(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n# 'has disappeared from the {item_type} list'))\n# def op_check_if_item_of_name_disappeared_from_list(selenium, browser_id,\n# item_type, item_name):\n# def _check_for_lack_of_item_in_list(s):\n# items = s.find_elements_by_css_selector('.{:s}-list .secondary-'\n# 'sidebar-item .item-label '\n# '.truncate'.format(item_type))\n# return all(item.text != item_name for item in items)\n#\n# driver = select_browser(selenium, browser_id)\n# Wait(driver, MAX_REFRESH_COUNT*WAIT_BACKEND).until(\n# lambda s: refresh_and_call(s, _check_for_lack_of_item_in_list),\n# message='waiting for {item} to disappear from '\n# '{list} list'.format(item=item_name, list=item_type)\n# )\n\n\n# TODO rm when leave from group backend will be repaired\n@when(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n 'has disappeared from the {item_type} list'))\n@then(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n 'has disappeared from the {item_type} list'))\ndef op_check_if_item_of_name_disappeared_from_list(selenium, browser_id,\n item_name, item_type):\n def _check_for_lack_of_item_in_list(s):\n items = s.find_elements_by_css_selector('.{:s}-list .secondary-'\n 'sidebar-item .item-label '\n '.truncate'.format(item_type))\n return all(item.text != item_name for item in items)\n\n def _refresh_and_call():\n \"\"\"Refresh browser and keep calling callback with given args\n until achieve expected result or timeout.\n \"\"\"\n op_url = parse_url(driver.current_url).group('base_url')\n driver.get(op_url)\n _click_on_tab_in_main_menu_sidebar(driver, item_type)\n\n try:\n result = Wait(driver, WAIT_REFRESH).until(\n lambda s: _check_for_lack_of_item_in_list(s)\n )\n except TimeoutException:\n return None\n else:\n return result\n\n driver = select_browser(selenium, browser_id)\n Wait(driver, MAX_REFRESH_COUNT*WAIT_BACKEND).until(\n lambda s: _refresh_and_call(),\n message='waiting for {:s} to disappear from '\n 'groups list'.format(item_name)\n )\n\n\ndef _check_for_presence_of_item_in_table(driver, name, caption):\n table_elems = 
driver.find_elements_by_css_selector('table thead, '\n 'table tbody')\n for thead, tbody in zip(table_elems[::2], table_elems[1::2]):\n th = thead.find_element_by_css_selector('th .item-label')\n if th.text.lower() == caption.lower():\n items = tbody.find_elements_by_css_selector('.permissions-'\n 'table-row '\n '.truncate')\n return any(item.text == name for item in items)\n\n\n@when(parsers.parse('user of {browser_id} sees that \"{name}\" item has appeared '\n 'on current {caption} permissions table'))\n@then(parsers.parse('user of {browser_id} sees that \"{name}\" item has appeared '\n 'on current {caption} permissions table'))\ndef op_check_if_row_of_name_appeared_in_table(selenium, browser_id,\n name, caption):\n driver = select_browser(selenium, browser_id)\n Wait(driver, MAX_REFRESH_COUNT * WAIT_BACKEND).until(\n lambda s: refresh_and_call(s, _check_for_presence_of_item_in_table,\n name, caption),\n message='searching for exactly one {:s} '\n 'on {:s} list in table'.format(name, caption)\n )\n\n\ndef _find_item_in_sidebar_list(driver, item_name, item_type):\n items = driver.find_elements_by_css_selector('.' + item_type + '-list '\n '.secondary-sidebar-item')\n for item in items:\n # if settings dropdown menu is expanded text looks like: name\\noption1\\noption2\\n...\n # so splitting text on nl and getting 0 element\n if item_name == item.text.split('\\n')[0]: # TODO better way to check if it is the item we seek\n return item\n\n\n@when(parsers.parse('user of {browser_id} clicks a settings icon displayed '\n 'for \"{item_name}\" item on the {item_type} list'))\n@then(parsers.parse('user of {browser_id} clicks a settings icon displayed '\n 'for \"{item_name}\" item on the {item_type} list'))\ndef op_click_settings_icon_for_list_item(selenium, browser_id,\n item_name, item_type):\n\n def _find_settings_icon_and_check_if_clickable(s):\n list_item = _find_item_in_sidebar_list(s, item_name, item_type)\n icon = list_item.find_element_by_css_selector('.oneicon-settings')\n if icon.is_enabled():\n s.execute_script('arguments[0].scrollIntoView();', icon)\n return icon\n\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_FRONTEND).until(\n _find_settings_icon_and_check_if_clickable,\n message='clicks on settings icon for {name} on {type} '\n 'list'.format(name=item_name, type=item_type)\n ).click()\n\n\n@when(parsers.parse('user of {browser_id} sees a settings dropdown menu for '\n '\"{name}\" item on the {elem_type} list'))\n@then(parsers.parse('user of {browser_id} sees a settings dropdown menu for '\n '\"{name}\" item on the {elem_type} list'))\ndef op_wait_for_settings_dropdown_menu(selenium, browser_id, name, elem_type):\n\n def _find_expanded_menu(s):\n list_item = _find_item_in_sidebar_list(s, name, elem_type)\n toggle = list_item.find_element_by_css_selector('.dropdown-toggle')\n return toggle.get_attribute('aria-expanded') == 'true'\n\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_FRONTEND).until(\n _find_expanded_menu,\n message='waiting for settings dropdown to expand'\n )\n\n\n@when(parsers.parse('user of {browser_id} clicks on the \"{item_name}\" item '\n 'in current settings dropdown'))\n@then(parsers.parse('user of {browser_id} clicks on the \"{item_name}\" item '\n 'in current settings dropdown'))\ndef op_click_on_item_in_current_settings_dropdown(selenium, browser_id,\n item_name):\n driver = select_browser(selenium, browser_id)\n click_on_element(driver, item_name=item_name,\n css_path='.settings-dropdown '\n '.dropdown-menu-settings '\n 
'.clickable',\n msg='clicking on {:s} in current '\n 'settings dropdown')\n\n\n@given(parsers.parse('user of {browser_id} sees that main content '\n 'has ended loading'))\ndef op_check_if_main_content_has_been_reloaded(selenium, browser_id):\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_FRONTEND).until(\n EC.invisibility_of_element_located((By.CSS_SELECTOR,\n '.common-loader-spinner')),\n message='wait for main content to end loading'\n )\n\n\n@given(parsers.re('users? of (?P<browser_id_list>.*?) seen that '\n 'Oneprovider session has started'))\ndef wait_for_op_session_to_start(selenium, browser_id_list):\n def _check_url():\n try:\n found = parse_url(driver.current_url).group('access')\n except AttributeError:\n return False\n else:\n return 'onedata' == found.lower()\n\n for browser_id in list_parser(browser_id_list):\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_BACKEND).until(\n lambda _: _check_url(),\n message='waiting for session to start'\n )\n","sub_path":"tests/gui/steps/oneprovider_common.py","file_name":"oneprovider_common.py","file_ext":"py","file_size_in_byte":16942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"651598069","text":"__author__ = 'charlesztt'\n\n\nimport os\nimport re\nfrom xml.sax.saxutils import escape\n\ndef date_detection(one_file):\n token_list=one_file.split('_')\n for one_token in token_list:\n re1='((?:(?:[1]{1}\\\\d{1}\\\\d{1}\\\\d{1})|(?:[2]{1}\\\\d{3}))(?:[0]?[1-9]|[1][012])(?:(?:[0-2]?\\\\d{1})|(?:[3][01]{1})))(?![\\\\d])'\t# YYYYMMDD 1\n rg = re.compile(re1,re.IGNORECASE|re.DOTALL)\n m = rg.search(one_token)\n if m:\n yyyymmdd1=m.group(1)\n return yyyymmdd1\n else:\n continue\n return \"\"\n\n\ntopic_list=['isis']\n\nfor one_topic in topic_list:\n file_list=os.listdir(os.path.join('./data/nered',one_topic))\n for one_file in file_list:\n f=open(os.path.join('./data/nered',one_topic,one_file))\n f2=open(os.path.join('./data/true',one_topic,one_file.replace(\".txt\",\".sgm\")),'w')\n\n f2.write('\\n')\n f2.write('%s\\n'%(one_file.replace(\".txt\",\"\")))\n f2.write(' NEWS STORY \\n')\n f2.write(' %s '%date_detection(one_file.replace(\".txt\",\"\")))\n f2.write('\\n \\n')\n f2.write('\\n')\n for one_line in f:\n first_flag=1\n one_line = one_line.replace(\"\\n\",\"\") # strip the trailing newline (str.replace returns a new string)\n one_line_list=one_line.split(\" \")\n for one_token in one_line_list:\n if first_flag==1:\n first_flag=0\n temp=one_token.split('/')[0].lower().title()\n f2.write(escape(temp+' '))\n continue\n try:\n if one_token.split('/')[1] != 'O':\n temp=one_token.split('/')[0].lower().title()\n f2.write(escape(temp+' '))\n else:\n temp=one_token.split('/')[0].lower()\n f2.write(escape(temp+' '))\n except:\n pass\n f2.write('\\n')\n f2.write('\\n\\n')","sub_path":"python/true_case_them.py","file_name":"true_case_them.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"397343988","text":"# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom contextlib import contextmanager\n\nfrom sqlalchemy import create_engine\n\nfrom scout_apm.sqlalchemy import instrument_sqlalchemy\nfrom tests.compat import mock\n\n\n@contextmanager\ndef conn_with_scout():\n \"\"\"\n Create an instrumented SQLAlchemy connection to an in-memory SQLite database.\n\n \"\"\"\n engine = create_engine(\"sqlite:///:memory:\")\n instrument_sqlalchemy(engine)\n conn = engine.connect()\n try:\n yield conn\n
finally:\n conn.close()\n\n\ndef test_hello():\n with conn_with_scout() as conn:\n result = conn.execute(\"SELECT 'Hello World!'\")\n assert list(result) == [(\"Hello World!\",)]\n\n\n# Monkey patch should_capture_backtrace in order to keep the test fast.\n@mock.patch(\n \"scout_apm.core.n_plus_one_call_set.NPlusOneCallSetItem.should_capture_backtrace\"\n)\ndef test_hello_capture_backtrace(should_capture_backtrace):\n should_capture_backtrace.return_value = True\n with conn_with_scout() as conn:\n result = conn.execute(\"SELECT 'Hello World!'\")\n assert list(result) == [(\"Hello World!\",)]\n\n\ndef test_instrument_engine_is_idempotent():\n engine = create_engine(\"sqlite:///:memory:\")\n instrument_sqlalchemy(engine)\n instrument_sqlalchemy(engine) # does nothing, doesn't crash\n","sub_path":"tests/integration/test_sqlalchemy.py","file_name":"test_sqlalchemy.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"269385965","text":"from Bio.Blast import NCBIWWW\nfrom Bio.Blast import NCBIXML\nfrom Bio import SeqIO\nfrom pathlib import Path\nfrom Bio import SearchIO\n\ntarget_species = [\"Homo sapiens\", \"Pan troglodytes\", \"Notechis scutatus\", \\\n \"Takifugu rubripes\"]\n\ndef blaster(fasta_file): \n \"\"\"\n Based on a target species list, we BLAST the given\n input sequence and write the matching hits to a file. \n \"\"\"\n fasta_string = open(fasta_file).read()\n print(\"BLAST initiated...\")\n\n # qblast opens up the BLAST function in NCBI. \n result_handle = NCBIWWW.qblast(\"blastn\", \"nt\", fasta_string)\n\n print(\"BLAST search done.\")\n # Records will then be written in a file.\n records = []\n # Results need to go into an XML file. \n with open(\"my_blast.xml\", \"w\") as out_handle:\n out_handle.write(result_handle.read())\n\n blast_result = SearchIO.read(\"my_blast.xml\", \"blast-xml\")\n print(\"Writing BLAST results to file..\")\n for i in target_species: \n # Iterate through the blast result hits. \n for hit in blast_result: \n print(hit)\n if i in hit.description: \n # If the target species is found, append. \n records.append(hit[0].hit)\n\n\n # Pretty easy way to write the given sequences in one file. 
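SeqIO.write also\n # returns the number of records written, so a caller could log the count, e.g.\n # n = SeqIO.write(records, out_path, \"fasta\") (illustrative; out_path is a hypothetical name).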
\n SeqIO.write(records, \"blast_results.fasta\", \"fasta\")\n print(\"\\nBLAST result file written to blast_results.fasta.\")\n return (\"blast_results.fasta\")\n \n\n \n \n","sub_path":"scripts/blast2.py","file_name":"blast2.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"644350089","text":"#!/usr/bin/python3 -u\nfrom scipy.io import netcdf\nfrom scipy.ndimage import zoom\nfrom PIL import Image, ImageOps, ImageChops\nfrom time import sleep\n\nimport subprocess\nimport numpy as np\nimport os\nimport gc\n\nfrom tempfile import NamedTemporaryFile\nfrom urllib.request import urlopen\nimport json\nimport datetime\nimport h5py\n\n# Format for URLs (spaces added by yours truly)\n# --////OR_---M-C-G-s _e _c.nc\n#https://storage.cloud.google.com/gcp-public-data-goes-16/ABI -L2 -CMIPF /2018 /070 /20 /OR_ABI -L2 -CMIPF -M3 C02 _G16 _s20180702000416_e20180702011183_c20180702011253.nc\n\n# Path to store tempfiles to\nTEMP = 'data'\nSTORAGE = 'data'\n# URL to fetch directory listings from\n# https://www.googleapis.com/storage/v1/b/gcp-public-data-goes-16/o?prefix=ABI-L2-CMIPF/ 2018/070/21/OR_ABI-L2-CMIPF-M3C01\nDIR_LIST = 'https://www.googleapis.com/storage/v1/b/gcp-public-data-goes-16/o?prefix=ABI-L2-CMIPF/{date:%Y/%j/%H}/OR_ABI-L2-CMIPF-M3C{channel:02}'\n# Size to chunk downloads into, bytes\nCHUNK_SIZE = 5000000 # 5MB\n# Final size of the generated images. Refer to the \"Channel 2 is X by Y\" message for the full size.\n# This must be a common denominator to all layers. (10848, 5424, 2712, 1356, ...)\n#FINAL_SIZE = (10848, 10848)\n#FINAL_SIZE = (5424, 5424)\n#FINAL_SIZE = (2712, 2712)\nFINAL_SIZE = (500, 500)\nTHUMB_SIZE = (1000, 1000)\n\n# Polling time - how often to check the API for new images (seconds)\n# Full-disk scans come every 15 minutes.\nPOLL_TIME = 5*60\n\n# How much timestamps can differ while being considered identical (seconds)\nTIME_FUZZ = 60\n\nclass Timer(object):\n \"\"\"A simple lap timer. 
On each call of lap(), it returns the elapsed time since the last call.\"\"\"\n def __init__(self):\n self.last = datetime.datetime.now()\n self.start = self.last\n def lap(self):\n old = self.last\n self.last = datetime.datetime.now()\n return (self.last - old).total_seconds()\n def total(self):\n self.lap()\n return (self.last - self.start).total_seconds()\n def delay(self, seconds):\n # Delays for the number of seconds since the last lap.\n # Reset the lap counter on exit.\n sleepTime = seconds - self.lap()\n if sleepTime > 0:\n print('Sleeping for {} seconds'.format(sleepTime))\n sleep(sleepTime)\n else:\n print('Period already expired ({}s ago)'.format(-sleepTime))\n self.lap()\n
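\n# Illustrative Timer usage (a sketch; do_work is a hypothetical name):\n# t = Timer(); do_work(); print('step took', t.lap(), 's, total', t.total(), 's')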
\ndef getLatestUrl(channel, offset=0):\n \"\"\" Gets the URL to the most recent GOES-R image of the specified channel.\"\"\"\n \n url = DIR_LIST.format(date=datetime.datetime.utcnow() - datetime.timedelta(seconds=offset), channel=channel)\n if offset == 0: print('Fetching file list:', url)\n text = urlopen(url, timeout=15).read().decode('utf-8')\n try: obj = json.loads(text)['items'][-1]\n except KeyError:\n # If nothing matches this hour, try an hour ago.\n if offset == 0:\n print(' - No data found for this hour; trying an hour ago')\n return getLatestUrl(channel, offset=3600)\n # If that also fails, die.\n print('No files matched the query.')\n raise\n return obj\n\ndef downloadFile(src, dest, size = 0):\n handle = urlopen(src, timeout=15)\n chunk = 'not an empty string'\n\n with open(dest, 'wb') as output:\n # Download the file!\n while chunk:\n print('Downloaded{: 4.1f} of{: 0.1f}MB: {}'.format(output.tell()/1E6, int(size)/1E6, dest), end='\\r')\n chunk = handle.read(CHUNK_SIZE)\n output.write(chunk)\n output.flush()\n print()\n\ndef getLatestData(channel, obj, last=''):\n timer = Timer()\n filename = TEMP + '/' + os.path.basename(obj['name'])\n\n # Download file to /tmp and convert to netCDF3. If it's there already, cool.\n if not os.path.isfile(filename):\n try:\n downloadFile(obj['mediaLink'], filename, obj['size'])\n except:\n # If something goes wrong, kill the file rather than leaving a corrupted download.\n os.remove(filename)\n raise\n else: # file already existed\n print('Downloaded:', filename, timer.lap())\n\n # Read it as CDF; pull out the reflectance data.\n print(' - Reading netCDF', timer.lap())\n with netcdf.netcdf_file(filename, 'r') as g16nc:\n print(' - Extracting reflectance', timer.lap())\n reflectance = g16nc.variables['CMI'][:] # Extract the reflectance\n\n zoom_factor = [FINAL_SIZE[0]/reflectance.shape[0], FINAL_SIZE[1]/reflectance.shape[1]]\n print(' - Channel {} is {} by {}; resizing by {}'.format(channel, g16nc.variables['CMI'].shape[0], g16nc.variables['CMI'].shape[1], zoom_factor), timer.lap())\n reflectance = zoom(reflectance, zoom_factor, order=1)\n\n # Optional: delete the netcdf to avoid clogging up the disk.\n # (On a 10GB disk, that's important)\n #os.remove(filename)\n\n print(' - Ensuring all values are positive', timer.lap())\n np.maximum(reflectance, 0, reflectance)\n\n print(' - Applying gamma correction', timer.lap())\n reflectance = reflectance ** 0.55\n\n print(' - Scaling for improved contrast', timer.lap())\n if channel != 13:\n reflectance *= 5\n\n print(' - Converting to image', timer.lap())\n image = Image.fromarray(reflectance).convert(mode='L')\n \n gc.collect()\n\n print(' - Total time:', timer.total())\n return image\n\ndef makeImage(lastTime = 0):\n timer = Timer()\n print('Downloading latest images')\n # Decide which file to download (obj includes filesize, a link, and some other stuff)\n obj = {} # Obj is a dictionary of file attributes - the latest image available for the specified channel.\n for channel in [1, 2, 3, 13]:\n obj[channel] = getLatestUrl(channel)\n obj[channel]['time'] = int(obj[channel]['name'].split('_')[-1][1:-3])\n\n # Pick out a timestamp to use elsewhere.\n timestamp = obj[1]['time']\n\n # Check that all timestamps are \"close\"\n if ((-TIME_FUZZ <= (obj[1]['time'] - obj[2]['time']) <= TIME_FUZZ)\n and (-TIME_FUZZ <= (obj[1]['time'] - obj[3]['time']) <= TIME_FUZZ)\n and (-TIME_FUZZ <= (obj[2]['time'] - obj[3]['time']) <= TIME_FUZZ)):\n print('Images are time-synchronous ({}, {}, and {})'.format(\n obj[1]['time'],\n obj[2]['time'],\n obj[3]['time']))\n else:\n # If not, try again later.\n print('Images are not time-synchronous ({}, {}, and {})'.format(\n obj[1]['time'],\n obj[2]['time'],\n obj[3]['time']))\n return lastTime\n\n # Check that the image has updated (no sense making duplicates)\n if timestamp == lastTime:\n print('Images have not changed since last check ({})'.format(obj[1]['time']))\n return lastTime\n\n # Getting to work - insert a break.\n print()\n\n blue = getLatestData(1, obj[1]) # Load Channel 1 - Blue (Visible 0.47 µm)\n red = getLatestData(2, obj[2]) # Load Channel 2 - Red (visible 0.64 µm)\n veggie = getLatestData(3, obj[3]) # Load Channel 3 - Veggie (NIR 0.86 µm)\n cloud = getLatestData(13, obj[13]) # Load Channel 13 - 'Clean' IR longwave window (10.3 µm)\n\n # Clean up the NC files before continuing.\n gc.collect()\n\n print('Making a pseudo-green channel', timer.lap())\n # Derived from Planet Labs data, CC > 0.9\n # true_green = 0.48358168 * ch_2 + 0.45706946 * ch_1 + 0.06038137 * ch_3\n green = ImageChops.add(Image.eval(blue, lambda x: x*0.45706946),\n ImageChops.add(Image.eval(red, lambda x: x*0.48358168),\n Image.eval(veggie, lambda x: x*0.06038137 )))\n\n 
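# Sanity check on the weights above: 0.48358168 + 0.45706946 + 0.06038137 = 1.00103251,\n # so a uniformly lit scene keeps essentially its original brightness.\n\n 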
print('Colorizing channels', timer.lap())\n red = ImageOps.colorize(red, (0, 0, 0), (255, 0, 0))\n veggie = ImageOps.colorize(veggie, (0, 0, 0), (0, 255, 0))\n green = ImageOps.colorize(green , (0, 0, 0), (0, 255, 0))\n blue = ImageOps.colorize(blue, (0, 0, 0), (0, 0, 255))\n cloud = ImageOps.colorize(cloud, (0, 0, 0), (255, 255, 255))\n cloud = ImageOps.equalize(cloud)\n red.save(STORAGE+'/red-{}.jpg'.format(timestamp))\n veggie.save(STORAGE+'/veggie-{}.jpg'.format(timestamp))\n green.save(STORAGE+'/green-{}.jpg'.format(timestamp))\n blue.save(STORAGE+'/blue-{}.jpg'.format(timestamp))\n cloud.save(STORAGE+'/cloud-{}.jpg'.format(timestamp))\n\n print('Generating geocolor and truecolor outputs', timer.lap())\n #geocolor = ImageChops.add(ImageChops.add(red, veggie), blue)\n #geocolor.save(STORAGE+'/geocolor-{}.png'.format(timestamp))\n\n truecolor = ImageChops.add(ImageChops.add(red, green), blue)\n truecolor.save(STORAGE+'/truecolor-{}.jpg'.format(timestamp))\n #truecolor.resize(THUMB_SIZE).save(STORAGE+'/truecolor-thumb-{}.jpg'.format(timestamp))\n cloudcolor = ImageChops.screen(ImageChops.add(ImageChops.add(red, green), blue),cloud)\n cloudcolor.save(STORAGE+'/cloudcolor-{}.jpg'.format(timestamp))\n\n # Make a symlink pointing to the latest for javascript to point at.\n try: os.remove( STORAGE+'/truecolor-latest.jpg')\n except FileNotFoundError: pass\n os.symlink(STORAGE+'/truecolor-{}.jpg'.format(timestamp), STORAGE+'/truecolor-latest.jpg')\n try: os.remove( STORAGE+'/truecolor-thumb-latest.jpg')\n except FileNotFoundError: pass\n os.symlink(STORAGE+'/truecolor-thumb-{}.jpg'.format(timestamp), STORAGE+'/truecolor-thumb-latest.jpg')\n\n\n print('Done!', timer.lap())\n print('Total time:', timer.total())\n print()\n\n return timestamp\n\ndef get_channels_descriptions(channels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]):\n descriptions = {} # descriptions is a dictionary of file attributes - the latest image available for the specified channel.\n for channel in channels:\n descriptions[channel] = getLatestUrl(channel)\n #descriptions[channel]['time'] = int(descriptions[channel]['name'].split('_')[-1][1:-3])\n descriptions[channel]['time'] = datetime.datetime.strptime(descriptions[channel]['updated'], '%Y-%m-%dT%H:%M:%S.%fZ')\n # check the largest pairwise gap between channel timestamps\n times = np.array([[descriptions[channel]['time']]*len(channels) for channel in channels])\n if (times - times.T).max().total_seconds() > TIME_FUZZ:\n print('Warning: timestamp spread greater than threshold: %0.2f seconds'%((times - times.T).max().total_seconds()))\n return descriptions\n\ndef get_channel_file(channel, description):\n filename = TEMP + '/' + os.path.basename(description['name'])\n # Download file to /tmp and convert to netCDF3. If it's there already, cool.\n
 if not os.path.isfile(filename):\n try:\n downloadFile(description['mediaLink'], filename, description['size'])\n except:\n # If something goes wrong, kill the file rather than leaving a corrupted download.\n os.remove(filename)\n raise\n else: # file already existed\n print('Downloaded:', filename)\n description['filepath'] = filename\n\ndef create_rgb(desc=None):\n channels = [1, 2, 3]\n if desc == None:\n desc = get_channels_descriptions(channels)\n for i in channels:\n get_channel_file(i,desc[i])\n # create channels blue, red and veggie\n b = np.array(h5py.File(desc[1]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/b.shape[0], FINAL_SIZE[1]/b.shape[1]]\n b = zoom(b, zoom_f, order=1)\n r = np.array(h5py.File(desc[2]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/r.shape[0], FINAL_SIZE[1]/r.shape[1]]\n r = zoom(r, zoom_f, order=1)\n v = np.array(h5py.File(desc[3]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/v.shape[0], FINAL_SIZE[1]/v.shape[1]]\n v = zoom(v, zoom_f, order=1)\n # make synthetic green channel\n g = b * 0.45706946 + r * 0.48358168 + v * 0.06038137\n\n # normalize to 0,1 interval for plotting with plt.imshow()\n\n r = (r-r.min())/(r.max()-r.min())\n g = (g-g.min())/(g.max()-g.min()) \n b = (b-b.min())/(b.max()-b.min())\n \n # create composite RGB image\n img = np.zeros((FINAL_SIZE[0], FINAL_SIZE[1], 3))\n img[:, :, 0] = r\n img[:, :, 1] = g\n img[:, :, 2] = b\n return img\n\ndef create_rgb13(desc=None):\n channels = [1, 2, 3, 13]\n if desc == None:\n desc = get_channels_descriptions(channels)\n for i in channels:\n get_channel_file(i,desc[i])\n # create channels blue, red, veggie and clouds\n b = np.array(h5py.File(desc[1]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/b.shape[0], FINAL_SIZE[1]/b.shape[1]]\n b = zoom(b, zoom_f, order=1)\n r = np.array(h5py.File(desc[2]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/r.shape[0], FINAL_SIZE[1]/r.shape[1]]\n r = zoom(r, zoom_f, order=1)\n v = np.array(h5py.File(desc[3]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/v.shape[0], FINAL_SIZE[1]/v.shape[1]]\n v = zoom(v, zoom_f, order=1)\n c = np.array(h5py.File(desc[13]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/c.shape[0], FINAL_SIZE[1]/c.shape[1]]\n c = zoom(c, zoom_f, order=1)\n # make synthetic green channel\n g = b * 0.45706946 + r * 0.48358168 + v * 0.06038137\n r1 = (r-r.min())/(r.max()-r.min())\n g1 = (g-g.min())/(g.max()-g.min()) \n b1 = (b-b.min())/(b.max()-b.min())\n mask = ((r1+g1+b1) < 0.01).astype(int)\n c *= mask\n c //=4\n r = r-c\n g = g-c\n b = b-c\n # normalize to 0,1 interval for plotting with plt.imshow()\n r = (r-r.min())/(r.max()-r.min())\n g = (g-g.min())/(g.max()-g.min()) \n b = (b-b.min())/(b.max()-b.min())\n # create composite RGB image\n img = np.zeros((FINAL_SIZE[0], FINAL_SIZE[1], 3))\n img[:, :, 0] = r\n img[:, :, 1] = g\n img[:, :, 2] = b\n return img\n
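\n# Illustrative use of the composites above (a sketch, not part of the pipeline):\n# img = create_rgb()\n# import matplotlib.pyplot as plt; plt.imshow(img); plt.show()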
\ndef create_NDVI(desc=None):\n '''Create a Normalized Difference Vegetation Index image\n NIR - red\n NDVI = -----------\n NIR + red\n By definition this image is grey scale, but can be plotted using summer_r or\n YlGn color maps in plt.imshow()\n '''\n channels = [2, 3]\n if desc == None:\n desc = get_channels_descriptions(channels)\n for i in channels:\n get_channel_file(i,desc[i])\n # create channels red and veggie\n r = np.array(h5py.File(desc[2]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/r.shape[0], FINAL_SIZE[1]/r.shape[1]]\n r = zoom(r, zoom_f, order=1)\n v = np.array(h5py.File(desc[3]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/v.shape[0], FINAL_SIZE[1]/v.shape[1]]\n v = zoom(v, zoom_f, order=1)\n img = (v-r)/(v+r)\n return img\n\ndef create_image(channel, desc=None):\n if desc == None:\n desc = get_channels_descriptions([channel]) # look up just this channel ('channels' was undefined here)\n get_channel_file(channel, desc[channel])\n img = np.array(h5py.File(desc[channel]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/img.shape[0], FINAL_SIZE[1]/img.shape[1]]\n img = zoom(img, zoom_f, order=1)\n return img\n","sub_path":"myGOES16.sci.py","file_name":"myGOES16.sci.py","file_ext":"py","file_size_in_byte":15207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"529605092","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('musics', '0002_auto_20150215_1530'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='score',\n name='musescore_user',\n field=models.IntegerField(blank=True, verbose_name='Musescore UserID'),\n preserve_default=True,\n ),\n ]\n","sub_path":"musics/migrations/0003_auto_20150215_1542.py","file_name":"0003_auto_20150215_1542.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"259517377","text":"## Description: This program simulates a DAC management system, performing operations such as printing, sorting, insertion, removal and search over the students enrolled in each class.\n## Input: a list of n integers (each student's RA) and a list of operations to perform, terminated by the character s.\n## Output: The list must be printed as read from the input and, for each 'p' operation performed, the list must be printed in its current state, given the operations performed before it. When the program reads the s operator, which represents the exit operation, it must stop executing. In the case of binary search, the indices visited by the search at each step must be printed, whether or not the RA is in the list. To do so, simply print the index of the middle position of the list during the search.\n
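## Example (illustrative): given the RA list \"23447 11111\", the operation \"c\" sorts it in ascending order and a following \"p\" prints the list's current state.\n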
## FERNANDO DOS REIS SANTOS FILHO - RA: 23447\n\nfrom aux import *\n\ndef p():\n for i in range (len(lista)-1):\n lista[i]=int(lista[i])\n print (lista[i], end=\" \")\n print (lista[len(lista)-1]) ## PRINT\n\ndef inserir(aux):\n lista.append(int(aux))\n\n if (ordenacao == \"c\"):\n insertionSortCrescente(lista)\n elif (ordenacao == \"d\"):\n insertionSortDecrescente(lista)\n\n\n\nordenacao = 0\n\nlista = input().split() ## read the list of RAs\n\nwhile (True):\n aux = input().split()\n\n if (aux[0] == \"s\"): ## EXIT the program\n break\n\n if (aux[0] == \"p\"): ## run the PRINT routine\n try:\n p()\n except IndexError:\n continue\n\n if (aux[0] == \"c\"): ## run ASCENDING SORT\n try:\n insertionSortCrescente(lista)\n ordenacao = \"c\"\n except IndexError:\n continue\n\n if (aux[0] == \"d\"): ## run DESCENDING SORT\n try:\n insertionSortDecrescente(lista)\n ordenacao =\"d\"\n except IndexError:\n continue\n\n if (aux[0] == \"i\"): ## run INSERT\n inserir(aux[1])\n\n if (aux[0] == \"r\"): ## run REMOVE\n lista.remove(int(aux[1])) ## remove the given RA from the list\n","sub_path":"lab14.py","file_name":"lab14.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"524434245","text":"import pandas as pd\n\ndef interpolate(dataframe, freq='15Min', units='kW'):\n\tadjustment_hashmap = {'kW':1, 'kWh':4, 'W':0.001, 'Wh':0.004}\n\tstart_date = dataframe.first_valid_index().date()\n\tend_date = dataframe.last_valid_index().date() + pd.Timedelta(days=1)\n\n\tdaterange = pd.date_range(start_date, end_date, freq=freq, closed='left')\n\tdataframe = pd.concat([dataframe * adjustment_hashmap[units], pd.DataFrame(index=daterange)])\n\tdataframe.index.names = [const.TIMESTAMP]\n\treturn dataframe[~dataframe.index.duplicated(keep='first')].sort_index().interpolate()\n\ndef convert(dataframe, start_prefix=None, end_prefix=None):\n\tconversion_hashmap = {\n\t\t'kilo':1000,\n\t\t'mega':1000000,\n\t\t'giga':1000000000,\n\t\t'tera':1000000000000\n\t}\n\n\t# datetimeindex.freq.delta","sub_path":"enerlytics/util/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"214689035","text":"import numpy as np\r\nimport pandas as pd\r\nimport pickle\r\n\r\nclass ReliableSource():\r\n\r\n def __init__(self):\r\n path = \"/content/drive/My Drive/MLFall2020/the-feature-finders/AlternusVera/ReliableSource/data.csv\"\r\n \r\n \r\n def FeatureFinders_getSourceReliabilityScore(self, source): # return between 0 and 1, being 0 = True, 1 = Fake\r\n path = \"/content/drive/My Drive/MLFall2020/the-feature-finders/AlternusVera/ReliableSource/data.csv\"\r\n fakeNewsSites = pd.read_csv(path)\r\n for index, row in fakeNewsSites.iterrows():\r\n score = 100\r\n if (row['Type of site'] == 'Some fake stories'):\r\n score = 50\r\n fakeNewsSites.at[index, 'fake_score'] = score\r\n\r\n if (source == \"\"):\r\n return 0\r\n #print(source)\r\n d = fakeNewsSites[fakeNewsSites['Site name'].str.match(r'\\b' + source + r'\\b')]\r\n #print(d)\r\n if d.shape[0] > 0:\r\n return d.iloc[0]['fake_score']\r\n\r\n # if (d['fake_score'].empty):\r\n # return 0\r\n # return int(d['fake_score'].values)\r\n return 0\r\n\r\n def FeatureFinders_getReliabilityBySource(self,src):\r\n x = 
self.FeatureFinders_getSourceReliabilityScore(src)\r\n xTrain = np.array(x).reshape(-1, 1)\r\n\r\n readfile = open('/content/drive/My Drive/MLFall2020/the-feature-finders/AlternusVera/pickled-model/ReliableSourceLabelmodel', 'rb')\r\n best_clf = pickle.load(readfile)\r\n\r\n xPpredicted = best_clf.predict(xTrain)\r\n print(xPpredicted)\r\n xPredicedProb = best_clf.predict_proba(xTrain)[:,1]\r\n #xPredicedProb = best_clf.predict_proba(xTrain)\r\n #print(xPredicedProb)\r\n return 1 - float(xPredicedProb)","sub_path":"AlternusVeraReliableSource.py","file_name":"AlternusVeraReliableSource.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"307271148","text":"#!/usr/bin/env python\n\nfrom canari.maltego.entities import NSRecord, DNSName, IPv4Address\nfrom canari.maltego.message import Label, UIMessage\nfrom sploitego.scapytools.dns import nslookup\nfrom canari.framework import configure\nfrom canari.maltego.utils import debug\nfrom canari.maltego.html import Table\nfrom canari.config import config\nfrom scapy.all import DNS\n\n\n__author__ = 'Nadeem Douba'\n__copyright__ = 'Copyright 2012, Sploitego Project'\n__credits__ = []\n\n__license__ = 'GPL'\n__version__ = '0.1'\n__maintainer__ = 'Nadeem Douba'\n__email__ = 'ndouba@gmail.com'\n__status__ = 'Development'\n\n__all__ = [\n 'dotransform'\n]\n\n\n@configure(\n label='To DNS Names [Cache Snoop]',\n description='This transform performs DNS cache snooping on the target DNS server for the Alexa top 500 list.',\n uuids=[\n 'sploitego.v2.IPv4AddressToDNSName_CacheSnoop',\n 'sploitego.v2.NSRecordToDNSName_CacheSnoop'\n ],\n inputs=[\n ( 'Reconnaissance', IPv4Address ),\n ( 'Reconnaissance', NSRecord )\n ]\n)\ndef dotransform(request, response):\n ip = request.value\n ans = nslookup(\"www.google.ca\", nameserver=ip)\n if ans is not None:\n for site in config['dnscachesnoop/wordlist']:\n debug('Resolving %s' % site)\n ans = nslookup(site, nameserver=ip, rd=0)\n if not ans[DNS].ancount:\n ans = nslookup('www.%s' % site, nameserver=ip, rd=0)\n if ans[DNS].ancount:\n e = DNSName(site)\n t = Table(['Name', 'Query Class', 'Query Type', 'Data', 'TTL'], 'Cached Answers')\n for i in range(0, ans[DNS].ancount):\n rr = ans[DNS].an[i]\n t.addrow([\n rr.rrname.rstrip('.'),\n rr.sprintf('%rclass%'),\n rr.sprintf('%type%'),\n rr.rdata.rstrip('.'),\n rr.sprintf('%ttl%')\n ])\n e += Label('Cached Answers', t, type='text/html')\n response += e\n else:\n response += UIMessage('DNS server did not respond to initial DNS request.')\n return response","sub_path":"src/sploitego/transforms/dnscachesnoop.py","file_name":"dnscachesnoop.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"376362186","text":"IMPORT_DIRS = [\n \"windows\",\n \"utils\",\n \"/home/epics/src/R3.14.9-SL-5/base/lib/linux-x86_64\"\n]\n\nAPP_NAME = 'TemplateApp'\n\nENABLE_ERROR_DECORATOR = False\nPROMPT_ON_QUIT = False\nUSE_SIMULATED_PVS = True\n\nSETTINGS_FILE_NAME = \".TemplateApp/settings.json\"\n\nclass KEY(object):\n X_MM_START = 'x_start'\n X_MM_STOP = 'x_stop'\n Z_MM_START = 'z_start'\n Z_MM_STOP = 'z_stop'\n COMMAND = 'cmd'\n KEY = 'key'\n PV = 'pv'\n FORMAT = 'format'\n TEXT = 'text'\n FONT = 'font'\n FONT_SIZE = 'font-size'\n HEIGHT = 'height'\n WIDTH = 'width'\n ALIGN = 'align'\n COLOR = 'color'\n ITEM = 'item'\n LINE_WIDTH = 'line_width'\n LINE_STYLE = 'line_style'\n QT_LAYER = 
'z_value'\n\n\n\n","sub_path":"old_tests/template_sandbox/.DONT_USE_TemplateApp/src/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"383798872","text":"#!/usr/bin/env python\n\"\"\" Provides AVKButton Widget Class\n\n\"\"\"\n\nfrom tkinter import Button\nfrom ..Styles import AVKButtonStyles\n\n__author__ = \"Andrew Vorndran\"\n__copyright__ = \"Copyright 2018, Andrew Vorndran\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Andrew Vorndran\"\n__email__ = \"andvornd@iu.edu\"\n\nclass AVKButton(Button):\n\tdef __init__(self,\n\t pmaster,\n\t ptext=None,\n\t pcommand=None,\n\t pstyle=\"DEFAULT\"):\n\t\t\"\"\"\n\t\tInitialization Function\n\t\t:param pmaster: Master Frame\n\t\t:param ptext: Text to display on button\n\t\t:param pcommand: Command button will execute\n\t\t:param pstyle: User-defined style or default as string\n\t\t\"\"\"\n\t\tButton.__init__(self, pmaster)\n\t\tself.config(text=ptext)\n\t\tself.config(command=pcommand)\n\n\t\ttry:\n\t\t\tself.avkbConfigure(AVKButtonStyles[pstyle])\n\t\texcept KeyError:\n\t\t\traise AVKButton.InvalidAVKButtonTypeError(pstyle)\n\n\tdef avkbConfigure(self, pconfig):\n\t\t\"\"\"\n\t\tConfigures style of AVKButton Based on configuration dictionary\n\t\t:param pconfig: Configuration dictionary from AVKButtonStyles\n\t\t:return: None\n\t\t\"\"\"\n\t\tself.configure(font=pconfig['font'])\n\t\tself.configure(fg=pconfig['fontColor'])\n\t\tself.configure(bg=pconfig['backgroundColor'])\n\t\tself.configure(activeforeground=pconfig['activeFontColor'])\n\t\tself.configure(activebackground=pconfig['activeBackgroundColor'])\n\t\tself.configure(relief=pconfig['relief'])\n\n\tclass InvalidAVKButtonTypeError(Exception):\n\t\tdef __init__(self, key):\n\t\t\t\"\"\"\n\t\t\tInitialization Function\n\t\t\t:param key: Key that caused error meaning this AVKButton Style doesn't exist\n\t\t\t\"\"\"\n\t\t\tException.__init__(self, \"\\nInvalid AVButton type: {0}\".format(key))\n","sub_path":"Widgets/AVKButton.py","file_name":"AVKButton.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"308206905","text":"import threading, time\nfrom decimal import *\nfrom trading_bot import *\nfees = {\"spot\": Decimal(0.001), \"market\": Decimal(0.005)}\nstarting_amount = 1000\n\n# Initialize new bot\nbot = TradingBot(fees, starting_amount)\n\n# Custom rules\nwhile True:\n # Manage alerts\n if bot.alerts:\n alert = bot.alerts[-1]\n ema4 = alert['4ma']\n token = alert['token']\n stable_balance = bot.wallet['stable']\n\n # Make sure bot has current price \n if not token in bot.prices:\n continue\n \n current_price = Decimal(bot.prices[token])\n bot.alerts.pop()\n\n if bot.wallet['stable'] > 10:\n if ema4 == sorted(ema4):\n stop_loss = current_price - current_price * Decimal(0.01)\n stop_gain = current_price + current_price * Decimal(0.01)\n\n bot.market_buy(token, bot.wallet['stable'])\n bot.limit_sell(token, bot.wallet[token], stop_loss)\n bot.limit_sell(token, bot.wallet[token], stop_gain)\n #bot.limit_buy(token, bot.wallet['stable'], current_price + current_price * Decimal(0.01))\n #bot.trailing_stop_loss(token, bot.wallet[token], current_price, 0.002)\n \n\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} 
+{"seq_id":"266681058","text":"#!/usr/local/python/bin/python3\n# coding:utf-8\nimport socketserver\nimport os\nimport sys\nimport logging\nfrom subprocess import getoutput\n# from Crypto.Cipher import AES\nimport struct\nfrom tools.IPy import IP\nimport re, time\nimport threading\nimport signal\n\n\nHOST = \"\"\nPORT = 12777\nADDR = (HOST, PORT)\nALLOWED_HOSTS = [\"119.23.52.178\", \"172.16.90.0/24\", \"127.0.0.1\"]\n\n\nclass threading_install(threading.Thread):\n\tdef __init__(self,cmd):\n\t\tthreading.Thread.__init__(self)\n\t\tself.cmd = cmd\n\n\tdef run(self):\n\t\toutput = getoutput('/bin/bash '+self.cmd)\n\t\tlogfile = self.cmd.split('/')[-1].split('.')[0]+'.log'\n\t\tfd = open(logfile, 'wb')\n\t\tfd.write(output)\n\t\tfd.flush()\n\t\tfd.close()\n\ndef threading_main(cmd):\n\tt = threading_install(cmd)\n\tt.setDaemon(True)\n\tt.start()\n\n\n\n#insert iptables rule for myself\ndef insert_iptables_rule():\n\tIPT = \"/sbin/iptables\",\"/usr/sbin/iptables\",\n\tfor path in IPT:\n\t\tif os.path.exists(path):\n\t\t\tIPT_PATH = path\n\tif 'dpt:'+str(PORT) not in getoutput(IPT_PATH + ' -L -nv'):\n\t\tIPT_COMMAND = IPT_PATH + \" -I INPUT -m state --state NEW -m tcp -p tcp --dport \" + str(PORT) + \" -j ACCEPT\"\n\t\tos.system(IPT_COMMAND)\n\n\n#let current process become a daemon\ndef create_daemon(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):\n\t# 忽略终端I/O信号,STOP信号\n\tsignal.signal(signal.SIGTTOU, signal.SIG_IGN)\n\tsignal.signal(signal.SIGTTIN, signal.SIG_IGN)\n\tsignal.signal(signal.SIGTSTP, signal.SIG_IGN)\n\tsignal.signal(signal.SIGHUP, signal.SIG_IGN)\n\n\t# 结束父进程,使得子进程成为后台进程\n\ttry:\n\t\tif os.fork() > 0:\n\t\t\tsys.exit(0)\n\texcept OSError as error:\n\t\tprint('fork #1 failed: %d (%s)' % (error.errno, error.strerror))\n\t\tsys.exit(1)\n\n\t# 建立一个新的进程组,在这个新的进程组中,子进程成为这个进程组的首进程,以使该进程脱离所有终端\n\tos.setsid()\n\n\t# 再次新建一个子进程,退出父进程,保证该进程不是进程组长,同时让该进程无法再打开一个新的终端\n\ttry:\n\t\tpid = os.fork()\n\t\tif pid > 0:\n\t\t\tprint('Daemon PID %d' % pid)\n\t\t\tsys.exit(0)\n\texcept OSError as error:\n\t\tprint('fork #2 failed: %d (%s)' % (error.errno, error.strerror))\n\t\tsys.exit(1)\n\n\tfor f in sys.stdout, sys.stderr: f.flush()\n\tMAXFD = os.sysconf('SC_OPEN_MAX')\n\tfor i in range(3, MAXFD):\n\t\ttry:\n\t\t\tos.close(i)\n\t\texcept:\n\t\t\tpass\n\tsi = open(stdin, 'rb', 0)\n\tso = open(stdout, 'ab', 0)\n\tse = open(stderr, 'ab', 0)\n\tos.dup2(si.fileno(), sys.stdin.fileno())\n\tos.dup2(so.fileno(), sys.stdout.fileno())\n\tos.dup2(se.fileno(), sys.stderr.fileno())\n\n\t# 改变工作目录,使得进程不与任何文件系统联系\n\tos.chdir('/')\n\n\t# 将文件屏蔽字设置为0\n\tos.umask(0)\n\n\t# 忽略SIGCHLD信号\n\tsignal.signal(signal.SIGCHLD, signal.SIG_IGN)\n\n\n# #AES 256 Encrypt\n# class mycrypt(object):\n# \tdef __init__(self,key):\n# \t\tself.key = key\n# \t\tself.mode = AES.MODE_CBC\n#\n# \tdef myencrypt(self,text):\n# \t\tcryptor = AES.new(self.key,self.mode)\n# \t\tlength = 16\n# \t\tcount = text.count('')\n# \t\tif count < length:\n# \t\t\tadd = (length-count) + 1\n# \t\t\ttext = text + (' ' * add)\n# \t\telif count > length:\n# \t\t\tadd = (length-(count % length)) + 1\n# \t\t\ttext = text + (' ' * add)\n# \t\tself.ciphertext = cryptor.encrypt(text)\n# \t\treturn self.ciphertext\n#\n#\n# \tdef mydecrypt(self,text):\n# \t\tcryptor = AES.new(key,self.mode)\n# \t\tplain_text = cryptor.decrypt(text)\n# \t\treturn plain_text\n\n\n# save formated log to file\ndef loginfo(info, level):\n\tlogger = logging.getLogger()\n\t# handler = logging.FileHandler('/tmp/server.log')\n\thandler = logging.FileHandler('server.log')\n\tlogflt = 
logging.Formatter(\"%(levelname)s [%(asctime)s]: %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n\thandler.setFormatter(logflt)\n\tlogger.addHandler(handler)\n\tlevels = {\"CRITICAL\": 50, \"ERROR\": 40, \"WARNING\": 30, \"INFO\": 20, \"DEBUG\": 10}\n\tfor key in levels:\n\t\tif level == key:\n\t\t\tlogger.setLevel(levels[key])\n\t\t\teval(\"logging.\"+key.lower()+\"(\"+'\"'+info+'\"'+\")\")\n\tlogger.removeHandler(handler)\n\n\n#loginfo('error info...','ERROR')\n\n\n#class which process commands that receive from client\nclass commands(object):\n\tdef __init__(self, cmd):\n\t\tself.cmd = cmd\n\t\tloginfo('Got Command: %s' % cmd,'INFO')\n\t\tprint(\"Got Command: %s\" % cmd)\n\n\tdef check_cmds(self):\n\t\tif self.cmd == \"getsysinfo\":\n\t\t\treturn self.get_sysinfo()\n\t\telif self.cmd ==\"help\":\n\t\t\treturn \"::Valid commands are: getsysinfo getnic getvga getharddrive getparttion getcpu getmem getifvirtued system help\"\n\t\telif \"system\" in self.cmd:\n\t\t\treturn self.system(self.cmd.split('system'))\n\t\telif \"gethardware\" in self.cmd:\n\t\t\treturn self.get_hardware()\n\t\telif self.cmd == \"getnic\":\n\t\t\treturn self.get_nic()\n\t\telif self.cmd == \"getvga\":\n\t\t\treturn self.get_vga()\n\t\telif self.cmd == \"getharddrive\":\n\t\t\treturn self.get_harddrive()\n\t\telif self.cmd == \"getparttion\":\n\t\t\treturn self.get_parttion()\n\t\telif self.cmd == \"getcpu\":\n\t\t\treturn self.get_cpu()\n\t\telif self.cmd == \"getmem\":\n\t\t\treturn self.get_mem()\n\t\telif self.cmd == \"getifvirtued\":\n\t\t\treturn self.get_virtualized()\n\t\telif self.cmd == \"getuptime\":\n\t\t\treturn self.get_uptime()\n\t\telif self.cmd == \"getloadavg\":\n\t\t\treturn self.get_loadavg()\n\t\telif \"install\" in self.cmd:\n\t\t\treturn self.install(self.cmd.split('install'))\n\t\telif self.cmd == \"\":\n\t\t\treturn ''\n\t\telse:\n\t\t\treturn \"::Please input legal command!\"\n\n\n\tdef get_sysinfo(self):\n\t\tr = '\\r\\n'\n\t\tissue = self.get_issue()\n\t\tos = getoutput('uname -o')\n\t\tmachine = getoutput('uname -m')\n\t\tkernel = getoutput('uname -r')\n\t\treturn issue+r+os+r+machine+r+kernel\n\n\tdef get_hardware(self):\n\t\treturn self.get_nic() + '\\n' + self.get_vga().strip() + '\\n' + self.get_harddrive().strip()\n\n\tdef get_bin_path(self):\n\t\tlspci_path = \"/usr/bin/lspci\",\"/sbin/lspci\",\"/usr/sbin/lspci\",\n\t\tfor bin in lspci_path:\n\t\t\tif os.path.exists(bin):\n\t\t\t\tself.lspci = bin\n\t\t\t\tbreak\n\n\tdef get_pci(self):\n\t\tself.get_bin_path()\n\t\treturn getoutput(self.lspci)\n\n\tdef get_harddrive(self):\n\t\tif \"SATA\" in self.get_pci():\n\t\t\treturn getoutput(self.lspci+\" | awk -F ':' '/SATA/ {print $3}'\")\n\t\telif \"RAID\" in self.get_pci():\n\t\t\treturn getoutput(self.lspci+\" | awk -F ':' '/RAID/ {print $3}'\")\n\n\tdef get_nic(self):\n\t\tself.get_bin_path()\n\t\tether_nic = getoutput(self.lspci+\" | awk -F ':' '/Ether/ {print $3}'\")\n\t\treturn ether_nic\n\n\tdef get_vga(self):\n\t\tself.get_bin_path()\n\t\treturn getoutput(self.lspci+\"| awk -F ':' '/VGA/ {print $3}'\")\n\n\tdef get_parttion(self):\n\t\treturn getoutput(\"fdisk -l | awk '/\\/dev\\// {print}'\")\n\n\tdef get_cpu(self):\n\t\treturn getoutput(\"cat /proc/cpuinfo | grep 'model name' | cut -d: -f2 | sed 's/^ //'\")\n\n\tdef get_virtualized(self):\n\t\tif \"vmx\" or \"vme\" or \"svm\" in getoutput(\"cat /proc/cpuinfo\"):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\n\tdef get_mem(self):\n\t\tmem = int(getoutput(\"cat /proc/meminfo | grep 'MemTotal' | cut -d: -f2 | sed 's/^\\s*//' | cut -d' ' 
-f1\"))/1024\n\t\treturn \"%s MB\" % mem\n\n\tdef system(self,parms):\n\t\tif parms[1] != \"\":\n\t\t\tif \"rm\" in parms[1]:\n\t\t\t\treturn \"Dangerous! Make sure **the path** you specified!.\\r\\n\"+getoutput(parms[1])\n\t\t\treturn getoutput(parms[1])\n\t\telse:\n\t\t\treturn \"\"\n\n\tdef install(self,parms):\n\t\tif parms[1] != \"\":\n\t\t\tthreading_main(parms[1].strip())\n\n\tdef get_issue(self):\n\t\tissue_file = '/etc/issue'\n\t\tif not os.path.exists(issue_file):\n\t\t\treturn getoutput('uname -o')\n\t\tf = open(issue_file)\n\t\tlines = f.readlines()\n\t\tf.close()\n\t\tfor line in lines:\n\t\t\tif 'Arch' in line:\n\t\t\t\treturn \"ArchLinux\"\n\t\t\telif 'CentOS' in line:\n\t\t\t\treturn \"CentOS\"\n\t\t\telif 'Ubuntu' in line:\n\t\t\t\treturn \"Ubuntu\"\n\t\t\telif 'Fedora' in line:\n\t\t\t\treturn 'Fedora'\n\t\t\telse:\n\t\t\t\treturn \"Linux\"\n\n\tdef get_uptime(self):\n\t\treturn getoutput(\"uptime | cut -d',' -f 1\").strip()\n\n\tdef get_loadavg(self):\n\t\tload1,load2,load3 = os.getloadavg()\n\t\treturn str(load1) + ' ' + str(load2) + ' ' + str(load3)\n\n\nclass MyRequestHandler(socketserver.BaseRequestHandler):\n\tdef handle(self):\n\t\tip_address = str(self.client_address[0])\n\t\tport = str(self.client_address[1])\n\t\tprint('ip_address:%s, port:%s' % (ip_address, port))\n\t\thosts = []\n\t\tcidr = []\n\t\tfor host in ALLOWED_HOSTS:\n\t\t\tif '/' in host:\n\t\t\t\tcidr.append(host)\n\t\t\telse:\n\t\t\t\thosts.append(host)\n\t\tfor net in cidr:\n\t\t\tfor ip in IP(net):\n\t\t\t\thosts.append(str(ip))\n\t\tif ip_address in hosts:\n\t\t\tprint('::Connected from: ', self.client_address)\n\t\t\tloginfo(\"Connected from: %s:%s\" % (ip_address, port), 'INFO')\n\t\telse:\n\t\t\tself.request.send('Not Allowed Here!\\n'.encode())\n\t\t\tprint('::Forbidden Host from: ', self.client_address)\n\t\t\tloginfo(\"Forbidden Host from: %s:%s\" % (ip_address, port), 'INFO')\n\t\t\tself.finish()\n\n\t\twhile True:\n\t\t\tBUF_SIZE = struct.calcsize('!1024s')\n\t\t\tbuffer = self.request.recv(BUF_SIZE)\n\t\t\tprint('len(buffer):%s' % len(buffer))\n\t\t\tif len(buffer) == 1024:\n\t\t\t\tdata = struct.unpack('!1024s', buffer)[0]\n\t\t\telse:\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\n\t\t\tdata = data.decode('utf8').replace('\\x00', '')\n\t\t\tprint('data xx:%s' % data)\n\t\t\tif data == 'byebye':\n\t\t\t\ttry:\n\t\t\t\t\tself.request.send(\"seeyou!\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tloginfo('%s:%s Send failed! 
\t\t\t#if in putfile mode\n\t\t\t# cmd in client is like this: \"putfile /client/side/file.jpg /server/side/file.jpg APPEND_BIN\"\n\t\t\tif \"putfile\" in data:\n\t\t\t\tfilename = data.split(' ')[2]\n\t\t\t\ttry:\n\t\t\t\t\tmod = data.split(' ')[3]\n\t\t\t\t\tif mod == 'WRITE_BIN': mod = 'wb'\n\t\t\t\t\telif mod == 'WRITE_ASC': mod = 'w'\n\t\t\t\t\telif mod == \"APPEND_BIN\": mod = 'ab+'\n\t\t\t\t\telif mod == \"APPEND_ASC\": mod = 'a+'\n\t\t\t\texcept:\n\t\t\t\t\tmod = 'wb'\n\t\t\t\tfd = open(filename, mod)\n\t\t\t\twhile True:\n\t\t\t\t\tcontent = self.request.recv(1024)\n\t\t\t\t\tif not content:\n\t\t\t\t\t\tbreak\n\t\t\t\t\t# the body arrives hex-encoded; bytes.fromhex is the Python 3 replacement for str.decode('hex')\n\t\t\t\t\tpayload = bytes.fromhex(content.decode())\n\t\t\t\t\tfd.write(payload if 'b' in mod else payload.decode())\n\t\t\t\tfd.flush()\n\t\t\t\tfd.close()\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\n\t\t\t#if in getfile mode\n\t\t\tif \"getfile\" in data:\n\t\t\t\tfilename = data.split(' ')[1]\n\t\t\t\t# if the file does not exist or is a directory, send an error to the client and stop\n\t\t\t\tif not os.path.exists(filename) or os.path.isdir(filename):\n\t\t\t\t\tself.request.sendall(struct.pack('!128s', b'File not found, please check the path!'))\n\t\t\t\t\tself.finish()\n\t\t\t\t\tbreak\n\t\t\t\tself.request.sendall(struct.pack('!128s', b'File Found! Will Transfer Now!'))\n\t\t\t\tprint('filename:%s' % filename)\n\t\t\t\tfd = open(filename,'rb')\n\t\t\t\t# send data in a loop\n\t\t\t\twhile True:\n\t\t\t\t\tdata = fd.read(1024)\n\t\t\t\t\tif not data:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tself.request.send(data.hex().encode())\n\t\t\t\tfd.close()\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\n\t\t\t# process the command received from the client\n\t\t\tcmd_output = commands(data)\n\t\t\ttry:\n\t\t\t\tlines = cmd_output.check_cmds()\n\t\t\t\tprint('lines:%s' % lines)\n\t\t\t\tself.request.send(str(lines).encode())\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\t\t\texcept Exception as e:\n\t\t\t\tloginfo('%s:%s Send failed! %s' % (ip_address, port, e), 'ERROR')
\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\n\ndef main():\n\tsocketserver.ThreadingTCPServer.allow_reuse_address = True\n\ttcp_srv = socketserver.ThreadingTCPServer(ADDR, MyRequestHandler)\n\tprint('::waiting for connecting...')\n\ttcp_srv.serve_forever()\n\n\nif __name__ == '__main__':\n\tif os.getuid() != 0:\n\t\tprint(\"This server process must be run as root!\")\n\t\tsys.exit(1)\n\t# insert_iptables_rule()\n\tdaemon_log_path = os.getcwd() + \"/daemon.log\"\n\tcreate_daemon('/dev/null', daemon_log_path, daemon_log_path)\n\tmain()\n","sub_path":"admins/cs/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":11117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"116103263","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import division\r\nfrom os import listdir, path, system, makedirs, remove\r\nfrom shutil import rmtree, copyfile\r\nimport cv2\r\n\r\n\r\ndef main():\r\n in_path = \"in\"\r\n tmp_path = \"tmp\"\r\n out_path = \"out\"\r\n external_path = \"external\"\r\n\r\n try:\r\n rmtree(tmp_path)\r\n except:\r\n pass\r\n makedirs(tmp_path, exist_ok=True)\r\n\r\n file_count = 0\r\n\r\n for _file in listdir(in_path):\r\n if file_count % 10 == 0:\r\n print(_file)\r\n input_file = path.join(in_path, _file)\r\n if path.isfile(input_file):\r\n if _file.split(\".\")[-1].lower() != \"ppm\":\r\n # convert to ppm\r\n system(path.join(external_path, \"convert.exe\" + \" \" + input_file\r\n + \" \" + path.join(tmp_path, _file.split(\".\")[0] + \".ppm\")))\r\n else:\r\n # the input is already a ppm; stage a copy in tmp so mlaa.exe can find it\r\n copyfile(input_file, path.join(tmp_path, _file.split(\".\")[0] + \".ppm\"))\r\n src_image = cv2.imread(input_file, -1)\r\n # apply MLAA\r\n system(path.join(external_path, \"mlaa.exe\" + \" \" + path.join(tmp_path, _file.split(\".\")[0] + \".ppm\")\r\n + \" \" + path.join(tmp_path, _file.split(\".\")[0] + \"_AA.ppm\")))\r\n # delete temp ppm file\r\n remove(path.join(tmp_path, _file.split(\".\")[0] + \".ppm\"))\r\n # convert to png\r\n save_file = path.join(out_path, _file.split(\".\")[0] + \".png\")\r\n system(path.join(external_path, \"convert.exe\" + \" \" + path.join(tmp_path, _file.split(\".\")[0] + \"_AA.ppm\")\r\n + \" \" + save_file))\r\n dst_image = cv2.imread(save_file, -1)\r\n if src_image.shape != dst_image.shape:\r\n h, w = src_image.shape[:2] # only height/width; the image may carry colour channels\r\n image_resize = cv2.resize(dst_image, (w, h), interpolation=cv2.INTER_CUBIC)\r\n cv2.imwrite(save_file, image_resize)\r\n # delete temp AA ppm file\r\n remove(path.join(tmp_path, _file.split(\".\")[0] + \"_AA.ppm\"))\r\n\r\n file_count += 1\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"287232584","text":"import os\nimport json\nimport pprint\nimport math\nimport re\n\nservices = None\ncfn_spec = None\n\ntf_resources = []\ncfn_types = []\ncfn_occurances = []\ntf_occurances = []\ncfn_exceptions = {\n 'AWS::CloudFormation::CustomResource': 'N/A',\n 'AWS::CloudFormation::Macro': 'N/A',\n 'AWS::CloudFormation::Stack': 'N/A',\n 'AWS::CloudFormation::WaitCondition': 'N/A',\n 'AWS::CloudFormation::WaitConditionHandle': 'N/A',\n 'AWS::EC2::SecurityGroupEgress': 'N/A',\n 'AWS::EC2::SecurityGroupIngress': 'N/A',\n 'AWS::EC2::TrunkInterfaceAssociation': 'N/A',\n 'AWS::ElastiCache::SecurityGroupIngress': 'N/A',\n 'AWS::Redshift::ClusterSecurityGroupIngress': 'N/A',\n 'AWS::Route53::RecordSetGroup': 'N/A',\n 'AWS::SDB::Domain': 'N/A',\n 'AWS::IAM::UserToGroupAddition': 
'N/A'\n}\ntf_exceptions = {\n 'aws_cloudformation_stack': 'N/A',\n 'aws_cloudformation_stack_set': 'N/A',\n 'aws_cloudformation_stack_set_instance': 'N/A'\n}\n\nwith open(\"util/cfnspec.json\", \"r\") as f:\n cfn_spec = json.loads(f.read())['ResourceTypes']\n\nwith open(\"util/tf_resources.txt\", \"r\") as f:\n lines = f.read().splitlines()\n for line in lines:\n tf_resources.append(line)\n\nfor cfntype, _ in cfn_spec.items():\n cfn_types.append(cfntype)\n\ncfn_types.append(\"AWS::Lambda::LayerVersionPermission\")\ncfn_types.append(\"AWS::EC2::VPCEndpointService\")\ncfn_types.append(\"AWS::Lambda::LayerVersion\")\ncfn_types.append(\"AWS::EC2::CapacityReservation\")\ncfn_types = set(cfn_types)\n\nwith open(\"js/mappings.js\", \"r\") as f:\n text = f.read()\n lines = text.splitlines()\n cfn_occurances += re.compile(r'(AWS\\:\\:[a-zA-Z0-9]+\\:\\:[a-zA-Z0-9]+)').findall(text)\n tf_occurances += re.compile(r'terraformType\\'\\:\\ \\'(aws(?:\\_[a-zA-Z0-9]+)+)\\'').findall(text)\n\ntotal_services = 0\ntotal_operations = 0\ntotal_unique_occurances = 0\nwith open(\"RESOURCE_COVERAGE.md\", \"w\") as f:\n f.write(\"## CloudFormation Resource Coverage\\n\\n\")\n f.write(\"**%s/%s (%s%%)** Resources Covered\\n\" % (\n len(set(cfn_occurances)) + len(cfn_exceptions),\n len(cfn_types),\n int(math.floor((len(set(cfn_occurances)) + len(cfn_exceptions)) * 100 / len(cfn_types)))\n ))\n\n f.write(\"\\n| Type | Coverage |\\n\")\n f.write(\"| --- | --- |\\n\")\n\n for cfntype in sorted(cfn_types):\n coverage = \"\"\n if cfn_occurances.count(cfntype) > 0:\n coverage = \":thumbsup:\"\n if cfntype in cfn_exceptions:\n coverage = cfn_exceptions[cfntype]\n f.write(\"| *%s* | %s |\\n\" % (cfntype, coverage))\n\n f.write(\"\\n## Terraform Coverage\\n\\n\")\n f.write(\"**%s/%s (%s%%)** Resources Covered\\n\" % (\n len(set(tf_occurances)) + len(tf_exceptions),\n len(tf_resources),\n int(math.floor((len(set(tf_occurances)) + len(tf_exceptions)) * 100 / len(tf_resources)))\n ))\n \n f.write(\"\\n| Type | Coverage |\\n\")\n f.write(\"| --- | --- |\\n\")\n\n for tf_resource in sorted(tf_resources):\n coverage = \"\"\n if tf_occurances.count(tf_resource) > 0:\n coverage = \":thumbsup:\"\n if tf_resource in tf_exceptions:\n coverage = tf_exceptions[tf_resource]\n f.write(\"| *%s* | %s |\\n\" % (tf_resource, coverage))\n","sub_path":"util/generateCoverage.py","file_name":"generateCoverage.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"36703813","text":"import argparse\nimport datetime\nimport os\nimport pprint\nimport sys\n\nimport numpy as np\nimport torch\nfrom env import make_vizdoom_env\nfrom network import DQN\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom tianshou.data import Collector, VectorReplayBuffer\nfrom tianshou.policy import ICMPolicy, PPOPolicy\nfrom tianshou.trainer import OnpolicyTrainer\nfrom tianshou.utils import TensorboardLogger, WandbLogger\nfrom tianshou.utils.net.common import ActorCritic\nfrom tianshou.utils.net.discrete import Actor, Critic, IntrinsicCuriosityModule\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--task\", type=str, default=\"D1_basic\")\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--buffer-size\", type=int, default=100000)\n parser.add_argument(\"--lr\", type=float, default=0.00002)\n parser.add_argument(\"--gamma\", type=float, default=0.99)\n 
parser.add_argument(\"--epoch\", type=int, default=300)\n parser.add_argument(\"--step-per-epoch\", type=int, default=100000)\n parser.add_argument(\"--step-per-collect\", type=int, default=1000)\n parser.add_argument(\"--repeat-per-collect\", type=int, default=4)\n parser.add_argument(\"--batch-size\", type=int, default=256)\n parser.add_argument(\"--hidden-size\", type=int, default=512)\n parser.add_argument(\"--training-num\", type=int, default=10)\n parser.add_argument(\"--test-num\", type=int, default=100)\n parser.add_argument(\"--rew-norm\", type=int, default=False)\n parser.add_argument(\"--vf-coef\", type=float, default=0.5)\n parser.add_argument(\"--ent-coef\", type=float, default=0.01)\n parser.add_argument(\"--gae-lambda\", type=float, default=0.95)\n parser.add_argument(\"--lr-decay\", type=int, default=True)\n parser.add_argument(\"--max-grad-norm\", type=float, default=0.5)\n parser.add_argument(\"--eps-clip\", type=float, default=0.2)\n parser.add_argument(\"--dual-clip\", type=float, default=None)\n parser.add_argument(\"--value-clip\", type=int, default=0)\n parser.add_argument(\"--norm-adv\", type=int, default=1)\n parser.add_argument(\"--recompute-adv\", type=int, default=0)\n parser.add_argument(\"--logdir\", type=str, default=\"log\")\n parser.add_argument(\"--render\", type=float, default=0.0)\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n )\n parser.add_argument(\"--frames-stack\", type=int, default=4)\n parser.add_argument(\"--skip-num\", type=int, default=4)\n parser.add_argument(\"--resume-path\", type=str, default=None)\n parser.add_argument(\"--resume-id\", type=str, default=None)\n parser.add_argument(\n \"--logger\",\n type=str,\n default=\"tensorboard\",\n choices=[\"tensorboard\", \"wandb\"],\n )\n parser.add_argument(\"--wandb-project\", type=str, default=\"vizdoom.benchmark\")\n parser.add_argument(\n \"--watch\",\n default=False,\n action=\"store_true\",\n help=\"watch the play of pre-trained policy only\",\n )\n parser.add_argument(\n \"--save-lmp\",\n default=False,\n action=\"store_true\",\n help=\"save lmp file for replay whole episode\",\n )\n parser.add_argument(\"--save-buffer-name\", type=str, default=None)\n parser.add_argument(\n \"--icm-lr-scale\",\n type=float,\n default=0.0,\n help=\"use intrinsic curiosity module with this lr scale\",\n )\n parser.add_argument(\n \"--icm-reward-scale\",\n type=float,\n default=0.01,\n help=\"scaling factor for intrinsic curiosity reward\",\n )\n parser.add_argument(\n \"--icm-forward-loss-weight\",\n type=float,\n default=0.2,\n help=\"weight for the forward model loss in ICM\",\n )\n return parser.parse_args()\n\n\ndef test_ppo(args=get_args()):\n # make environments\n env, train_envs, test_envs = make_vizdoom_env(\n args.task,\n args.skip_num,\n (args.frames_stack, 84, 84),\n args.save_lmp,\n args.seed,\n args.training_num,\n args.test_num,\n )\n args.state_shape = env.observation_space.shape\n args.action_shape = env.action_space.shape or env.action_space.n\n # should be N_FRAMES x H x W\n print(\"Observations shape:\", args.state_shape)\n print(\"Actions shape:\", args.action_shape)\n # seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n # define model\n net = DQN(\n *args.state_shape,\n args.action_shape,\n device=args.device,\n features_only=True,\n output_dim=args.hidden_size,\n )\n actor = Actor(net, args.action_shape, device=args.device, softmax_output=False)\n critic = Critic(net, device=args.device)\n optim = 
torch.optim.Adam(ActorCritic(actor, critic).parameters(), lr=args.lr)\n\n lr_scheduler = None\n if args.lr_decay:\n # decay learning rate to 0 linearly\n max_update_num = np.ceil(args.step_per_epoch / args.step_per_collect) * args.epoch\n\n lr_scheduler = LambdaLR(optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num)\n\n # define policy\n def dist(p):\n return torch.distributions.Categorical(logits=p)\n\n policy = PPOPolicy(\n actor,\n critic,\n optim,\n dist,\n discount_factor=args.gamma,\n gae_lambda=args.gae_lambda,\n max_grad_norm=args.max_grad_norm,\n vf_coef=args.vf_coef,\n ent_coef=args.ent_coef,\n reward_normalization=args.rew_norm,\n action_scaling=False,\n lr_scheduler=lr_scheduler,\n action_space=env.action_space,\n eps_clip=args.eps_clip,\n value_clip=args.value_clip,\n dual_clip=args.dual_clip,\n advantage_normalization=args.norm_adv,\n recompute_advantage=args.recompute_adv,\n ).to(args.device)\n if args.icm_lr_scale > 0:\n feature_net = DQN(\n *args.state_shape,\n args.action_shape,\n device=args.device,\n features_only=True,\n output_dim=args.hidden_size,\n )\n action_dim = np.prod(args.action_shape)\n feature_dim = feature_net.output_dim\n icm_net = IntrinsicCuriosityModule(\n feature_net.net,\n feature_dim,\n action_dim,\n device=args.device,\n )\n icm_optim = torch.optim.Adam(icm_net.parameters(), lr=args.lr)\n policy = ICMPolicy(\n policy,\n icm_net,\n icm_optim,\n args.icm_lr_scale,\n args.icm_reward_scale,\n args.icm_forward_loss_weight,\n ).to(args.device)\n # load a previous policy\n if args.resume_path:\n policy.load_state_dict(torch.load(args.resume_path, map_location=args.device))\n print(\"Loaded agent from: \", args.resume_path)\n # replay buffer: `save_last_obs` and `stack_num` can be removed together\n # when you have enough RAM\n buffer = VectorReplayBuffer(\n args.buffer_size,\n buffer_num=len(train_envs),\n ignore_obs_next=True,\n save_only_last_obs=True,\n stack_num=args.frames_stack,\n )\n # collector\n train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)\n test_collector = Collector(policy, test_envs, exploration_noise=True)\n\n # log\n now = datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\")\n args.algo_name = \"ppo_icm\" if args.icm_lr_scale > 0 else \"ppo\"\n log_name = os.path.join(args.task, args.algo_name, str(args.seed), now)\n log_path = os.path.join(args.logdir, log_name)\n\n # logger\n if args.logger == \"wandb\":\n logger = WandbLogger(\n save_interval=1,\n name=log_name.replace(os.path.sep, \"__\"),\n run_id=args.resume_id,\n config=args,\n project=args.wandb_project,\n )\n writer = SummaryWriter(log_path)\n writer.add_text(\"args\", str(args))\n if args.logger == \"tensorboard\":\n logger = TensorboardLogger(writer)\n else: # wandb\n logger.load(writer)\n\n def save_best_fn(policy):\n torch.save(policy.state_dict(), os.path.join(log_path, \"policy.pth\"))\n\n def stop_fn(mean_rewards: float) -> bool:\n if env.spec.reward_threshold:\n return mean_rewards >= env.spec.reward_threshold\n return False\n\n # watch agent's performance\n def watch():\n print(\"Setup test envs ...\")\n policy.eval()\n test_envs.seed(args.seed)\n if args.save_buffer_name:\n print(f\"Generate buffer with size {args.buffer_size}\")\n buffer = VectorReplayBuffer(\n args.buffer_size,\n buffer_num=len(test_envs),\n ignore_obs_next=True,\n save_only_last_obs=True,\n stack_num=args.frames_stack,\n )\n collector = Collector(policy, test_envs, buffer, exploration_noise=True)\n result = collector.collect(n_step=args.buffer_size)\n print(f\"Save 
buffer into {args.save_buffer_name}\")\n # Unfortunately, pickle will cause oom with 1M buffer size\n buffer.save_hdf5(args.save_buffer_name)\n else:\n print(\"Testing agent ...\")\n test_collector.reset()\n result = test_collector.collect(n_episode=args.test_num, render=args.render)\n rew = result[\"rews\"].mean()\n lens = result[\"lens\"].mean() * args.skip_num\n print(f'Mean reward (over {result[\"n/ep\"]} episodes): {rew}')\n print(f'Mean length (over {result[\"n/ep\"]} episodes): {lens}')\n\n if args.watch:\n watch()\n sys.exit(0)\n\n # test train_collector and start filling replay buffer\n train_collector.collect(n_step=args.batch_size * args.training_num)\n # trainer\n result = OnpolicyTrainer(\n policy=policy,\n train_collector=train_collector,\n test_collector=test_collector,\n max_epoch=args.epoch,\n step_per_epoch=args.step_per_epoch,\n repeat_per_collect=args.repeat_per_collect,\n episode_per_test=args.test_num,\n batch_size=args.batch_size,\n step_per_collect=args.step_per_collect,\n stop_fn=stop_fn,\n save_best_fn=save_best_fn,\n logger=logger,\n test_in_train=False,\n ).run()\n\n pprint.pprint(result)\n watch()\n\n\nif __name__ == \"__main__\":\n test_ppo(get_args())\n","sub_path":"examples/vizdoom/vizdoom_ppo.py","file_name":"vizdoom_ppo.py","file_ext":"py","file_size_in_byte":10346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"404887885","text":"\"Implementation of test-runner for nose tests.\"\n\nimport os\n\nimport nose\n\nfrom cosmic_ray.testing.test_runner import TestRunner\nfrom cosmic_ray.util import redirect_stdout, redirect_stderr\n\n\nclass NoseResultsCollector(nose.plugins.Plugin):\n \"Nose plugin that collects results for later analysis.\"\n name = 'cosmic_ray'\n enabled = True\n\n def __init__(self):\n super().__init__()\n self.result = None\n\n def finalize(self, result):\n \"Store result.\"\n self.result = result\n\n\nclass NoseRunner(TestRunner): # pylint: disable=too-few-public-methods\n \"\"\"A TestRunner using nosetest.\n\n This treats `test_args` as a list of arguments to `nose.run()`. The args\n are passed directly to that function. See nose's command line reference\n for a description of what arguments are accepted.\n\n NOTE: ``-s`` is not accepted here!\n \"\"\"\n\n def _run(self):\n argv = ['', '--with-cosmic_ray']\n argv += self.test_args.split()\n collector = NoseResultsCollector()\n\n with open(os.devnull, 'w') as devnull:\n with redirect_stdout(devnull):\n with redirect_stderr(devnull):\n nose.run(argv=argv, plugins=[collector])\n return (collector.result.wasSuccessful(),\n [r[1] for r in collector.result.errors +\n collector.result.failures])\n","sub_path":"plugins/test-runners/nose/cosmic_ray_nose_runner/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"515366281","text":"\"\"\"\nThe module implements the Publish-Subscribe pattern.\n\nThis is the `pubsub push` pattern implementation. See references, below, for context.\nThe Publisher notifies Subscribers via a callback method.\n\nExample:\n To come.\n\n.. 
_Publish-Subscribe reference:\n    https://github.com/hovey/pyschool/blob/f3a60800386c0416af4f129671ef1240cf75ff7b/pubsub/README.md\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import NamedTuple\n\n# import numpy as np\n\n\nclass Subscriber(ABC):\n    \"\"\"The base class Subscriber for the Publish-Subscribe pattern.\n\n    Classes should inherit from this base class to receive the\n    subscribe mechanism of the publish-subscribe pattern.\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n\n    @abstractmethod\n    def publication_callback(self, *, message: str = \"\"):\n        \"\"\"The callback method called by a Publisher.  Descendants must implement this\n        method to inherit from `Subscriber`.\n\n        Keyword Arguments:\n            message (string):  The publication as a message string from the publisher to\n                the subscriber via the callback contract.  Defaults to `\"\"` (empty string).\n        \"\"\"\n        pass\n\n\nclass ConcreteSubscriber(Subscriber):\n    \"\"\"This class is included as an example of how Subscriber descendants\n    could be implemented.\n    \"\"\"\n\n    def __init__(self, *, name: str, verbose: bool = False):\n        super().__init__()\n        self.name = name\n        self._verbose = verbose\n        if verbose:\n            print(f\"ConcreteSubscriber {name} created.\")\n\n    def publication_callback(self, *, message: str) -> None:\n        super().publication_callback(message=message)\n\n        if self._verbose:\n            print(f\"-> Callback message: '{message}', received by {self.name}.\")\n\n\nclass PublisherEvent(NamedTuple):\n    subscribed: str = \"subscribed event was triggered\"\n    unsubscribed: str = \"unsubscribed event was triggered\"\n    publication: str = \"Publication event was triggered.\"\n    paused: str = \"subscription was paused\"\n    resumed: str = \"subscription was resumed\"\n\n\nclass Publisher(ABC):\n    \"\"\"The base class Publisher for the Publish-Subscribe pattern.\n\n    Classes should inherit from this base class to receive the\n    publish mechanism of the publish-subscribe pattern.\n\n    Attributes:\n        _subscribers (dict[Subscriber, bool]):  Dictionary map with keys\n            as Subscriber objects and values as booleans indicating whether the\n            subscription is currently active (True) or paused (False).\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n        # self._subscribers = dict(Subscriber, bool)  # initialized as empty dictionary\n        self._subscribers = dict()  # initialized as empty dictionary\n        self.events = PublisherEvent()  # publisher establishes these event strings\n\n    def subscribe(self, subscriber: Subscriber, active: bool = True) -> None:\n        \"\"\"Creates a subscription of the subscriber to a publisher.\n\n        Arguments:\n            subscriber (Subscriber):  A subscriber of the publication.\n            active (bool):  If False, the subscriber's publications are paused.  If\n                True, the subscriber's publications are resumed. 
Defaults to True.\n        \"\"\"\n        self._subscribers[subscriber] = active\n        subscriber.publication_callback(message=self.events.subscribed)\n\n    def unsubscribe(self, subscriber: Subscriber) -> None:\n        \"\"\"Deletes a subscriber from a publisher's dictionary of subscribers.\n\n        Arguments:\n            subscriber (Subscriber):  A subscriber of the publication.\n\n        Raises:\n            KeyError:  if the subscriber is not in the publisher's dictionary of\n                subscribers.\n        \"\"\"\n        try:\n            del self._subscribers[subscriber]\n            subscriber.publication_callback(message=self.events.unsubscribed)\n        except KeyError:\n            print(f\"Error: subscriber {subscriber} is unknown.\")\n\n    def publish(self, message: str = \"\") -> None:\n        \"\"\"Publishes a message (string) to subscribers via the `publication_callback` method.\n\n        This is the `push` implementation of the Publish-Subscribe pattern.\n        \"\"\"\n        for subscriber, active in self._subscribers.items():\n            if active:\n                subscriber.publication_callback(message=message)\n\n    def pause(self, subscriber: Subscriber) -> None:\n        \"\"\"Retains the connection between publisher and subscriber, but turns off\n        notifications from the publisher to subscriber.  See also `resume` method.\n\n        Arguments:\n            subscriber (Subscriber):  The subscriber for which updates should be\n                paused until `resume` is used.\n\n        Raises:\n            KeyError:  if the subscriber is not in the publisher's dictionary of\n                subscribers.\n        \"\"\"\n        try:\n            self._subscribers[subscriber]  # raises KeyError for unknown subscribers\n        except KeyError:\n            print(f\"Error: subscriber {subscriber} is unknown.\")\n        else:\n            self._subscribers[subscriber] = False  # subscription is paused\n            subscriber.publication_callback(message=self.events.paused)\n\n    def resume(self, subscriber) -> None:\n        \"\"\"Retains the connection between publisher and subscriber, but turns on\n        notifications from the publisher to subscriber. 
See also `pause` method.\n\n        Arguments:\n            subscriber (Subscriber):  The subscriber for which updates should be\n                resumed until `pause` is used.\n\n        Raises:\n            KeyError:  if the subscriber is not in the publisher's dictionary of subscribers.\n        \"\"\"\n        try:\n            self._subscribers[subscriber]  # raises KeyError for unknown subscribers\n        except KeyError:\n            print(f\"Error: subscriber {subscriber} is unknown.\")\n        else:\n            self._subscribers[subscriber] = True  # subscription is resumed\n            subscriber.publication_callback(message=self.events.resumed)\n\n    @property\n    def subscribers(self) -> dict:\n        \"\"\"Returns the publisher's dictionary of current subscribers and their respective\n        active flags.\"\"\"\n        return self._subscribers\n","sub_path":"src/pyschool/pattern/publish_subscribe.py","file_name":"publish_subscribe.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"463602936","text":"import bpy  # Blender Python API, required for bpy.data / bpy.ops below\n# NOTE: `scene` and `City` are expected to be provided by the enclosing project.\n\n\nclass TrapezeBuilding(object):\n\n    def __init__(self, un_pos_x, un_pos_y, un_pos_z, un_size_x, un_size_y, un_height):\n        # MULTIPLY COORD BY City.UNIT_VALUE\n        # The front windows:\n        # Import Window\n        # Move it to (un_pos_x + (un_size_x-1)/2, un_pos_y, un_pos_z)\n        obj = bpy.data.objects['_ModernBuildingWindow']\n        mesh = obj.data\n        new_obj = bpy.data.objects.new('ModernBuildingWindow', mesh)\n        new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, un_pos_y * City.UNIT_VALUE, un_pos_z * City.UNIT_VALUE)\n        \n        # Scale it (un_size_x, 1, 1)\n        new_obj.scale = (un_size_x, 1, 1)\n        \n        new_obj.select = True\n        scene.objects.link(new_obj)\n        \n        unZ = un_pos_z + 1\n        unY = un_pos_y + 1/3\n        while unZ < un_pos_z + un_height and unY <= un_pos_y + un_size_y - 2:\n            # Duplicate last placed object\n            # Move it to (un_pos_x + (un_size_x-1)/2, unY, unZ)\n            obj = bpy.data.objects['_ModernBuildingWindow']\n            mesh = obj.data\n            new_obj = bpy.data.objects.new('ModernBuildingWindow', mesh)\n            new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, unY * City.UNIT_VALUE, unZ * City.UNIT_VALUE)\n            \n            # Scale it (un_size_x, 1, 1)\n            new_obj.scale = (un_size_x, 1, 1)\n            \n            new_obj.select = True\n            scene.objects.link(new_obj)\n            \n            unY += 1/3\n            unZ += 1\n        \n        # The center part:\n        if un_size_y > 2:\n            # Import Center\n            # Move it to (un_pos_x + (un_size_x-1)/2, un_pos_y + 1/2 + 1/6, un_pos_z)\n            obj = bpy.data.objects['_ModernBuildingCenter']\n            mesh = obj.data\n            new_obj = bpy.data.objects.new('ModernBuildingCenter', mesh)\n            new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, (un_pos_y + 1/2 + 1/6) * City.UNIT_VALUE, un_pos_z * City.UNIT_VALUE)\n            \n            # Scale it (un_size_x, 1, 1)\n            new_obj.scale = (un_size_x, 1, 1)\n            \n            new_obj.select = True\n            scene.objects.link(new_obj)\n            \n            unY2 = un_pos_y + 1\n            unScaleZ = 2\n            while unY2 <= un_pos_y + un_size_y - 9/6:\n                # Duplicate last placed Element\n                # Move it at (un_pos_x + (un_size_x-1)/2, unY, un_pos_z)\n                obj = bpy.data.objects['_ModernBuildingWindow']\n                mesh = obj.data\n                new_obj = bpy.data.objects.new('ModernBuildingWindow', mesh)\n                new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, unY2 * City.UNIT_VALUE, un_pos_z * City.UNIT_VALUE)\n                \n                # Scale it (un_size_x, 1, unScaleZ)\n                new_obj.scale = (un_size_x, 1, unScaleZ)\n                \n                new_obj.select = True\n                scene.objects.link(new_obj)\n                \n                unScaleZ = min(unScaleZ + 1, unZ - un_pos_z)\n                unY2 += 1/3\n        \n        # The back:\n        # Import Back from Modern 2.blend\n        # Move it to (un_pos_x + (un_size_x-1)/2, un_pos_y + un_size_y - 1, un_pos_z)\n        obj = bpy.data.objects['_ModernBuildingBack']\n        mesh = obj.data\n        new_obj = 
bpy.data.objects.new('ModernBuildingBack', mesh)\n new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, (un_pos_y + un_size_y - 1) * City.UNIT_VALUE, un_pos_z * City.UNIT_VALUE)\n \n # Scale it (un_size_x, 1, un_height)\n new_obj.scale = (un_size_x, 1, unZ - un_pos_z)\n \n new_obj.select = True\n scene.objects.link(new_obj)\n \n # Join all objects and remove doubles\n scene.objects.active = new_obj\n bpy.ops.object.join()\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.mesh.remove_doubles()\n bpy.ops.object.mode_set(mode='OBJECT')","sub_path":"src/elements/buildings/TrapezeBuilding.py","file_name":"TrapezeBuilding.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"39137364","text":"# -*- coding: utf-8 -*- {{{\n#\n# Your license here\n# }}}\n\nfrom datetime import datetime, timedelta\n\n\nclass FleetRequest:\n \"\"\"\n This class describes input fields required by fleets \n \"\"\"\n def __init__(self, ts=datetime.utcnow(),\n sim_step=timedelta(hours=1),\n start_time=None,\n p=None, q=None, steps=1):\n \"\"\"\n Constructor\n \"\"\"\n # Timestamp in simulation loop: datetime\n self.ts_req = ts\n\n # Simulation time step: timedelta object\n self.sim_step = sim_step\n \n # Initial timestamp in simulation loop: datetime \n # Used for artificial inertia service\n self.start_time = start_time\n\n # Real power request\n self.P_req = p\n\n # Reactive power request\n self.Q_req = q\n\n # NREL WaterHeater only: Number of steps in simulation.\n # This value is always = 1 for the sake of not changing WaterHeater code\n self.steps = 1\n","sub_path":"src/fleet_request.py","file_name":"fleet_request.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"195324432","text":"from flask import Flask, request,jsonify\r\nimport requests\r\nimport json\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/', methods=['GET'])\r\ndef hello():\r\n\treturn 'Hello!!!', 200 # 200 is HTTP the response code to be returned to client\r\n\r\nfrom datetime import datetime\r\n@app.route('/datetime', methods=['GET'])\r\ndef print_today():\r\n\treturn str(datetime.now()), 200 \r\n## Potential Applications \r\n\r\n\r\n@app.route('/users/')\r\ndef get_user(username):\r\n\treturn \"user: \"+str(username),200\r\n \r\n#http://localhost:5000/add?op1=3&op2=4\r\n@app.route('/add', methods=['GET'])\r\ndef add():\r\n if 'op1' in request.args.keys() and 'op2' in request.args.keys():\r\n a = int(request.args['op1'])\r\n b = int(request.args['op2'])\r\n return jsonify({\"operand 1\": a, \"operand 2\": b, \"sum\":a+b}) #return JSON object\r\n else:\r\n return jsonify({'error':'missing parameter(s)'}), 400\r\n \r\n\r\n#request content-type=application/json\r\n#request body format: {\"op1\":3,\"op2\":5}\r\n@app.route('/mul', methods=['POST'])\r\ndef mul():\r\n\t\r\n\tdata = request.json #get json data from request body\r\n\ta = data[\"op1\"]\r\n\tb = data[\"op2\"]\r\n\t\r\n\treturn jsonify({'mul':a*b}),200\r\n \r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=5000, debug=True)\r\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"526421369","text":"import shutil \nimport os\nimport 
sys\n\nsdkVersion = 'YumiMediationSDK_v'+ sys.argv[1]\nthirdPartys = ['PlayableAds','AdColony','AdMob','Applovin','Baidu','Chartboost','Domob','Facebook','GDT','InMobi','IronSource','Mobvista','Unity','Vungle','OneWay']\npodDir = './Pods/'\nmediationSDKPath = podDir + sdkVersion\nthirdPartyPathName = mediationSDKPath +'/YumiMediationThirdPartys'\ndebugcenterPathName = mediationSDKPath +'/YumiMediationDebugCenter-iOS'\nyumiMediationSDKPathName = mediationSDKPath +'/YumiMediationSDK'\n\n# adapter\ndef archiveAdapter(adapterName):\n\tthirdYumiSDKName = 'Yumi' + adapterName\n\tif adapterName == 'PlayableAds':\n\t\tthirdYumiSDKName = adapterName\n\tthirdYumiMediationAdapterName = 'YumiMediation' + adapterName\n\t#copy adapters\n\tsrcAdapterPath = podDir + 'YumiMediationAdapters' + '/'+ thirdYumiMediationAdapterName\n\ttargetAdapterPath = thirdPartyPathName + '/'+ thirdYumiMediationAdapterName\n\tsrcThirdSDKPath = podDir + thirdYumiSDKName\n\ttargetThirdSDkPath = thirdPartyPathName + '/'+ thirdYumiMediationAdapterName + '/' +thirdYumiSDKName\n\n\tshutil.copytree(srcAdapterPath,targetAdapterPath,symlinks=True)\n\tshutil.copytree(srcThirdSDKPath,targetThirdSDkPath,symlinks=True)\n\tshutil.make_archive(targetAdapterPath,'bztar',targetAdapterPath)\n\tshutil.rmtree(targetAdapterPath)\n\n# zip makes the archive bigger; gztar and bztar are about the same; plain tar does not change the size\ndef archiveThirdAdapters():\n\tfor thirdParty in thirdPartys:\n\t\tprint('is copying %s' % thirdParty)\n\t\tarchiveAdapter(thirdParty)\n\n\tif os.path.exists(thirdPartyPathName):\n\t\tprint('is archived Adapters')\n\t\tshutil.make_archive(thirdPartyPathName,'bztar',thirdPartyPathName)\n\t\tshutil.rmtree(thirdPartyPathName)\n\n# debugcenter \ndef archiveDebugcenter():\n\t#copy debugcenter\n\tprint('is copying debugcenter')\n\tsrcDebugcenterPath = podDir + 'YumiMediationDebugCenter-iOS'\n\n\tshutil.copytree(srcDebugcenterPath,debugcenterPathName,symlinks = True)\n\tif os.path.exists(debugcenterPathName):\n\t\tprint('is archived debugcenter')\n\t\tshutil.make_archive(debugcenterPathName,'bztar',debugcenterPathName)\n\t\tshutil.rmtree(debugcenterPathName)\n\ndef archiveYumiMediationSDK():\n\t#copy YumiMediationSDK\n\tprint('is copying YumiMediationSDK')\n\tsrcYumiMediationSDKPath = podDir + 'YumiMediationSDK'\n\tshutil.copytree(srcYumiMediationSDKPath,yumiMediationSDKPathName,symlinks = True)\n\tif os.path.exists(yumiMediationSDKPathName):\n\t\tprint('is archived YumiMediationSDK')\n\t\tshutil.make_archive(yumiMediationSDKPathName,'bztar',yumiMediationSDKPathName)\n\t\tshutil.rmtree(yumiMediationSDKPathName)\n\ndef archiveReleaseSDK():\n\tif os.path.exists(mediationSDKPath):\n\t\tshutil.rmtree(mediationSDKPath)\n\tarchiveThirdAdapters()\n\tarchiveDebugcenter()\n\tarchiveYumiMediationSDK()\n\t#copy xcconfig\n\tpodPath = os.path.dirname(podDir)\n\txcconfigPath = os.path.dirname(podPath) + \"/YumiMediationSDKConfig.xcconfig\"\n\tif os.path.exists(xcconfigPath):\t\n\t\tprint('is copying YumiMediationSDKConfig.xcconfig')\n\t\tshutil.copy(xcconfigPath,mediationSDKPath)\n\n\tshutil.make_archive(mediationSDKPath,'bztar',mediationSDKPath)\n\tprint(\"archive yumi mediation sdk succeeded\")\n\n# release archive yumi mediation sdk\narchiveReleaseSDK()\n","sub_path":"archivedYumiMediationSDK.py","file_name":"archivedYumiMediationSDK.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"237347575","text":"#!/usr/bin/python3\n\"\"\"Unittest for Base Model\n\"\"\"\nimport unittest\nimport models\nfrom 
models.base_model import BaseModel\nimport models.base_model\nimport json\nimport pep8\nimport sys\nimport io\nfrom datetime import datetime\nimport inspect\nimport uuid\nimport time\nimport os\n\n\nclass TestDocsBaseModel(unittest.TestCase):\n    \"\"\"THE DOCUMENTATION TESTS WORK\n    check for documentation \"\"\"\n\n    def test_permissions(self):\n        \"\"\" Test to check the permissions \"\"\"\n        exist = os.access('models/base_model.py', os.F_OK)\n        self.assertTrue(exist)\n        read = os.access('models/base_model.py', os.R_OK)\n        self.assertTrue(read)\n        write = os.access('models/base_model.py', os.W_OK)\n        self.assertTrue(write)\n        exe = os.access('models/base_model.py', os.X_OK)\n        self.assertTrue(exe)\n\n    def test_module_doc(self):\n        \"\"\" check for module documentation \"\"\"\n        self.assertTrue(len(models.base_model.__doc__) > 0)\n\n    def test_class_doc(self):\n        \"\"\" check for documentation \"\"\"\n        self.assertTrue(len(BaseModel.__doc__) > 0)\n\n    def test_method_docs(self):\n        \"\"\" check for method documentation \"\"\"\n        for name, func in inspect.getmembers(BaseModel, inspect.isfunction):\n            self.assertTrue(len(func.__doc__) > 0)\n\n\nclass TestPep8BaseModel(unittest.TestCase):\n    \"\"\"IT WORKS\n    check for pep8 validation \"\"\"\n    def test_pep8(self):\n        \"\"\" test base and test_base for pep8 conformance \"\"\"\n        style = pep8.StyleGuide(quiet=True)\n        file1 = 'models/base_model.py'\n        file2 = 'tests/test_models/test_base_model.py'\n        result = style.check_files([file1, file2])\n        self.assertEqual(result.total_errors, 0,\n                         \"Found code style errors (and warning).\")\n\n\nclass TestBaseModel(unittest.TestCase):\n    \"\"\"\n    tests class BaseModel\n    \"\"\"\n    def tearDown(self):\n        \"\"\"clean everything up after running setup\"\"\"\n        sys.stdout = sys.__stdout__\n        os.remove(\"file.json\")\n\n    def test_init(self):\n        \"\"\" Doesn't work\n        checks correct instances \"\"\"\n        ww = BaseModel()\n        ww.name = \"waluigi\"\n        ww.my_number = 40\n        a_t = {\n            \"id\": str,\n            \"created_at\": datetime,\n            \"updated_at\": datetime,\n            \"name\": str,\n            \"my_number\": int\n        }\n        for a, t in a_t.items():\n            with self.subTest(a=a, t=t):\n                self.assertIn(a, ww.__dict__)\n                self.assertEqual(isinstance(ww.__dict__[a], t), True)\n        self.assertEqual(ww.name, \"waluigi\")\n        self.assertEqual(ww.my_number, 40)\n\n    def test_save(self):\n        \"\"\"IT WORKS\n        check if last updated changes were saved\"\"\"\n        hola = BaseModel()\n        creado = hola.created_at\n        viejo = hola.updated_at\n        time.sleep(1)\n        hola.save()\n        nuevo = hola.updated_at\n        self.assertNotEqual(viejo, nuevo)\n        self.assertEqual(viejo, creado)\n        self.assertNotEqual(nuevo, creado)\n\n    def test_uuid(self):\n        \"\"\"IT WORKS\n        test valid uuid\"\"\"\n        pepita = BaseModel()\n        cholado = BaseModel()\n\n        def is_valid_uuid(val):\n            \"\"\"check uuid\"\"\"\n            try:\n                uuid.UUID(str(val))\n                return True\n            except ValueError:\n                return False\n        self.assertEqual(is_valid_uuid(pepita.id), True)\n        self.assertNotEqual(pepita.id, cholado.id)\n\n    def test_to_dict(self):\n        \"\"\"IT WORKS\n        test to dictionary for json\"\"\"\n        pipelin = BaseModel()\n        pipelin.name = \"felipe\"\n        pipelin.my_number = 5\n        my_dictionary = pipelin.to_dict()\n        expected = [\"id\", \"created_at\", \"updated_at\", \"name\", \"my_number\",\n                    \"__class__\"]\n        self.assertCountEqual(my_dictionary.keys(), expected)\n        self.assertEqual(my_dictionary['__class__'], \"BaseModel\")\n        self.assertEqual(my_dictionary['name'], \"felipe\")\n        self.assertEqual(my_dictionary['my_number'], 5)\n\n    def test_dict_dt_values(self):\n        \"\"\"\n        check if attribute datetime values are in the correct output 
format\n        \"\"\"\n        box = BaseModel()\n        box.name = \"Banana\"\n        box.my_number = 25\n        d = box.to_dict()\n        format = \"%Y-%m-%dT%H:%M:%S.%f\"\n        self.assertEqual(d[\"__class__\"], \"BaseModel\")\n        self.assertEqual(isinstance(d[\"created_at\"], str), True)\n        self.assertEqual(isinstance(d[\"updated_at\"], str), True)\n        self.assertEqual(d[\"created_at\"], box.created_at.strftime(format))\n        self.assertEqual(d[\"updated_at\"], box.updated_at.strftime(format))\n\n    def test_datetime(self):\n        \"\"\"IT WORKS\n        check datetime values\"\"\"\n        clock_one = datetime.now()\n        one = BaseModel()\n        clock_two = datetime.now()\n        self.assertTrue(clock_one <= one.created_at <= clock_two)\n        time.sleep(1)\n        clock_one = datetime.now()\n        two = BaseModel()\n        clock_two = datetime.now()\n        self.assertTrue(clock_one <= two.created_at <= clock_two)\n        self.assertEqual(one.created_at, one.updated_at)\n        self.assertEqual(two.created_at, two.updated_at)\n        self.assertNotEqual(one.created_at, two.created_at)\n        self.assertNotEqual(one.updated_at, two.updated_at)\n\n    def test_str(self):\n        \"\"\"IT WORKS\n        test of str\"\"\"\n        pepito = BaseModel()\n        string = \"[BaseModel] ({}) {}\".format(pepito.id, pepito.__dict__)\n        self.assertEqual(string, str(pepito))\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"59313444","text":"from orbis import app\nfrom orbis.libs.files_lib import build_file_name\nfrom orbis.libs.files_lib import get_timestamp\n\nimport collections\nimport os\nimport csv\n\n\ndef run(yaml_config: dict, data: dict, results: dict):\n    app.logger.info(\"Saving results as csv.\")\n\n    file_name = build_file_name(\"collected_results.csv\", yaml_config, raw=True)\n\n    response = collections.OrderedDict()\n    response[\"date\"] = get_timestamp()\n    response[\"name\"] = yaml_config[\"file_name\"]\n\n    response[\"macro_precision\"] = results[\"binary_classification\"][\"macro\"][\"precision\"]\n    response[\"macro_recall\"] = results[\"binary_classification\"][\"macro\"][\"recall\"]\n    response[\"macro_f1_score\"] = results[\"binary_classification\"][\"macro\"][\"f1_score\"]\n\n    response[\"micro_precision\"] = results[\"binary_classification\"][\"micro\"][\"precision\"]\n    response[\"micro_recall\"] = results[\"binary_classification\"][\"micro\"][\"recall\"]\n    response[\"micro_f1_score\"] = results[\"binary_classification\"][\"micro\"][\"f1_score\"]\n\n    response[\"has_score\"] = results[\"has_score\"]\n    response[\"no_score\"] = results[\"no_score\"]\n    response[\"empty_responses\"] = results[\"empty_responses\"]\n\n    response[\"aggregator_name\"] = yaml_config[\"aggregator\"][\"service\"][\"name\"]\n    response[\"aggregator_profile\"] = yaml_config[\"aggregator\"][\"service\"].get(\"profile\", \"None\")\n    response[\"aggregator_limit\"] = yaml_config[\"aggregator\"][\"service\"].get(\"limit\", \"None\")\n    response[\"aggregator_location\"] = yaml_config[\"aggregator\"][\"service\"][\"location\"]\n    response[\"aggregator_data_set\"] = yaml_config[\"aggregator\"][\"input\"][\"data_set\"][\"name\"]\n\n    response[\"evaluator_name\"] = yaml_config[\"evaluator\"][\"name\"]\n    response[\"scorer_name\"] = yaml_config[\"scorer\"][\"name\"]\n\n    response[\"entities\"] = \" \".join([e for e in yaml_config[\"scorer\"][\"entities\"]])\n    response[\"mapping\"] = yaml_config[\"aggregator\"][\"input\"].get(\"mappings\", \"None\")\n    
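A minimal, self-contained sketch of the header-once append pattern that list_results.py uses for its TSV output (hypothetical file name and columns, not part of the orbis plugin itself):

import csv
import os

def append_row(file_name, header, values):
    # Write the header only when the file is created for the first time,
    # then append one row per run; delimiter/quote match the plugin's choices.
    if not os.path.isfile(file_name):
        with open(file_name, 'w', newline='') as csvfile:
            csv.writer(csvfile, delimiter='\t', quotechar="'").writerow(header)
    with open(file_name, 'a', newline='') as csvfile:
        csv.writer(csvfile, delimiter='\t', quotechar="'").writerow(values)

append_row('collected_results.csv', ['date', 'name'], ['2020-01-01', 'run-1'])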
response[\"lense\"] = yaml_config[\"aggregator\"][\"input\"].get(\"lenses\", \"None\")\n response[\"filter\"] = yaml_config[\"aggregator\"][\"input\"].get(\"filters\", \"None\")\n\n header = [key for key in response.keys()]\n values = [value for value in response.values()]\n\n if not os.path.isfile(file_name):\n with open(file_name, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t', quotechar=\"'\")\n writer.writerow(header)\n\n with open(file_name, 'a', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t', quotechar=\"'\")\n writer.writerow(values)\n\n app.logger.info(\"Finished saving results as csv.\")\n","sub_path":"src/orbis/plugins/savors/list_results/list_results.py","file_name":"list_results.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"25717633","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------------\nfrom __future__ import print_function\nfrom __future__ import division\nimport argparse\nimport collections\nimport six\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='translate.py')\n\nparser.add_argument('-ans', required=True,\n help='Path to answers')\nparser.add_argument('-pred', required=True,\n help='Path to predictions')\nparser.add_argument('-output_ranked_r2',\n help='Path to write predictions ranked by R-2.')\nparser.add_argument('-source',\n help='If output ranked prediction, this path points to the input.')\nopt = parser.parse_args()\n\ndef _ngrams(words, n):\n queue = collections.deque(maxlen=n)\n for w in words:\n queue.append(w)\n if len(queue) == n:\n yield tuple(queue)\n\ndef _ngram_counts(words, n):\n return collections.Counter(_ngrams(words, n))\n\ndef _ngram_count(words, n):\n return max(len(words) - n + 1, 0)\n\ndef _counter_overlap(counter1, counter2):\n result = 0\n for k, v in six.iteritems(counter1):\n result += min(v, counter2[k])\n return result\n\ndef _safe_divide(numerator, denominator):\n if denominator > 0:\n return numerator / denominator\n else:\n return 0\n\ndef _safe_f1(matches, recall_total, precision_total, alpha=1):\n recall_score = _safe_divide(matches, recall_total)\n precision_score = _safe_divide(matches, precision_total)\n denom = (1.0 - alpha) * precision_score + alpha * recall_score\n if denom > 0.0:\n return (precision_score * recall_score) / denom\n else:\n return 0.0\n\ndef rouge_n(peer, models, n, alpha=1):\n \"\"\"\n Compute the ROUGE-N score of a peer with respect to one or more models, for\n a given value of `n`.\n \"\"\"\n matches = 0\n recall_total = 0\n peer_counter = _ngram_counts(peer, n)\n for model in models:\n model_counter = _ngram_counts(model, n)\n matches += _counter_overlap(peer_counter, model_counter)\n recall_total += _ngram_count(model, n)\n precision_total = len(models) * _ngram_count(peer, n)\n return _safe_f1(matches, recall_total, precision_total, alpha)\n\ndef rouge_1(peer, models, alpha=1):\n \"\"\"\n Compute the ROUGE-1 (unigram) score of a peer with respect to one or more\n models.\n \"\"\"\n return rouge_n(peer, models, 1, alpha)\n\ndef rouge_2(peer, models, alpha=1):\n \"\"\"\n Compute the ROUGE-2 (bigram) score of a peer with respect to one or more\n models.\n \"\"\"\n return rouge_n(peer, models, 2, alpha)\n\ndef rouge_3(peer, models, alpha=1):\n \"\"\"\n Compute the ROUGE-3 (trigram) score of a peer with respect to one or more\n models.\n \"\"\"\n return 
rouge_n(peer, models, 3, alpha)\n\ndef lcs(a, b):\n    \"\"\"\n    Compute the length of the longest common subsequence between two sequences.\n\n    Time complexity: O(len(a) * len(b))\n    Space complexity: O(min(len(a), len(b)))\n    \"\"\"\n    # This is an adaptation of the standard LCS dynamic programming algorithm\n    # tweaked for lower memory consumption.\n    # Sequence a is laid out along the rows, b along the columns.\n    # Minimize number of columns to minimize required memory\n    if len(a) < len(b):\n        a, b = b, a\n    # Sequence b now has the minimum length\n    # Quit early if one sequence is empty\n    if len(b) == 0:\n        return 0\n    # Use a single buffer to store the counts for the current row, and\n    # overwrite it on each pass\n    row = [0] * len(b)\n    for ai in a:\n        left = 0\n        diag = 0\n        for j, bj in enumerate(b):\n            up = row[j]\n            if ai == bj:\n                value = diag + 1\n            else:\n                value = max(left, up)\n            row[j] = value\n            left = value\n            diag = up\n    # Return the last cell of the last row\n    return left\n\ndef rouge_l(peer, models, alpha=1):\n    \"\"\"\n    Compute the ROUGE-L score of a peer with respect to one or more models.\n    \"\"\"\n    matches = 0\n    recall_total = 0\n    for model in models:\n        matches += lcs(model, peer)\n        recall_total += len(model)\n    precision_total = len(models) * len(peer)\n    return _safe_f1(matches, recall_total, precision_total, alpha)\n\nif __name__ == \"__main__\":\n    answers = []\n    for line in open(opt.ans).readlines():\n        line_split = line.decode('utf8').strip()\n        answers.append([line_split])\n    predictions = []\n    for line in open(opt.pred).readlines():\n        line_split = line.decode('utf8').strip()\n        predictions.append(line_split)\n    rouge_ones = []\n    rouge_twos = []\n    rouge_ls = []\n    for pred_id in range(len(predictions)):\n        rouge_ones.append(rouge_1(predictions[pred_id], answers[pred_id]))\n        rouge_twos.append(rouge_2(predictions[pred_id], answers[pred_id]))\n        rouge_ls.append(rouge_l(predictions[pred_id], answers[pred_id]))\n    print(\"R-1: {:.4f}\".format(sum(rouge_ones)/len(predictions)))\n    print(\"R-2: {:.4f}\".format(sum(rouge_twos)/len(predictions)))\n    print(\"R-L: {:.4f}\".format(sum(rouge_ls)/len(predictions)))\n    if not opt.output_ranked_r2: exit()\n    if opt.source:\n        sources = []\n        for line in open(opt.source).readlines():\n            line_split = line.decode('utf8').strip().split('\\t')\n            sources.append(line_split[0])\n    ranked_ids = np.argsort(rouge_twos)[::-1]\n    with open(opt.output_ranked_r2, 'w') as ranked_pred_file:\n        ranked_pred_file.write(\"l_id\\trouge_2F\\tpred\\tgold\\torg\\n\")\n        for ranked_id in ranked_ids:\n            ranked_pred_file.write(\"{}\".format(ranked_ids[ranked_id]+1))\n            ranked_pred_file.write('\\t')\n            ranked_pred_file.write(\"{:.4f}\".format(rouge_twos[ranked_id]))\n            ranked_pred_file.write('\\t')\n            ranked_pred_file.write((''.join(predictions[ranked_id])).encode('utf8'))\n            ranked_pred_file.write('\\t')\n            ranked_pred_file.write((''.join(answers[ranked_id][0])).encode('utf8'))\n            ranked_pred_file.write('\\t')\n            if opt.source is not None:\n                ranked_pred_file.write((''.join(sources[ranked_id])).encode('utf8'))\n            else:\n                ranked_pred_file.write(u'-')\n            ranked_pred_file.write('\\n')\n","sub_path":"get_python_rouge.py","file_name":"get_python_rouge.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"106932018","text":"\"\"\"\nMy Chew (candy) distribution problem\n\"\"\"\n\nfront = rear = None\n\nclass Node:\n    def __init__(self, data, n=None, p=None):\n        self.data = data\n        self.next = n\n        self.prev = p\n\ndef isEmpty():\n    return front == 
None\n\ndef enQ(data):\n    global front, rear\n    # create a new node (object) & think of it as storing the address value\n    newNode = Node(data)\n    if isEmpty():\n        front = newNode\n    else:\n        rear.next = newNode\n    rear = newNode\n\ndef deQ():\n    global front, rear\n    if isEmpty():\n        print(\"Empty\")\n        return None\n    data = front.data\n    front = front.next\n    if isEmpty():\n        rear = None\n    return data\n\nmyzzu = 20\nperson = 0\nget = 1\nwhile myzzu > 0: \n    person += 1\n    print(f\"{person} appears\")\n    enQ([person,get])\n    prev_person, prev_get = deQ()\n    myzzu = myzzu - prev_get\n    print(f\"{prev_person} takes {prev_get} pieces.\")\n    print(f\"{myzzu} left\")\n    if myzzu < 1:\n        print(f\"person {prev_person} takes the last one\")\n        break\n    else:\n        enQ([prev_person,prev_get+1])\n    ","sub_path":"OnlineJudge/SWExpertAcademy/Example/20190225/example4.py","file_name":"example4.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"351436017","text":"import numpy as np\nimport pylab as py\nfrom functions import *\nfrom flux_box import flux\nfrom math import factorial\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib\nfrom matplotlib import cm\nfrom multiprocessing import Process\nimport multiprocessing as mp\n\n#Builds a labelmap for various moments up to n=10\nlabelmap = {(0,0): \"eD\", (0,1): \"eQ\", (0,2): \"eO\",\n            (1,0): \"mD\", (1,1): \"mQ\", (1,2): \"mO\"}\nfor n in range(3, 10):\n    labelmap[(0,n)] = \"e\" + str(2**(n+1))\n    labelmap[(1,n)] = \"m\" + str(2**(n+1))\n\ndef set_lim(xmin, xmax, xvals, yvals, prev = [0]):\n    \"\"\"set xlimit and ylimit of current plot\"\"\"\n    ymax = max(((xvals>xmin) & (xvals<xmax))*yvals)\n    py.xlim([xmin,xmax])\n    if ymax > max(prev):\n        py.ylim([0,ymax*1.33])\n    return ymax\n\nclass modes:\n    def __init__(self, anm, index, direc, out_direc=\"/output/\"):\n        self.anm = anm\n        self.index = index\n        self.I,self.Nfreq,self.J,self.K = anm.shape\n        out_direc = direc + out_direc\n\n        #compute field incident intensity at center\n        (Exr_mid, Exi_mid, Eyr_mid, Eyi_mid, Ezr_mid, Ezi_mid) = [np.zeros(self.Nfreq) for i in range(6)]\n        norms = {\"rE_mid_x\": Exr_mid, \"iE_mid_x\": Exi_mid,\n                 \"rE_mid_y\": Eyr_mid, \"iE_mid_y\": Eyi_mid,\n                 \"rE_mid_z\": Ezr_mid, \"iE_mid_z\": Ezi_mid}\n        for fname in norms:\n            filename = \"{0}/{1}.dat\".format(out_direc,fname)\n            datafile = open(filename, 'rb')\n            data = np.fromfile(datafile)\n            norms[fname] = data\n        Ex_norm = norms[\"rE_mid_x\"] + 1j*norms[\"iE_mid_x\"]\n        Ey_norm = norms[\"rE_mid_y\"] + 1j*norms[\"iE_mid_y\"]\n        Ez_norm = norms[\"rE_mid_z\"] + 1j*norms[\"iE_mid_z\"]\n        self.Enorm = (np.abs(Ex_norm)**2 + np.abs(Ey_norm)**2 + np.abs(Ez_norm)**2)**.5\n\n        #compute flux through the 6 box faces\n        f = flux(direc + \"/input.txt\")\n        fcen = f.D[\"freq\"]\n        fwid = f.D[\"fwidth\"]\n        self.wav = 1000/np.linspace(fcen-fwid/2,fcen+fwid/2,self.Nfreq)\n\n        files = {\"front\": \"flux_front.dat\", \"back\": \"flux_back.dat\",\n                 \"left\": \"flux_left.dat\",\"right\": \"flux_right.dat\",\n                 \"top\": \"flux_top.dat\", \"bottom\": \"flux_bottom.dat\"}\n        norm_file = \"flux_norm.dat\"\n\n        self.tot = np.zeros(self.Nfreq) \n        for k in files: \n            y = f.load_flux(out_direc + files[k])\n            self.tot += np.abs(y)\n            # if np.average(y) > 0:\n            #     self.tot += y\n            # else:\n            #     self.tot -= y\n        self.norm = f.load_flux(out_direc + norm_file)\n\n    def plot_box_scattering(self, area, label=\"Box Flux\"):\n        \"\"\"plot the scattering cross-section calculated through the box with given area in micron^2\"\"\"\n        py.xlabel(\"Wavelength (nm)\")\n        py.ylabel(\"Scattering Cross Section ($\\mu m^2$)\")\n        yval = area*self.tot/self.norm \n        
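For orientation, a toy illustration of the normalization performed by plot_box_scattering above (hypothetical flux values, not real simulation data; the area value matches the __main__ block further below):

import numpy as np
area = 0.03**2                     # box face area in micron^2
tot  = np.array([0.2, 0.5, 0.9])   # summed |flux| through the six box faces
norm = np.array([1.0, 1.0, 1.0])   # incident flux from the empty normalization run
print(area * tot / norm)           # scattering cross section: [0.00018 0.00045 0.00081]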
py.plot(self.wav, yval, linewidth=1.5, label=label)\n        set_lim(200,1000,self.wav,yval)\n\n    def get_freq_index(self, wavelength):\n        \"\"\"return freq_index closest to a given wavelength\"\"\"\n        return min(enumerate(np.abs(self.wav-wavelength)), key = lambda p: p[1])[0]\n\n\n    def plot_angular(self, freq_index, r, phi, thetapts=100):\n        \"\"\"Plot field 2d angular distribution at radius r, angle phi\"\"\"\n        k = 2*np.pi*self.index/self.wav[freq_index]\n\n        def E(theta):\n            retE = np.zeros(3, dtype=np.complex)\n            for n in range(1,self.anm.shape[2]+1):\n                for m in range(-n, n+1):\n                    factor = 1j**(n+2*m-1)*((2*n+1)*factorial(n-m)/factorial(n+m))**.5\n                    retE += factor*self.anm[0,freq_index,n-1,m+n]*N(n,m,theta,phi,r,k)\n                    retE += factor*self.anm[1,freq_index,n-1,m+n]*M(n,m,theta,phi,r,k)\n            # retE *= k**2\n            return retE\n\n        def E2(theta):\n            Eval = E(theta)\n            return np.dot(Eval, np.conj(Eval)).real\n\n        theta = np.linspace(0.01,2*np.pi-0.01, thetapts)\n        rval = [E2(th) for th in theta]\n        \n        ax = py.subplot(111, projection='polar')\n        py.plot(theta,rval)\n        ax.grid(True)\n\n        py.title(\"Angular Distribution of $|\\\\mathbf{E}|^2$\", fontsize=18)\n\n    def plot_angular_3d(self, freq_index, r, pts=30, axis = True):\n        \"\"\"Plot field 3d angular distribution at radius r\"\"\"\n        k = 2*np.pi/self.wav[freq_index]\n\n        ax = py.subplot2grid((1,1),(0,0), projection='3d')\n        if not axis:\n            ax.axis('off')\n\n        theta = np.linspace(0.01,np.pi-.01,pts)\n        phi = np.linspace(0,2*np.pi,pts)\n\n        th,ph = np.meshgrid(theta,phi)\n        shape = th.shape\n        R = np.zeros(shape)\n        for i in range(shape[0]):\n            for j in range(shape[1]):\n                retE = np.zeros(3, dtype=np.complex)\n                for n in range(1,self.anm.shape[2]+1):\n                    for m in range(-n, n+1):\n                        # if n != 2:\n                        #     continue\n                        factor = 1j**(n+2*m-1)*((2*n+1)*factorial(n-m)/factorial(n+m))**.5\n                        retE += factor*self.anm[0,freq_index,n-1,m+n]*N(n,m,th[i,j],ph[i,j],r,k)\n                        retE += factor*self.anm[1,freq_index,n-1,m+n]*M(n,m,th[i,j],ph[i,j],r,k)\n\n                R[i,j] = np.dot(retE, np.conj(retE)).real\n        R /= np.max(R)\n        X = R*np.sin(th)*np.cos(ph)\n        Y = R*np.sin(th)*np.sin(ph)\n        Z = R*np.cos(th)\n        colors = np.zeros((shape[0], shape[1], 4))\n        for i in range(shape[0]):\n            for j in range(shape[1]):\n                colors[i,j,:] = cm.rainbow(R[i,j])\n        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,shade=False, facecolors=colors,linewidth=.1, edgecolors='#000000')\n        surf.set_edgecolor('k')\n\n        a=1.5\n        py.xlim([-a,a])\n        py.ylim([-a,a])\n        ax.set_zlim(-a,a)\n        cset = ax.contourf(X,Y,Z, zdir='x', offset=-a, cmap=cm.coolwarm)\n        cset = ax.contourf(X,Y,Z, zdir='y', offset=a, cmap=cm.coolwarm)\n        ax.set_xlim3d(-a, a) \n        ax.set_ylim3d(-a, a) \n        ax.set_zlim3d(-a, a) \n        \n    def get_E(self, r, theta, phi, freq_index, Nmax = None):\n        \"\"\"Determine the E field at position (r,theta,phi) using up to n=Nmax\"\"\"\n        Eval = 0 + 0j\n        if Nmax:\n            J = Nmax \n        else:\n            J = self.J\n\n        for j in range(J):\n            n = j + 1\n            for k in range(2*n+1):\n                m = -n + k\n                Enm = self.Enorm[freq_index]*1j**(n+2*m-1)/(2*np.pi**.5)*((2*n+1)*factorial(n-m)/factorial(n+m))**.5\n                knum = 2*np.pi*self.index/self.wav[freq_index] \n                Eval += knum**2*Enm*self.anm[0,freq_index,j,k]*N(n,m,theta,phi,r,knum)\n                Eval += knum**2*Enm*self.anm[1,freq_index,j,k]*M(n,m,theta,phi,r,knum)\n\n        return Eval\n\n    def plot_anm(self, Nmax = None):\n        \"\"\"Plot all moments up to n=Nmax, return total sum across these moments\"\"\"\n        tot = np.zeros(self.Nfreq)\n        if Nmax:\n            J = Nmax \n        else:\n            J = self.J\n        \n        prev = [0]\n        for i in range(self.I):\n            for j in range(J):\n                y = 
self.index**3*1000*(2*np.pi/self.wav)**2*(j+1)*(j+2)*np.sum(abs(self.anm[i,:,j,:])**2,axis=1)\n                tot += y\n                py.plot(self.wav,y, label = labelmap[(i,j)], linewidth=1.5)\n                ymax = set_lim(200,1000,self.wav,y,prev=prev)\n                prev.append(ymax)\n\n        py.xlabel(\"Wavelength (nm)\")\n        py.ylabel(\"Scattering Cross Section ($\\mu m^2$)\")\n        return tot\n\n    #note that r isn't used with analytic integral\n    def scattered_flux(self, r, area, Nmax = None):\n        \"\"\"Plot all moments up to n=Nmax, return total sum across these moments\"\"\"\n        flux = np.zeros(self.Nfreq)\n        if Nmax:\n            J = Nmax \n        else:\n            J = self.J\n\n        prev = [0]\n        for j in range(J):\n            n = j + 1\n            temp_e = np.zeros(self.Nfreq)\n            temp_m = np.zeros(self.Nfreq)\n            for k in range(2*n+1):\n                m = -n + k\n                for freq_index in range(self.Nfreq):\n                    Enm = self.Enorm[freq_index]*1j**(n+2*m-1)/(2*np.pi**.5)*((2*n+1)*factorial(n-m)/factorial(n+m))**.5\n                    knum = 2*np.pi*self.index/self.wav[freq_index] \n                    factor = knum**4*np.abs(Enm)**2*np.abs(self.anm[0,freq_index,j,k])**2\n                    # integ = compute_norm(n,m,r,knum,'e')*r**2\n                    integ = n*(n+1)/np.abs(Enm)**2/knum**2*self.Enorm[freq_index]**2\n                    temp_e[freq_index] += factor*integ\n                    factor = knum**4*np.abs(Enm)**2*np.abs(self.anm[1,freq_index,j,k])**2\n                    # integ = compute_norm(n,m,r,knum,'m')*r**2\n                    integ = n*(n+1)/np.abs(Enm)**2/knum**2*self.Enorm[freq_index]**2\n                    temp_m[freq_index] += factor*integ\n                print(n,m) \n            y1 = temp_e/self.norm*area*self.index\n            y2 = temp_m/self.norm*area*self.index\n            flux += (y1+y2)\n\n            py.plot(self.wav,y1, label = labelmap[(0,j)], linewidth=1.5)\n            py.plot(self.wav,y2, label = labelmap[(1,j)], linewidth=1.5)\n            ymax = set_lim(200,1000,self.wav,y1,prev=prev)\n            prev.append(ymax)\n            ymax = set_lim(200,1000,self.wav,y2,prev=prev)\n            prev.append(ymax)\n\n        py.xlabel(\"Wavelength (nm)\")\n        py.ylabel(\"Scattering Cross Section ($\\mu m^2$)\")\n\n        y = flux\n        return y\n\n    #area could be made part of the class\n    def partial_flux(self, r, area, theta1, theta2, phi1, phi2, Nmax = None):\n        \"\"\"Plot total scattering cross-section through a given part of the sphere\"\"\"\n        flux = np.zeros(self.Nfreq)\n        output = mp.Queue()\n        processes = []\n        cores = 8\n        # for freq_index in range(self.Nfreq):\n        #     def E_sq(theta,phi):\n        #         Eval = self.get_E(r,theta,phi,freq_index, Nmax)\n        #         return np.dot(Eval, np.conj(Eval)).real\n        #     flux[freq_index] = double_integral(E_sq,theta1,theta2,phi1,phi2)\n        #     print(freq_index)\n\n        for i in range(int(self.Nfreq/cores)):\n            for c in range(cores):\n                freq_index = i*cores + c\n                def E_sq(theta,phi):\n                    Eval = self.get_E(r,theta,phi,freq_index, Nmax)\n                    return np.dot(Eval, np.conj(Eval)).real\n                p = Process(target=double_integral_parallel, args=(E_sq,theta1,theta2,phi1,phi2,freq_index,output))\n                p.start()\n                processes.append(p)\n            for p in processes:\n                p.join()\n            while not output.empty():\n                out = output.get()\n                index = out[0]\n                flux_val = out[1]\n                flux[index] = flux_val*r**2\n            processes = []\n\n        scat = flux/self.norm*self.index*area\n\n        ymax = set_lim(200,1000,self.wav,scat)\n        py.xlabel(\"Wavelength (nm)\")\n        py.ylabel(\"Scattering Cross Section ($\\mu m^2$)\")\n        py.plot(self.wav, scat, label=\"Partial Scattering\", linewidth=1.5)\n\n        return scat\n\nif __name__ == \"__main__\":\n    anm = np.load(\"mode_output.npy\")\n    index = 1.33\n    direc = \"/home/john/research/mymeep/projects/silver_nanosphere/\"\n    output = \"output/\"\n    area = 0.03**2\n    wavelength = 700\n\n    anm = np.nan_to_num(anm)/10**6 #converts to micron^2\n    A = modes(anm,index,direc,output)\n    freq_index = A.get_freq_index(wavelength)\n\n    # py.figure(6)\n    # 
anm = np.load(\"cluster_azimuthal.npy\")\n # output = \"output_azimuthal/\"\n # anm = np.nan_to_num(anm)/10**6 #converts to micron^2\n # A = modes(anm,index,direc,output)\n # A.plot_box_scattering(area, \"Azi\")\n # anm = np.load(\"cluster_radial.npy\")\n # output = \"output_radial/\"\n # anm = np.nan_to_num(anm)/10**6 #converts to micron^2\n # A = modes(anm,index,direc,output)\n # A.plot_box_scattering(area, \"Rad\")\n # anm = np.load(\"cluster_linear.npy\")\n # output = \"output_linear/\"\n # anm = np.nan_to_num(anm)/10**6 #converts to micron^2\n # A = modes(anm,index,direc,output)\n # A.plot_box_scattering(area, \"Lin\")\n # py.legend()\n\n py.figure(2)\n A.plot_angular_3d(freq_index,10000)\n\n py.figure(4)\n tot = A.scattered_flux(5000,area, Nmax=2)\n py.legend()\n py.xlim((200,1000))\n\n # py.figure(6)\n # A.partial_flux(10000,area,0,np.pi,0,2*np.pi)\n # A.plot_box_scattering(area)\n # py.legend()\n # py.xlim((200,1000))\n\n py.figure(5)\n py.plot(A.wav,tot, linewidth=1.5, label=\"Sum of Moments\")\n py.xlabel(\"Wavelength (nm)\")\n py.ylabel(\"Total Scattering Cross Section ($\\mu m^2$)\")\n A.plot_box_scattering(area)\n set_lim(200,1000,A.wav,tot)\n py.legend()\n\n\n py.show()\n","sub_path":"post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":12833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"589022354","text":"from django.urls import path\nfrom . import views\nfrom halls import views as hall_views\nurlpatterns = [\n path('edit/', views.edit, name='review-edit'),\n path('write/', views.write, name='review-write'),\n path('delete/', views.delete, name='review-delete'),\n path('edit//photos', views.review_photos, name='review-photos'),\n path('report/', views.report, name='report'),\n]\n","sub_path":"honesthalls/reviews/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"310116743","text":"# coding=UTF-8\n# **********************************************************************\n# Copyright (c) 2013-2020 Cisco Systems, Inc. 
All rights reserved\n# written by zen warriors, do not modify!\n# **********************************************************************\n\n\nfrom cobra.mit.meta import ClassMeta\nfrom cobra.mit.meta import StatsClassMeta\nfrom cobra.mit.meta import CounterMeta\nfrom cobra.mit.meta import PropMeta\nfrom cobra.mit.meta import Category\nfrom cobra.mit.meta import SourceRelationMeta\nfrom cobra.mit.meta import NamedSourceRelationMeta\nfrom cobra.mit.meta import TargetRelationMeta\nfrom cobra.mit.meta import DeploymentPathMeta, DeploymentCategory\nfrom cobra.model.category import MoCategory, PropCategory, CounterCategory\nfrom cobra.mit.mo import Mo\n\n\n# ##################################################\nclass Job(Mo):\n \"\"\"\n The configuration job.\n\n \"\"\"\n\n meta = ClassMeta(\"cobra.model.config.Job\")\n\n meta.moClassName = \"configJob\"\n meta.rnFormat = \"run-%(name)s\"\n meta.category = MoCategory.REGULAR\n meta.label = \"Job Instance\"\n meta.writeAccessMask = 0x1\n meta.readAccessMask = 0x3\n meta.isDomainable = False\n meta.isReadOnly = False\n meta.isConfigurable = True\n meta.isDeletable = False\n meta.isContextRoot = False\n\n meta.childClasses.add(\"cobra.model.tag.Tag\")\n meta.childClasses.add(\"cobra.model.fault.Counts\")\n meta.childClasses.add(\"cobra.model.config.ImportP\")\n meta.childClasses.add(\"cobra.model.config.RollbackP\")\n meta.childClasses.add(\"cobra.model.config.ExportP\")\n meta.childClasses.add(\"cobra.model.aaa.RbacAnnotation\")\n meta.childClasses.add(\"cobra.model.fault.Inst\")\n meta.childClasses.add(\"cobra.model.health.Inst\")\n meta.childClasses.add(\"cobra.model.aaa.DomainRolesTuple\")\n meta.childClasses.add(\"cobra.model.config.SubJob\")\n meta.childClasses.add(\"cobra.model.fault.Delegate\")\n meta.childClasses.add(\"cobra.model.tag.Annotation\")\n\n meta.childNamesAndRnPrefix.append((\"cobra.model.tag.Annotation\", \"annotationKey-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.aaa.DomainRolesTuple\", \"domainroles\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.config.ImportP\", \"configimp-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.config.ExportP\", \"configexp-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.aaa.RbacAnnotation\", \"rbacDom-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.tag.Tag\", \"tagKey-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.fault.Counts\", \"fltCnts\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.config.RollbackP\", \"snprlb-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.fault.Inst\", \"fault-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.health.Inst\", \"health\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.config.SubJob\", \"job-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.fault.Delegate\", \"fd-\"))\n\n meta.parentClasses.add(\"cobra.model.config.JobCont\")\n\n meta.superClasses.add(\"cobra.model.naming.NamedObject\")\n meta.superClasses.add(\"cobra.model.pol.Obj\")\n meta.superClasses.add(\"cobra.model.pol.Comp\")\n\n meta.rnPrefixes = [\n ('run-', True),\n ]\n\n prop = PropMeta(\"str\", \"ack\", \"ack\", 16588, PropCategory.REGULAR)\n prop.label = \"Triggers job deletion\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.defaultValue = False\n prop.defaultValueStr = \"no\"\n prop._addConstant(\"no\", None, False)\n prop._addConstant(\"yes\", None, True)\n meta.props.add(\"ack\", prop)\n\n prop = PropMeta(\"str\", \"annotation\", \"annotation\", 37299, PropCategory.REGULAR)\n prop.label = \"Annotation. 
Suggested format orchestrator:value\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 128)]\n prop.regex = ['[a-zA-Z0-9_.:-]+']\n meta.props.add(\"annotation\", prop)\n\n prop = PropMeta(\"str\", \"childAction\", \"childAction\", 4, PropCategory.CHILD_ACTION)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"deleteAll\", \"deleteall\", 16384)\n prop._addConstant(\"deleteNonPresent\", \"deletenonpresent\", 8192)\n prop._addConstant(\"ignore\", \"ignore\", 4096)\n meta.props.add(\"childAction\", prop)\n\n prop = PropMeta(\"str\", \"decryptErrors\", \"decryptErrors\", 21514, PropCategory.REGULAR)\n prop.label = \"Secure field decryption errors\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"decryptErrors\", prop)\n\n prop = PropMeta(\"str\", \"descr\", \"descr\", 5582, PropCategory.REGULAR)\n prop.label = \"Description\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 128)]\n prop.regex = ['[a-zA-Z0-9\\\\!#$%()*,-./:;@ _{|}~?&+]+']\n meta.props.add(\"descr\", prop)\n\n prop = PropMeta(\"str\", \"details\", \"details\", 444, PropCategory.REGULAR)\n prop.label = \"Job Details\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"details\", prop)\n\n prop = PropMeta(\"str\", \"dn\", \"dn\", 1, PropCategory.DN)\n prop.label = \"None\"\n prop.isDn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"dn\", prop)\n\n prop = PropMeta(\"str\", \"executeTime\", \"executeTime\", 20143, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"executeTime\", prop)\n\n prop = PropMeta(\"str\", \"extMngdBy\", \"extMngdBy\", 39438, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"undefined\"\n prop._addConstant(\"msc\", \"msc\", 1)\n prop._addConstant(\"undefined\", \"undefined\", 0)\n meta.props.add(\"extMngdBy\", prop)\n\n prop = PropMeta(\"str\", \"fileName\", \"fileName\", 16238, PropCategory.REGULAR)\n prop.label = \"Export Config File name\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"fileName\", prop)\n\n prop = PropMeta(\"str\", \"lastStepDescr\", \"lastStepDescr\", 20424, PropCategory.REGULAR)\n prop.label = \"Last executed workflow step time\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"lastStepDescr\", prop)\n\n prop = PropMeta(\"str\", \"lastStepIndex\", \"lastStepIndex\", 20425, PropCategory.REGULAR)\n prop.label = \"Last executed workflow step index\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"lastStepIndex\", prop)\n\n prop = PropMeta(\"str\", \"lastStepTime\", \"lastStepTime\", 23299, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"lastStepTime\", prop)\n\n prop = PropMeta(\"str\", \"lcOwn\", \"lcOwn\", 9, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"local\"\n prop._addConstant(\"implicit\", \"implicit\", 4)\n prop._addConstant(\"local\", \"local\", 0)\n prop._addConstant(\"policy\", \"policy\", 1)\n prop._addConstant(\"replica\", \"replica\", 2)\n prop._addConstant(\"resolveOnBehalf\", \"resolvedonbehalf\", 3)\n meta.props.add(\"lcOwn\", prop)\n\n prop = 
PropMeta(\"str\", \"modTs\", \"modTs\", 7, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"never\"\n prop._addConstant(\"never\", \"never\", 0)\n meta.props.add(\"modTs\", prop)\n\n prop = PropMeta(\"str\", \"monPolDn\", \"monPolDn\", 13803, PropCategory.REGULAR)\n prop.label = \"Monitoring policy attached to this observable object\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"monPolDn\", prop)\n\n prop = PropMeta(\"str\", \"name\", \"name\", 5994, PropCategory.REGULAR)\n prop.label = \"Name\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n prop.range = [(1, 64)]\n prop.regex = ['[a-zA-Z0-9_.:-]+']\n meta.props.add(\"name\", prop)\n\n prop = PropMeta(\"str\", \"nameAlias\", \"nameAlias\", 28417, PropCategory.REGULAR)\n prop.label = \"Name alias\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 63)]\n prop.regex = ['[a-zA-Z0-9_.-]+']\n meta.props.add(\"nameAlias\", prop)\n\n prop = PropMeta(\"str\", \"operSt\", \"operSt\", 445, PropCategory.REGULAR)\n prop.label = \"Operational State\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"pending\"\n prop._addConstant(\"fail-no-data\", \"fail-no-data\", 7)\n prop._addConstant(\"fail-no-retry\", \"fail-no-retry\", 6)\n prop._addConstant(\"failed\", \"failed\", 5)\n prop._addConstant(\"pending\", \"pending\", 0)\n prop._addConstant(\"retry\", \"retry\", 8)\n prop._addConstant(\"running\", \"running\", 1)\n prop._addConstant(\"success\", \"success\", 2)\n prop._addConstant(\"success-with-warnings\", \"success-with-warnings\", 3)\n prop._addConstant(\"timeout\", \"timeout\", 4)\n meta.props.add(\"operSt\", prop)\n\n prop = PropMeta(\"str\", \"retriesUsed\", \"retriesUsed\", 446, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"retriesUsed\", prop)\n\n prop = PropMeta(\"str\", \"rn\", \"rn\", 2, PropCategory.RN)\n prop.label = \"None\"\n prop.isRn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"rn\", prop)\n\n prop = PropMeta(\"str\", \"status\", \"status\", 3, PropCategory.STATUS)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"created\", \"created\", 2)\n prop._addConstant(\"deleted\", \"deleted\", 8)\n prop._addConstant(\"modified\", \"modified\", 4)\n meta.props.add(\"status\", prop)\n\n prop = PropMeta(\"str\", \"totalStepCount\", \"totalStepCount\", 20426, PropCategory.REGULAR)\n prop.label = \"Number of total workflow steps in the job\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"totalStepCount\", prop)\n\n prop = PropMeta(\"str\", \"type\", \"type\", 20423, PropCategory.REGULAR)\n prop.label = \"Job type\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 1\n prop.defaultValueStr = \"export\"\n prop._addConstant(\"export\", \"export\", 1)\n prop._addConstant(\"import\", \"import\", 0)\n prop._addConstant(\"import-ids\", \"import-ids\", 5)\n prop._addConstant(\"rollback\", \"rollback\", 2)\n prop._addConstant(\"snapshot-download\", \"snapshot-download\", 4)\n prop._addConstant(\"snapshot-upload\", \"snapshot-upload\", 3)\n meta.props.add(\"type\", prop)\n\n prop = PropMeta(\"str\", \"uid\", \"uid\", 8, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n 
meta.props.add(\"uid\", prop)\n\n prop = PropMeta(\"str\", \"userName\", \"userName\", 20422, PropCategory.REGULAR)\n prop.label = \"User who triggered the job\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"userName\", prop)\n\n meta.namingProps.append(getattr(meta.props, \"name\"))\n\n def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):\n namingVals = [name]\n Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)\n\n\n\n# End of package file\n# ##################################################\n","sub_path":"venv/Lib/site-packages/cobra/modelimpl/config/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":11299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"332400208","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 4 15:27:53 2019\r\n\r\n@author: HOME\r\n\"\"\"\r\n\r\na = \"0.255.56.1\"\r\n\r\ndef is_valid_IP(strng):\r\n split_string = strng.split('.')\r\n string_list = []\r\n if len(split_string) > 4 or len(split_string) < 4:\r\n return False\r\n else:\r\n for i in range(len(split_string)):\r\n temp = split_string[i]\r\n if len(temp) > 1 and temp[0] == '0':\r\n return False\r\n elif temp.isdigit():\r\n string_list.append(int(temp))\r\n else:\r\n return False\r\n for j in range(len(string_list)):\r\n if string_list[j] > 255 or string_list[j] < 0:\r\n return False\r\n return True\r\n \r\nis_valid_IP(a)\r\n\r\n'''\r\ndef is_valid_IP(strng):\r\n lst = strng.split('.')\r\n passed = 0\r\n for sect in lst:\r\n if sect.isdigit():\r\n if sect[0] != '0':\r\n if 0 < int(sect) <= 255:\r\n passed += 1\r\n return passed == 4\r\n'''","sub_path":"CodeWars/ip_validation.py","file_name":"ip_validation.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"100935405","text":"class Solution(object):\n def rob(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # Max money can get from nums[:i-1] houses.\n dp = [0] * (len(nums) + 2)\n for i in range(2, len(nums)+2):\n dp[i] = max(nums[i-2] + dp[i-2], dp[i-1])\n return dp[-1]\n\n","sub_path":"python2/l0198_house_robber.py","file_name":"l0198_house_robber.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"176975178","text":"###################################################################################\r\n# Author: Ricardo Pereira\r\n# Date: 29-03-2021\r\n# Last Modified data: 01-04-2021\r\n# Abstract: SiamRPN: training data preparation\r\n# Adapted from arbitularov (https://github.com/arbitularov/SiamRPN-PyTorch)\r\n###################################################################################\r\n\r\nimport os\r\nimport sys\r\nimport cv2\r\nimport time\r\nimport random\r\nimport numpy as np\r\n\r\n\r\nimport torch\r\nimport torch.nn\r\nfrom torch.utils.data import Dataset\r\nfrom torchvision import datasets, transforms, utils\r\n\r\nfrom PIL import Image, ImageOps, ImageStat, ImageDraw\r\nfrom config import config\r\n\r\n\r\nclass Anchor_Boxes(object):\r\n\tdef __init__(self):\r\n\t\tself.scales \t\t= config.anchor_scales\t\t# [8,]\r\n\t\tself.ratios \t\t= config.anchor_ratios\t\t# [0.33, 0.5, 1, 2, 3]\r\n\t\tself.anchor_num\t\t= config.anchor_num\t\t\t# 5\r\n\t\tself.base_size\t\t= config.anchor_base_size\t# 8\r\n\t\tself.score_size\t\t= config.score_size\t\t\t# 
17\r\n\t\tself.total_stride\t= config.total_stride\t\t# 12\r\n\t\tself.anchors \t\t= self.generate_anchors()\r\n\r\n\tdef generate_anchors(self):\r\n\t\tanchor = np.zeros((self.anchor_num, 4), dtype = np.float32) # shape = (5,4)\r\n\t\tsize \t= self.base_size * self.base_size \t\t\t\t\t # size = 64\r\n\t\tcount \t= 0\r\n\r\n\t\tfor ratio in self.ratios:\r\n\t\t\tws = int(np.sqrt(size / ratio)) # 13, 11, 8, 5, 4\r\n\t\t\ths = int(ws * ratio)\t\t\t# 4, 5, 8, 10, 12\r\n\t\t\tfor scale in self.scales:\r\n\t\t\t\twws = ws * scale \t\t\t# 104, 88, 64, 40, 32\r\n\t\t\t\thhs = hs * scale \t\t\t# 32, 40, 64, 80, 96\r\n\t\t\t\tanchor[count, 0] = 0\r\n\t\t\t\tanchor[count, 1] = 0\r\n\t\t\t\tanchor[count, 2] = wws\r\n\t\t\t\tanchor[count, 3] = hhs\r\n\t\t\t\tcount += 1\r\n\t\t\r\n\t\tanchor = np.tile(anchor, self.score_size * self.score_size).reshape((-1,4)) # (1445, 4)\r\n\t\tori \t= 25\r\n\t\txx, yy = np.meshgrid([ori + self.total_stride * dx for dx in range(self.score_size)], # (17,17)\r\n\t\t\t\t\t\t\t [ori + self.total_stride * dy for dy in range(self.score_size)]) # (17,17)\r\n\t\txx, yy = np.tile(xx.flatten(), (self.anchor_num, 1)).flatten(), \\\r\n\t\t\t\t np.tile(yy.flatten(), (self.anchor_num, 1)).flatten()\r\n\t\tanchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32) # (1445, 4)\r\n\r\n\t\treturn anchor\r\n\r\n\r\n\tdef pos_neg_anchors(self, bbox):\r\n\t\tnorm_anchors = self.anchors_normalization(self.anchors, bbox)\r\n\t\tiou \t\t = self.compute_IoU(self.anchors, bbox).flatten()\r\n\r\n\t\tpos_index = np.where(iou >= config.pos_threshold)[0][:config.num_max_pos]\r\n\t\tneg_index = np.random.choice(np.where(iou < config.neg_threshold)[0], config.num_max_neg, replace = False)\r\n\r\n\t\tlabel \t = np.ones_like(iou) * (- 1)\r\n\t\tlabel[pos_index] = 1\r\n\t\tlabel[neg_index] = 0\r\n\r\n\t\treturn norm_anchors, label\r\n\r\n\r\n\t# Paper's Eq. 
3\r\n\tdef anchors_normalization(self, anchors, gt_bbox):\r\n\t\tnorm_anchors = np.zeros_like(anchors, dtype = np.float32)\r\n\t\tnorm_anchors[:,0] = (gt_bbox[0] - anchors[:,0]) / (anchors[:,2] + 1e-6)\r\n\t\tnorm_anchors[:,1] = (gt_bbox[1] - anchors[:,1]) / (anchors[:,3] + 1e-6)\r\n\t\tnorm_anchors[:,2] = np.log((gt_bbox[2] + 1e-6) / (anchors[:,2] + 1e-6))\r\n\t\tnorm_anchors[:,3] = np.log((gt_bbox[3] + 1e-6) / (anchors[:,3] + 1e-6))\r\n\r\n\t\treturn norm_anchors\r\n\r\n\r\n\r\n\r\n\tdef compute_IoU(self, anchors, bbox):\r\n\t\tif np.array(bbox).ndim == 1:\r\n\t\t\tbbox = np.array(bbox)[None, :] # shape = (1, 4)\r\n\t\telse:\r\n\t\t\tbbox = np.array(bbox)\r\n\t\tgt_bbox = np.tile(bbox.reshape(1,-1), (anchors.shape[0], 1))\t# shape = (1445, 4)\r\n\r\n\t\t# Transform cx, cy, w, h => (x1,y1) (x2,y2)\r\n\t\tanchor_x1 = anchors[:, 0] - anchors[:, 2] / 2 + 0.5\r\n\t\tanchor_y1 = anchors[:, 1] - anchors[:, 3] / 2 + 0.5\r\n\t\tanchor_x2 = anchors[:, 0] + anchors[:, 2] / 2 - 0.5\r\n\t\tanchor_y2 = anchors[:, 1] + anchors[:, 3] / 2 - 0.5\r\n\r\n\t\tgt_x1 = gt_bbox[:, 0] - gt_bbox[:, 2] / 2 + 0.5\r\n\t\tgt_y1 = gt_bbox[:, 1] - gt_bbox[:, 3] / 2 + 0.5\r\n\t\tgt_x2 = gt_bbox[:, 0] + gt_bbox[:, 2] / 2 - 0.5\r\n\t\tgt_y2 = gt_bbox[:, 1] + gt_bbox[:, 3] / 2 - 0.5\r\n\r\n\t\t# Edges values\r\n\t\txmax = np.max([anchor_x1, gt_x1], axis=0)\r\n\t\tymax = np.max([anchor_y1, gt_y1], axis=0)\r\n\t\txmin = np.min([anchor_x2, gt_x2], axis=0)\r\n\t\tymin = np.min([anchor_y2, gt_y2], axis=0)\r\n\r\n\t\t# Intersection\r\n\t\tinter_area = np.max([xmin - xmax, np.zeros(xmax.shape)], axis=0) * \\\r\n\t\t\t\t\t np.max([ymin - ymax, np.zeros(ymax.shape)], axis=0)\r\n\r\n\t\t# Area of prediction and ground-truth\r\n\t\tarea_anchor = (anchor_x2 - anchor_x1) * (anchor_y2 - anchor_y1)\r\n\t\tarea_gt \t= (gt_x2 - gt_x1) * (gt_y2 - gt_y1)\r\n\r\n\t\t# Intersection over union\r\n\t\tiou = inter_area / (area_anchor + area_gt - inter_area + 1e-6)\r\n\r\n\t\treturn iou\r\n\r\n\r\n\r\n\r\n# Computes and stores the average and current value\r\nclass AverageMeter(object):\r\n\tdef __init__(self):\r\n\t\tself.reset()\r\n\r\n\tdef reset(self):\r\n\t\tself.val \t= 0\r\n\t\tself.avg \t= 0\r\n\t\tself.sum \t= 0\r\n\t\tself.count \t= 0\r\n\r\n\tdef update(self, val, n = 1):\r\n\t\tself.val \t= val\r\n\t\tself.sum \t+= val * n\r\n\t\tself.count \t+= n\r\n\t\tself.avg \t= self.sum / self.count\r\n\r\n\r\n\r\nclass TrainDataLoader(Dataset):\r\n\tdef __init__(self, data_path, check = False):\r\n\t\tself.max_inter \t\t= config.max_inter\r\n\t\tself.data_path \t\t= data_path\r\n\t\tself.ret \t \t\t= {}\r\n\t\tself.count \t\t\t= 0\r\n\t\tself.tmp_dir\t\t= 'tmp/visualization'\r\n\t\tself.check\t\t\t= check\r\n\t\tself.gen_anchors \t= Anchor_Boxes()\r\n\t\tself.anchors \t\t= self.gen_anchors.anchors\r\n\t\tself.ret['anchors'] = self.anchors\r\n\t\tself.sub_class_dir \t= [sub_class_dir for sub_class_dir in os.listdir(self.data_path) if os.path.isdir(os.path.join(self.data_path, sub_class_dir))]\r\n\r\n\t\tif not os.path.isdir(self.tmp_dir):\r\n\t\t\tos.makedirs(self.tmp_dir)\r\n\r\n\r\n\t# Function to pick template and detection images as well their GT\r\n\tdef VOT_pick_img_pairs(self, index_of_subclass):\r\n\t\tassert index_of_subclass < len(self.sub_class_dir), 'index_of_subclass should less than total classes'\r\n\t\t\r\n\t\t# ------------- Images Path ------------- #\r\n\t\tsub_class_dir_basename \t= self.sub_class_dir[index_of_subclass] # Gymnastics\r\n\t\tsub_class_dir_path \t\t= os.path.join(self.data_path, sub_class_dir_basename) # 
..\\Gymnastics\r\n\t\tsub_class_img_name \t\t= [img_name for img_name in os.listdir(sub_class_dir_path) if not img_name.find('.jpg') == -1]\r\n\t\tsub_class_img_name \t\t= sorted(sub_class_img_name) # 000001.jpg ...\r\n\t\tsub_class_img_num \t\t= len(sub_class_img_name) # 207\r\n\t\tsub_class_gt_name \t\t= 'groundtruth.txt'\r\n\r\n\t\tstatus = True\r\n\t\twhile status:\r\n\t\t\tif self.max_inter >= sub_class_img_num-1:\r\n\t\t\t\tself.max_inter = sub_class_img_num//2\r\n\r\n\t\t\t#template_index = np.clip(random.choice(range(0, max(1, sub_class_img_num - self.max_inter))), 0, sub_class_img_num-1)\r\n\t\t\t#detection_index= np.clip(random.choice(range(1, max(2, self.max_inter))) + template_index, 0, sub_class_img_num-1)\r\n\t\t\ttemplate_index = 50\r\n\t\t\tdetection_index = 113\r\n\r\n\t\t\ttemplate_img_path \t= os.path.join(sub_class_dir_path, sub_class_img_name[template_index])\r\n\t\t\tdetection_img_path \t= os.path.join(sub_class_dir_path, sub_class_img_name[detection_index])\r\n\t\t\tgt_path\t\t\t\t= os.path.join(sub_class_dir_path, sub_class_gt_name)\r\n\r\n\t\t# ------------- Labels ------------- #\r\n\t\t\twith open(gt_path, 'r') as f:\r\n\t\t\t\tgt_lines \t= f.readlines()\r\n\t\t\ttemplate_gt \t= [abs(int(float(i))) for i in gt_lines[template_index].strip('\\n').split(',')[:4]]\r\n\t\t\tdetection_gt \t= [abs(int(float(i))) for i in gt_lines[detection_index].strip('\\n').split(',')[:4]]\r\n\r\n\t\t\tif template_gt[2]*template_gt[3]*detection_gt[2]*detection_gt[3] != 0:\r\n\t\t\t\tstatus = False\r\n\t\t\telse:\r\n\t\t\t\tprint('Warning: encounter object missing, reinitializing...')\r\n\r\n\t\t# ------------- Save Template and Detection info ------------- #\r\n\t\tself.ret['template_img_idx'] \t\t= template_index\r\n\t\tself.ret['detection_img_idx']\t\t= detection_index\r\n\t\tself.ret['template_img_path']\t\t= template_img_path\r\n\t\tself.ret['detection_img_path']\t\t= detection_img_path\r\n\t\tself.ret['template_target_x1y1wh'] \t= template_gt\r\n\t\tself.ret['detection_target_x1y1wh']\t= detection_gt\r\n\t\ttemplate_x1y1wh, detection_x1y1wh \t= template_gt.copy(), detection_gt.copy()\r\n\t\tself.ret['template_target_xywh']\t= np.array([template_x1y1wh[0]+template_x1y1wh[2]//2, template_x1y1wh[1]+template_x1y1wh[3]//2, template_x1y1wh[2], template_x1y1wh[3]], np.float32)\r\n\t\tself.ret['detection_target_xywh']\t= np.array([detection_x1y1wh[0]+detection_x1y1wh[2]//2, detection_x1y1wh[1]+detection_x1y1wh[3]//2, detection_x1y1wh[2], detection_x1y1wh[3]], np.float32)\r\n\r\n\t\tif self.check:\r\n\t\t\tcheck_dir_path = os.path.join(self.tmp_dir, '0_check_template_detection_bb')\r\n\t\t\tif not os.path.exists(check_dir_path):\r\n\t\t\t\tos.makedirs(check_dir_path)\r\n\r\n\t\t\ttemplate_img \t= Image.open(self.ret['template_img_path'])\r\n\t\t\tx,y,w,h \t\t= self.ret['template_target_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= ImageDraw.Draw(template_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=2, fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_template_img.jpg'.format(self.count))\r\n\t\t\ttemplate_img.save(save_path)\r\n\r\n\t\t\tdetection_img \t= Image.open(self.ret['detection_img_path'])\r\n\t\t\tx,y,w,h \t\t= self.ret['detection_target_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= ImageDraw.Draw(detection_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], 
width=2, fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_detection_img.jpg'.format(self.count))\r\n\t\t\tdetection_img.save(save_path)\r\n\r\n\t\r\n\tdef VOT_sub_class_img_pairs(self, img_index):\r\n\t\t# ------------- Images Path ------------- #\r\n\t\tsub_class_dir_basename \t= 'Gymnastics' # Gymnastics\r\n\t\tsub_class_dir_path \t\t= os.path.join(self.data_path, sub_class_dir_basename) # ..\\Gymnastics\r\n\t\tsub_class_img_name \t\t= [img_name for img_name in os.listdir(sub_class_dir_path) if not img_name.find('.jpg') == -1]\r\n\t\tsub_class_img_name \t\t= sorted(sub_class_img_name) # 000001.jpg ...\r\n\t\tsub_class_img_num \t\t= len(sub_class_img_name) # 207\r\n\t\tsub_class_gt_name \t\t= 'groundtruth.txt'\r\n\r\n\t\tstatus = True\r\n\t\twhile status:\r\n\r\n\t\t\ttemplate_index = 0\r\n\t\t\tdetection_index = img_index\r\n\r\n\t\t\ttemplate_img_path \t= os.path.join(sub_class_dir_path, sub_class_img_name[template_index])\r\n\t\t\tdetection_img_path \t= os.path.join(sub_class_dir_path, sub_class_img_name[detection_index])\r\n\t\t\tgt_path\t\t\t\t= os.path.join(sub_class_dir_path, sub_class_gt_name)\r\n\r\n\t\t# ------------- Labels ------------- #\r\n\t\t\twith open(gt_path, 'r') as f:\r\n\t\t\t\tgt_lines \t= f.readlines()\r\n\t\t\ttemplate_gt \t= [abs(int(float(i))) for i in gt_lines[template_index].strip('\\n').split(',')[:4]]\r\n\t\t\tdetection_gt \t= [abs(int(float(i))) for i in gt_lines[detection_index].strip('\\n').split(',')[:4]]\r\n\r\n\t\t\tif template_gt[2]*template_gt[3]*detection_gt[2]*detection_gt[3] != 0:\r\n\t\t\t\tstatus = False\r\n\t\t\telse:\r\n\t\t\t\tprint('Warning: encounter object missing, reinitializing...')\r\n\r\n\t\t# ------------- Save Template and Detection info ------------- #\r\n\t\tself.ret['template_img_idx'] \t\t= template_index\r\n\t\tself.ret['detection_img_idx']\t\t= detection_index\r\n\t\tself.ret['template_img_path']\t\t= template_img_path\r\n\t\tself.ret['detection_img_path']\t\t= detection_img_path\r\n\t\tself.ret['template_target_x1y1wh'] \t= template_gt\r\n\t\tself.ret['detection_target_x1y1wh']\t= detection_gt\r\n\t\ttemplate_x1y1wh, detection_x1y1wh \t= template_gt.copy(), detection_gt.copy()\r\n\t\tself.ret['template_target_xywh']\t= np.array([template_x1y1wh[0]+template_x1y1wh[2]//2, template_x1y1wh[1]+template_x1y1wh[3]//2, template_x1y1wh[2], template_x1y1wh[3]], np.float32)\r\n\t\tself.ret['detection_target_xywh']\t= np.array([detection_x1y1wh[0]+detection_x1y1wh[2]//2, detection_x1y1wh[1]+detection_x1y1wh[3]//2, detection_x1y1wh[2], detection_x1y1wh[3]], np.float32)\r\n\r\n\t\tif self.check:\r\n\t\t\tcheck_dir_path = os.path.join(self.tmp_dir, '0_check_template_detection_bb')\r\n\t\t\tif not os.path.exists(check_dir_path):\r\n\t\t\t\tos.makedirs(check_dir_path)\r\n\r\n\t\t\ttemplate_img \t= Image.open(self.ret['template_img_path'])\r\n\t\t\tx,y,w,h \t\t= self.ret['template_target_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= ImageDraw.Draw(template_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=2, fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_template_img.jpg'.format(self.count))\r\n\t\t\ttemplate_img.save(save_path)\r\n\r\n\t\t\tdetection_img \t= Image.open(self.ret['detection_img_path'])\r\n\t\t\tx,y,w,h \t\t= self.ret['detection_target_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= 
ImageDraw.Draw(detection_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=2, fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_detection_img.jpg'.format(self.count))\r\n\t\t\tdetection_img.save(save_path)\r\n\r\n\r\n\t# Function to pre-process template and detection images\r\n\tdef imgs_pre_processing (self):\r\n\r\n\t\tdef window_size(bbox, size_z, size_x, context_amount):\r\n\t\t\tcx, cy, w, h = bbox\r\n\r\n\t\t\t# Paper's Eqs. 12 and 15\r\n\t\t\twc_xz \t= w + context_amount * (w + h) \t# w + p, where p = (w+h)/2\r\n\t\t\thc_xz \t= h + context_amount * (w + h)\t# h + p, where p = (w+h)/2\r\n\t\t\ts_z \t= int(np.sqrt(wc_xz * hc_xz))\t# s_z = A\r\n\t\t\tscale_z\t= size_z / s_z\r\n\t\t\t#s_x \t= s_z * size_x / size_z # -> approx 2*A \r\n\t\t\ts_x \t= s_z * 2\t# 2*A\r\n\r\n\t\t\treturn s_z, s_x, scale_z\r\n\r\n\t\t# ------------- Template ------------- #\r\n\t\ttemplate_img \t\t= Image.open(self.ret['template_img_path'])\r\n\t\ttemplate_img \t\t= np.array(template_img)\r\n\t\ttemplate_img_mean \t= np.mean(template_img, axis=(0, 1))\r\n\r\n\t\ts_z, s_x, scale = window_size(self.ret['template_target_xywh'],\r\n\t\t\tconfig.template_img_size, config.detection_img_size, config.context)\r\n\r\n\t\ttemplate_crop_img, scale_z = self.crop_and_pad(template_img, self.ret['template_target_xywh'],\r\n\t\t\tconfig.template_img_size, s_z, 'Template', template_img_mean)\r\n\r\n\t\tself.ret['template_crop_img'] = template_crop_img\r\n\r\n\t\tif self.check:\r\n\t\t\tcheck_dir_path = os.path.join(self.tmp_dir, '1_check_template_detection_bb_in padding')\r\n\t\t\tif not os.path.exists(check_dir_path):\r\n\t\t\t\tos.makedirs(check_dir_path)\r\n\r\n\t\t\ttemplate_img = Image.fromarray(self.ret['template_crop_img'].copy(),'RGB')\r\n\t\t\tsave_path \t = os.path.join(check_dir_path, 'idx_{:04d}_template_cropped_resized.jpg'.format(self.count))\r\n\t\t\ttemplate_img.save(save_path)\r\n\r\n\t\t# ------------- Detection ------------- #\r\n\t\tdetection_img \t \t= Image.open(self.ret['detection_img_path'])\r\n\t\tdetection_img \t\t= np.array(detection_img)\r\n\t\tdetection_img_mean\t= np.mean(detection_img, axis=(0, 1))\r\n\t\tcx, cy, w, h \t\t= self.ret['detection_target_xywh']\r\n\r\n\t\tdetection_crop_img, scale_x = self.crop_and_pad(detection_img, self.ret['detection_target_xywh'],\r\n\t\t\tconfig.detection_img_size, s_x, 'Detection', detection_img_mean)\r\n\r\n\t\tsize_x \t= config.detection_img_size\r\n\t\tw_x \t= w * scale_x\r\n\t\th_x \t= h * scale_x\r\n\r\n\t\tx1, y1 \t= int(round((size_x + 1) / 2 - w_x / 2)), int(round((size_x + 1) / 2 - h_x / 2))\r\n\t\tx2, y2 \t= int(round((size_x + 1) / 2 + w_x / 2)), int(round((size_x + 1) / 2 + h_x / 2))\r\n\t\tcx \t\t= int(round(x1 + w_x / 2))\r\n\t\tcy \t\t= int(round(y1 + h_x / 2))\r\n\r\n\r\n\t\tself.ret['detection_crop_img'] \t\t\t= detection_crop_img\r\n\t\tself.ret['detection_crop_resized_xywh']\t= np.array((cx, cy, w_x, h_x), dtype = np.int16)\r\n\r\n\r\n\t\tif self.check:\r\n\t\t\tdetection_img = Image.fromarray(self.ret['detection_crop_img'].copy(),'RGB')\r\n\t\t\tsave_path \t = os.path.join(check_dir_path, 'idx_{:04d}_detection_padding_resized.jpg'.format(self.count))\r\n\t\t\tdetection_img.save(save_path)\r\n\r\n\t\t\tx, y, w, h \t= self.ret['detection_crop_resized_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= ImageDraw.Draw(detection_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=2, 
fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_detection_padding_resized_bb.jpg'.format(self.count))\r\n\t\t\tdetection_img.save(save_path)\r\n\r\n\r\n\tdef crop_and_pad(self, img, bbox, model_sz, original_sz, img_type, img_mean = None):\r\n\r\n\t\tdef round_up(value):\r\n\t\t\treturn round(value + 1e-6 + 1000) - 1000\r\n\r\n\t\tcx, cy, w, h \t= bbox\r\n\t\timg_h, img_w, k = img.shape\r\n\t\t\r\n\t\txmin = cx - (original_sz - 1) / 2\r\n\t\txmax = xmin + original_sz - 1\r\n\t\tymin = cy - (original_sz - 1) / 2\r\n\t\tymax = ymin + original_sz - 1\r\n\r\n\t\tleft \t= int(round_up(max(0., -xmin)))\r\n\t\ttop \t= int(round_up(max(0., -ymin)))\r\n\t\tright \t= int(round_up(max(0., xmax - img_w + 1)))\r\n\t\tbottom \t= int(round_up(max(0., ymax - img_h + 1)))\r\n\r\n\t\txmin = int(round_up(xmin + left))\r\n\t\txmax = int(round_up(xmax + left))\r\n\t\tymin = int(round_up(ymin + top))\r\n\t\tymax = int(round_up(ymax + top))\r\n\r\n\t\tif any([top, bottom, left, right]):\r\n\t\t\tret_img = np.zeros((img_h + top + bottom, img_w + left + right, k), np.uint8)\r\n\t\t\tret_img[top:top + img_h, left:left + img_w, :] = img\r\n\t\t\tif top: \r\n\t\t\t\tret_img[0:top, left:left + img_w, :] = img_mean\r\n\t\t\tif bottom:\r\n\t\t\t\tret_img[img_h + top:, left:left + img_w, :] = img_mean\r\n\t\t\tif left:\r\n\t\t\t\tret_img[:, 0:left, :] = img_mean\r\n\t\t\tif right:\r\n\t\t\t\tret_img[:, img_w + left:, :] = img_mean\r\n\t\t\timg_patch_original = ret_img[int(ymin):int(ymax + 1), int(xmin):int(xmax + 1), :]\r\n\t\telse:\r\n\t\t\timg_patch_original = img[int(ymin):int(ymax + 1), int(xmin):int(xmax + 1), :]\r\n\r\n\t\tif not np.array_equal(model_sz, original_sz):\r\n\t\t\timg_patch = cv2.resize(img_patch_original, (model_sz, model_sz))\r\n\t\telse:\r\n\t\t\timg_patch = img_patch_original\r\n\r\n\t\tscale = model_sz / img_patch_original.shape[0]\r\n\r\n\t\treturn img_patch, scale\r\n\r\n\r\n\tdef pick_pos_neg_anchors(self):\r\n\t\tnorm_anchors, pos_neg_anchors = self.gen_anchors.pos_neg_anchors(self.ret['detection_crop_resized_xywh'])\r\n\r\n\t\tself.ret['norm_anchors'] \t= norm_anchors\r\n\t\tself.ret['pos_neg_anchors']\t= pos_neg_anchors\r\n\r\n\t\tif self.check:\r\n\t\t\tcheck_dir_path = os.path.join(self.tmp_dir, '2_check_anchor_boxes')\r\n\t\t\tif not os.path.exists(check_dir_path):\r\n\t\t\t\tos.makedirs(check_dir_path)\r\n\r\n\r\n\t\t\tdetection_img = Image.fromarray(self.ret['detection_crop_img'].copy(),'RGB')\r\n\t\t\tdetection_img_all_anchors = detection_img.copy()\r\n\t\t\tdraw \t\t = ImageDraw.Draw(detection_img_all_anchors)\r\n\t\t\tx, y, w, h \t = self.ret['detection_crop_resized_xywh'].copy()\r\n\r\n\t\t\t# ------------- Draw all generated Anchor Boxes ------------- #\r\n\t\t\t# Transform anchors cx, cy, w, h => (x1,y1) (x2,y2)\r\n\t\t\tanchor_x1 = self.anchors[:, 0] - self.anchors[:, 2] / 2 + 0.5\r\n\t\t\tanchor_y1 = self.anchors[:, 1] - self.anchors[:, 3] / 2 + 0.5\r\n\t\t\tanchor_x2 = self.anchors[:, 0] + self.anchors[:, 2] / 2 - 0.5\r\n\t\t\tanchor_y2 = self.anchors[:, 1] + self.anchors[:, 3] / 2 - 0.5\r\n\r\n\t\t\tfor idx in range(self.anchors.shape[0]):\r\n\t\t\t\tan_x1, an_y1, an_x2, an_y2 = anchor_x1[idx], anchor_y1[idx], anchor_x2[idx], anchor_y2[idx]\r\n\t\t\t\tdraw.line([(an_x1, an_y1), (an_x2, an_y1), (an_x2, an_y2), (an_x1, an_y2), (an_x1, an_y1)], width=1, fill='blue')\r\n\r\n\t\t\tsave_path = os.path.join(check_dir_path, 'idx_{:04d}_detection_all_anchor_boxes.jpg'.format(self.count))\r\n\t\t\tdetection_img_all_anchors.save(save_path)\r\n\r\n\t\t\t# 
------------- Draw positive and negative Anchor Boxes ------------- #\r\n\t\t\tdetection_img_pos_neg_anchors = detection_img.copy()\r\n\t\t\tdraw = ImageDraw.Draw(detection_img_pos_neg_anchors)\r\n\r\n\t\t\tanchor_labels = self.ret['pos_neg_anchors']\r\n\t\t\tpos_index = np.where(anchor_labels == 1)[0]\r\n\t\t\tneg_index = np.where(anchor_labels == 0)[0]\r\n\r\n\t\t\tfor idx, pos_idx in enumerate(pos_index):\r\n\t\t\t\tan_x1, an_y1, an_x2, an_y2 = anchor_x1[pos_idx], anchor_y1[pos_idx], anchor_x2[pos_idx], anchor_y2[pos_idx]\r\n\t\t\t\tdraw.line([(an_x1, an_y1), (an_x2, an_y1), (an_x2, an_y2), (an_x1, an_y2), (an_x1, an_y1)], width=1, fill='green')\r\n\t\t\tsave_path = os.path.join(check_dir_path, 'idx_{:04d}_detection_pos_anchor_boxes.jpg'.format(self.count))\r\n\t\t\tdetection_img_pos_neg_anchors.save(save_path)\r\n\r\n\t\t\tfor idx, neg_idx in enumerate(neg_index):\r\n\t\t\t\tan_x1, an_y1, an_x2, an_y2 = anchor_x1[neg_idx], anchor_y1[neg_idx], anchor_x2[neg_idx], anchor_y2[neg_idx]\r\n\t\t\t\tdraw.line([(an_x1, an_y1), (an_x2, an_y1), (an_x2, an_y2), (an_x1, an_y2), (an_x1, an_y1)], width=1, fill='red')\r\n\t\t\tsave_path = os.path.join(check_dir_path, 'idx_{:04d}_detection_pos_neg_anchor_boxes.jpg'.format(self.count))\r\n\t\t\tdetection_img_pos_neg_anchors.save(save_path)\r\n\r\n\r\n\tdef transform(self):\r\n\t\ttransform \t\t = transforms.Compose([transforms.ToTensor()])\r\n\r\n\t\ttemplate_tensor = transform(self.ret['template_crop_img'].copy())\r\n\t\tdetection_tensor = transform(self.ret['detection_crop_img'].copy())\r\n\r\n\t\tself.ret['template_tensor'] \t\t= template_tensor #shape = [1, 3, 127, 127]\r\n\t\tself.ret['detection_tensor'] \t\t= detection_tensor\r\n\t\tself.ret['norm_anchors_tensor']\t \t= torch.Tensor(self.ret['norm_anchors'])\r\n\t\tself.ret['pos_neg_anchors_tensor']\t= torch.Tensor(self.ret['pos_neg_anchors'])\r\n\r\n\r\n\tdef __len__(self):\r\n\t\treturn(207)\r\n\t\t#return len(self.sub_class_dir)\r\n\r\n\tdef __getitem__(self, index):\r\n\t\tself.VOT_sub_class_img_pairs(index)\r\n\t\tself.imgs_pre_processing()\r\n\t\tself.pick_pos_neg_anchors()\r\n\t\tself.transform()\r\n\t\tself.count += 1\r\n\t\t\r\n\t\treturn self.ret['template_tensor'], self.ret['detection_tensor'], self.ret['norm_anchors_tensor'], self.ret['pos_neg_anchors_tensor']\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tdataset_path = 'S:\\\\Datasets\\\\VOT2013\\\\Data\\\\'\r\n\r\n\ttrain_loader = TrainDataLoader(dataset_path, check = True)\r\n\tindex_list \t = range(train_loader.__len__())\r\n\tfor i in range(1):\r\n\t\tprint('\\nImage ' + str(i))\r\n\t\ttrain_loader.__getitem__(random.choice(index_list))\r\n\t\r\n\r\n\r\n","sub_path":"data_train.py","file_name":"data_train.py","file_ext":"py","file_size_in_byte":21159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"522812235","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPeekaboo Download\n\"\"\"\n\nfrom scrappeekaboo import ScrapPeekaboo\nfrom dbmanager import DBManager\nfrom filemanager import FileManager\n\n\n# first run\ndef setup():\n db = DBManager()\n db.reset()\n\n\n# get album links\ndef get_album_links():\n db = DBManager()\n scrap = ScrapPeekaboo()\n scrap.db = db\n scrap.get_album_links()\n\n\n# get sources for files, comments ect\ndef get_album_content():\n db = DBManager()\n scrap = ScrapPeekaboo()\n scrap.db = db\n scrap.get_album_content(db.get_album_links())\n\n\n# download files\ndef download_files():\n db = DBManager()\n fm = FileManager()\n scrap = ScrapPeekaboo(fm)\n scrap.db 
= db\n    scrap.download_all_files()\n\n\n# view database\ndef view():\n    db = DBManager()\n    db.sel(\"SELECT * FROM File \")\n    db.sel(\"SELECT * FROM Comment \")\n\n\n# view()\n# get_album_links()\n# get_album_content()\ndownload_files()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"563207986","text":"\"\"\"Unit tests for the data model.\"\"\"\n\nimport json\nimport unittest\n\n\nclass DataModelTest(unittest.TestCase):\n    \"\"\"Unit tests for the data model.\"\"\"\n\n    def setUp(self):\n        with open(\"datamodel.json\") as datamodel_json:\n            self.datamodel = json.load(datamodel_json)\n\n    def test_top_level_keys(self):\n        \"\"\"Test that the top level keys are correct.\"\"\"\n        self.assertEqual(set([\"metrics\", \"subjects\", \"sources\"]), set(self.datamodel.keys()))\n\n    def test_metrics_have_sources(self):\n        \"\"\"Test that each metric has one or more sources.\"\"\"\n        for metric in self.datamodel[\"metrics\"].values():\n            self.assertTrue(len(metric[\"sources\"]) >= 1)\n\n    def test_source_parameter_metrics(self):\n        \"\"\"Test that the metrics listed for source parameters are metrics supported by the source.\"\"\"\n        for source_id, source in self.datamodel[\"sources\"].items():\n            for parameter in source[\"parameters\"].values():\n                for metric in parameter[\"metrics\"]:\n                    self.assertTrue(source_id in self.datamodel[\"metrics\"][metric][\"sources\"])\n\n    def test_metric_source_parameters(self):\n        \"\"\"Test that the sources have at least one parameter for each metric supported by the source.\"\"\"\n        for metric_id, metric in self.datamodel[\"metrics\"].items():\n            parameter_metrics = []\n            for source in metric[\"sources\"]:\n                for parameter in self.datamodel[\"sources\"][source][\"parameters\"].values():\n                    parameter_metrics.extend(parameter[\"metrics\"])\n            self.assertTrue(metric_id in parameter_metrics)\n\n    def test_multiple_choice_parameters(self):\n        \"\"\"Test that multiple choice parameters have both a default value and a list of options.\"\"\"\n        for source in self.datamodel[\"sources\"].values():\n            for parameter in source[\"parameters\"].values():\n                if parameter[\"type\"] == \"multiple_choice\":\n                    self.assertTrue(\"default_value\" in parameter)\n                    self.assertTrue(\"values\" in parameter)\n\n    def test_addition(self):\n        \"\"\"Test that each metric has its addition defined correctly.\"\"\"\n        for metric in self.datamodel[\"metrics\"].values():\n            self.assertTrue(metric[\"addition\"] in (\"max\", \"min\", \"sum\"))\n\n    def test_mandatory_parameters(self):\n        \"\"\"Test that each source parameter has a mandatory field with a true or false value.\"\"\"\n        for source_id, source in self.datamodel[\"sources\"].items():\n            for parameter_id, parameter_values in source[\"parameters\"].items():\n                self.assertTrue(\n                    \"mandatory\" in parameter_values,\n                    f\"The parameter '{parameter_id}' of source '{source_id}' has no 'mandatory' field\")\n                self.assertTrue(\n                    parameter_values[\"mandatory\"] in (True, False),\n                    f\"The 'mandatory' field of parameter '{parameter_id}' of source '{source_id}' is neither \"\n                    \"true nor false\")\n\n    def test_default_source(self):\n        \"\"\"Test that each metric has a default source, and that the default source is listed as a possible source.\"\"\"\n        for metric in self.datamodel[\"metrics\"].values():\n            self.assertTrue(metric[\"default_source\"] in 
metric[\"sources\"])\n","sub_path":"components/server/tests/unittests/test_datamodel.py","file_name":"test_datamodel.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"506023567","text":"\"\"\"\nGraphic shows a sample comparison of contribution maps for SAI vs. SNR of TREFHT\n\nAuthor : Zachary M. Labe\nDate : 11 August 2022\nVersion : 1 - testing ANN architectures for calculating years since SAI\n\"\"\"\n\n### Import packages\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as c\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport cmocean\nimport cmasher as cmr\nimport numpy as np\nimport calc_Utilities as UT\nimport calc_Stats as dSS\nimport calc_dataFunctions as df\n\n### Plotting defaults \nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n###############################################################################\n###############################################################################\n###############################################################################\n### Data preliminaries \nletters = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"o\",\"p\"]\nreg_nameq = ['Globe','NH','SH','Arctic','Antarctic','narrowTropics','SEAsia','NorthAfrica','Amazon']\nlabels = ['Globe','N. Hemisphere','S. Hemisphere','Arctic','Antarctic','Tropics','Southeast Asia','Central Africa','Amazon']\nregionboxes = ['Arctic','Antarctic','narrowTropics','SEAsia','NorthAfrica','Amazon']\ndirectorydata = '/Users/zlabe/Documents/Research/SolarIntervention/Data/'\ndirectoryfigure = '/Users/zlabe/Documents/Research/SolarIntervention/Figures/'\n###############################################################################\n###############################################################################\nmodelGCMs = ['ARISE']\ndatasetsingle = ['ARISE']\nseasons = ['annual']\nmonthlychoice = seasons[0]\n###############################################################################\n###############################################################################\nland_only = True\nocean_only = False\nravelyearsbinary = False\nravelbinary = False\nlensalso = True\nrandomalso = False\nravel_modelens = False\nravelmodeltime = False\ntimeper = 'historical'\nshuffletype = 'GAUSS'\n###############################################################################\n###############################################################################\nyearsall = np.arange(2035,2069+1,1)\nyearsarise = np.arange(2035,2069+1,1)\nnumOfEns = 10\ndataset_obs = 'ERA5'\n###############################################################################\n###############################################################################\nnum_of_class = len(modelGCMs)\nensTypeExperi = 'ENS'\n###############################################################################\n###############################################################################\n############################################################################### \n### Read in data\nreg_name = 'Globe'\nlat_bounds,lon_bounds = UT.regions(reg_name)\n\ncont = np.load(directorydata + 'TREFHT-SAI_period2Contributions.npz')\ncontribution = cont['cont'][:]\nlats = cont['lat'][:]\nlons = cont['lon'][:]\nsnr = np.load(directorydata + 'TREFHT-SAI_period2SNR.npz')\nsnrall = snr['snr']\n\nlon2,lat2 = 
np.meshgrid(lons,lats)\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Graphs\ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([])\ndef setcolor(x, color):\n for m in x:\n for t in x[m][1]:\n t.set_color(color)\n \nfig = plt.figure(figsize=(9,3))\nax = plt.subplot(121)\n\nvar = contribution\nlimit = np.arange(-0.005,0.00501,0.0001)\nbarlim = np.round(np.arange(-0.005,0.00501,0.005),3)\nlabel = r'\\textbf{SAI-1.5 Temperature [Input$\\times$Weights]}'\n\nm = Basemap(projection='robin',lon_0=0,resolution='l',area_thresh=10000)\nm.drawcoastlines(color='darkgrey',linewidth=0.4)\n\nparallels = np.arange(-90,91,30)\nmeridians = np.arange(-180,180,60)\npar=m.drawparallels(parallels,labels=[False,False,False,False],linewidth=0.3,\n color='w',fontsize=4,zorder=40)\nmer=m.drawmeridians(meridians,labels=[False,False,False,False],linewidth=0.3,\n fontsize=4,color='w',zorder=40)\n\ncircle = m.drawmapboundary(fill_color='dimgray',color='dimgray',\n linewidth=1)\ncircle.set_clip_on(False)\n\ncs1 = m.contourf(lon2,lat2,var,limit,extend='both',latlon=True)\n\ncs1.set_cmap(cmocean.cm.balance)\nm.drawlsmask(land_color=(0,0,0,0),ocean_color='dimgray',lakes=False,zorder=11)\n\ncbar1 = plt.colorbar(cs1,orientation='horizontal',\n extend='both',extendfrac=0.07,drawedges=False,\n fraction=0.03,pad=0.06)\ncbar1.set_label(label,fontsize=10,color='dimgrey',labelpad=2) \ncbar1.set_ticks(barlim)\ncbar1.set_ticklabels(list(map(str,barlim)))\ncbar1.ax.tick_params(axis='x', size=.01,labelsize=8)\ncbar1.outline.set_edgecolor('dimgrey')\n\n###############################################################################\n###############################################################################\n###############################################################################\nax = plt.subplot(122)\n\nvar = snrall\nlimit = np.arange(0,2.1,0.25)\nbarlim = np.round(np.arange(0,2.1,1),2)\n\nlabel = r'\\textbf{SAI-1.5 Temperature [Signal-To-Noise]}'\n\nm = Basemap(projection='robin',lon_0=0,resolution='l',area_thresh=10000)\nm.drawcoastlines(color='darkgrey',linewidth=0.4)\n\nparallels = np.arange(-90,91,30)\nmeridians = np.arange(-180,180,60)\npar=m.drawparallels(parallels,labels=[False,False,False,False],linewidth=0.3,\n color='w',fontsize=4,zorder=40)\nmer=m.drawmeridians(meridians,labels=[False,False,False,False],linewidth=0.3,\n fontsize=4,color='w',zorder=40)\n\ncircle = m.drawmapboundary(fill_color='dimgray',color='dimgray',\n linewidth=1)\ncircle.set_clip_on(False)\n\ncs1 = m.contourf(lon2,lat2,var,limit,extend='max',latlon=True)\n\ncs1.set_cmap(cmr.torch)\nm.drawlsmask(land_color=(0,0,0,0),ocean_color='dimgray',lakes=False,zorder=11)\n\ncbar1 = plt.colorbar(cs1,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False,\n fraction=0.03,pad=0.06)\ncbar1.set_label(label,fontsize=10,color='dimgrey',labelpad=1.1) \ncbar1.set_ticks(barlim)\ncbar1.set_ticklabels(list(map(str,barlim)))\ncbar1.ax.tick_params(axis='x', 
size=.01,labelsize=8)\ncbar1.outline.set_edgecolor('dimgrey')\n\nplt.tight_layout()\n###############################################################################\n###############################################################################\n###############################################################################\n### Add text\nplt.annotate(r'\\textbf{[%s]}' % letters[0],\n textcoords='figure fraction',\n xy=(0,0), xytext=(0.485,0.87),\n fontsize=10,color='k',alpha=1,ha='right')\nplt.annotate(r'\\textbf{[%s]}' % letters[1],\n textcoords='figure fraction',\n xy=(0,0), xytext=(0.98,0.87),\n fontsize=10,color='k',alpha=1,ha='right')\n\nplt.savefig(directoryfigure + 'SampleMaps_SNR-Contributions_TREFHT_period2.png',dpi=1000)\n","sub_path":"Scripts/plot_SampleMaps_SNR-Contributions_TREFHT_period2.py","file_name":"plot_SampleMaps_SNR-Contributions_TREFHT_period2.py","file_ext":"py","file_size_in_byte":7552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"560900183","text":"from flask import Flask, render_template, request,redirect, url_for\nimport pickle\n\n#import nltk\n#nltk.download('stopwords');\n#nltk.download('punkt');\nimport spacy\nnlp = spacy.load('en_core_web_sm');\n\nKW_dict = pickle.load(open(\"Keyword.pkl\", \"rb\"))\nKW_dict_s = pickle.load(open(\"Keyword_s.pkl\", \"rb\"))\nTopic_dict = pickle.load(open(\"Topic.pkl\", \"rb\"))\n\n#from spacy.lang.en.stop_words import STOP_WORDS\n#stopwords = list(STOP_WORDS)\n#stopwords = nltk.corpus.stopwords.words('english')\n#stopwords.extend(['the','i','you','a','c','slu','them','she','he','company'])\n#stopwords.extend(KW_dict.keys())\n\ndef get_recommendation(user_prf, KW_dict):\n '''\n def stopword_RMV(sent):\n res = []\n for word in sent.split():\n if word.lower() not in stopwords:\n res.append(word)\n return ' '.join(res)\n '''\n #doc0 = nlp(stopword_RMV(user_prf))\n doc0 = nlp(user_prf)\n score_dict = {}\n for k, v in KW_dict.items():\n temp_doc = nlp(v)\n score_dict[k] = doc0.similarity(temp_doc)\n\n sorted_score = sorted(score_dict.items(), key=lambda kv: kv[1], reverse=True)\n\n rcm_company = []\n for i in range(5):\n rcm_company.append('#' + str(i + 1) + ': ' + str(sorted_score[i][0].capitalize()))\n\n return rcm_company\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('login.html')\n\n@app.route('/login', methods=['POST','GET'])\ndef login():\n if request.method == 'POST':\n target_company = request.form['target_company']\n target = target_company.lower()\n company = target_company.capitalize()\n return render_template('graph.html', company=company, target=target)\n if request.method == 'GET':\n user_input = request.args.get('user_input')\n return rcm(user_input)\n #return redirect(url_for('rcm', sent = user_input))\n return render_template('login.html')\n\n@app.route('/graph')\ndef graph():\n\n return render_template('graph.html')\n\n@app.route('/rcm')\ndef rcm(sent,KW_dict = KW_dict_s):\n rcm_company=get_recommendation(user_prf=sent, KW_dict=KW_dict)\n c0 = rcm_company[0].split(':')[-1]\n c1 = rcm_company[1].split(':')[-1]\n c2 = rcm_company[2].split(':')[-1]\n c3 = rcm_company[3].split(':')[-1]\n c4 = rcm_company[4].split(':')[-1]\n return render_template('rcm.html',sent=sent,c0=c0,c1=c1,c2=c2,c3=c3,c4=c4)\n\n\nif __name__ == '__main__':\n 
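# NOTE: debug=True turns on Flask's auto-reloader and interactive debugger; disable it for production deployments\n    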
app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"572550024","text":"#!/usr/bin/env python3.6\r\n#-*- coding:utf-8 -*-\r\n\r\n\r\nimport os\r\nimport requests\r\n# import urllib.request\r\nimport redis\r\nimport random\r\nimport math\r\n\r\n\r\n\r\nheight = [2,3,10,5,7,8,9]\r\n\r\n# standard two-pointer solution: start from both ends and move the shorter\r\n# side inward; the previous middle-out expansion could miss optima that\r\n# require moving both endpoints\r\ni = 0\r\nj = len(height) - 1\r\nbest_i, best_j = i, j\r\narea = (j - i) * min(height[i], height[j])\r\n\r\nwhile i < j:\r\n    tmp_area = (j - i) * min(height[i], height[j])\r\n    if tmp_area > area:\r\n        area = tmp_area\r\n        best_i, best_j = i, j\r\n    if height[i] < height[j]:\r\n        i += 1\r\n    else:\r\n        j -= 1\r\n\r\nprint(area, best_i, best_j)\r\n\r\n    ","sub_path":"python3/11.盛最多水的容器.py","file_name":"11.盛最多水的容器.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"603644084","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\n\n\ndef callback(data):\n    rospy.loginfo(\"movement: \" + str(data.linear.x))\n\n\ndef listener():\n    rospy.init_node('display', anonymous=True)\n    rospy.Subscriber(\"cmd_vel\", Twist, callback)\n    rospy.spin()\n\n\nif __name__ == '__main__':\n    listener()","sub_path":"src/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"402208308","text":"from ravestate.module import Module\nfrom ravestate.constraint import s\nfrom ravestate.property import PropertyBase\nfrom ravestate.state import state\nfrom ravestate.receptor import receptor\nfrom ravestate.wrappers import ContextWrapper\n\nimport ravestate_rawio\nimport ravestate_interloc\nfrom ravestate_verbaliser.verbaliser import get_phrase_list\nimport ravestate_phrases_basic_en\nimport ravestate_ontology\n\nfrom scientio.ontology.node import Node\nfrom scientio.session import Session\nfrom scientio.ontology.ontology import Ontology\n\nfrom reggol import get_logger\nlogger = get_logger(__name__)\n\nDEFAULT_INTERLOC_ID = \"terminal_user\"\n\nwith Module(name=\"consoleio\"):\n\n    @state(cond=s(\":startup\"), read=\"interloc:all\")\n    def console_input(ctx: ContextWrapper):\n\n        @receptor(ctx_wrap=ctx, write=\"rawio:in\")\n        def write_console_input(ctx_input, value: str):\n            ctx_input[\"rawio:in\"] = value\n\n        @receptor(ctx_wrap=ctx, write=\"interloc:all\")\n        def push_console_interloc(ctx: ContextWrapper, console_node: Node):\n            if ctx.push(parentpath=\"interloc:all\", child=PropertyBase(name=DEFAULT_INTERLOC_ID, default_value=console_node)):\n                logger.debug(f\"Pushed {console_node} to interloc:all\")\n\n        @receptor(ctx_wrap=ctx, write=\"interloc:all\")\n        def pop_console_interloc(ctx: ContextWrapper):\n            if ctx.pop(f\"interloc:all:{DEFAULT_INTERLOC_ID}\"):\n                logger.debug(f\"Popped interloc:all:{DEFAULT_INTERLOC_ID}\")\n\n        while not ctx.shutting_down():\n            input_value = input(\"> \")\n            write_console_input(input_value)\n\n            console_interloc_exists = f\"interloc:all:{DEFAULT_INTERLOC_ID}\" in ctx.enum(\"interloc:all\")\n            # push Node if you got a greeting\n            if input_value.strip() in get_phrase_list(\"greeting\") and not console_interloc_exists:\n                # set up scientio\n                sess: Session = 
ravestate_ontology.get_session()\n            onto: Ontology = ravestate_ontology.get_ontology()\n\n            # create scientio Node of type Person\n            query = Node(metatype=onto.get_type(\"Person\"))\n            query.set_name(\"x\")\n            console_node_list = sess.retrieve(query)\n            if not console_node_list:\n                console_node = sess.create(query)\n                logger.info(f\"Created new Node in scientio session: {console_node}\")\n            elif len(console_node_list) == 1:\n                console_node = console_node_list[0]\n            else:\n                logger.error(f'Found multiple Persons with name {DEFAULT_INTERLOC_ID} in scientio session. Cannot push node to interloc:all!')\n                continue\n\n            # push interloc-Node\n            push_console_interloc(console_node)\n\n        # pop Node if you got a farewell\n        elif input_value.strip() in get_phrase_list(\"farewells\") and console_interloc_exists:\n            pop_console_interloc()\n\n\n    @state(read=\"rawio:out\")\n    def console_output(ctx):\n        print(ctx[\"rawio:out\"])\n","sub_path":"modules/ravestate_conio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"586884783","text":"from selenium import webdriver\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nimport selenium.webdriver.support.expected_conditions as ec\r\nfrom selenium.webdriver.common.by import By\r\nimport openpyxl\r\nfrom openpyxl.styles import Font, Color, colors\r\nfrom datetime import date\r\nimport time\r\nimport os\r\nimport tkinter\r\nfrom tkinter import Tk\r\nfrom tkinter import Label\r\nfrom tkinter import LEFT\r\nfrom tkinter import RIGHT\r\nfrom tkinter import Entry\r\nfrom tkinter import Button\r\nimport tkinter as tk\r\n\r\ndef letsgo():\r\n    job=e1.get()\r\n    link=e2.get()\r\n    def valuegrab(homovalue):\r\n        nounits=\"\"\r\n        for letter in homovalue:\r\n            if letter !=\" \":\r\n                nounits+=letter\r\n            elif letter ==\" \":\r\n                return float(nounits)\r\n    #wait for page to load\r\n    #blastresults=input('Paste link to BLAST results here:\\n')\r\n    #print('Opening BLAST Results Link...')\r\n    driver= webdriver.Chrome()\r\n    wait= WebDriverWait(driver, 1000)\r\n    driver.get(link)\r\n    driver.maximize_window()\r\n    wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"alignments\"]/div[1]/table/tbody/tr[2]/td[1]')))\r\n    #importing the primers from BLAST\r\n    #done=input('Press \"Enter\" once Primer-BLASTing is complete.')\r\n    print('Extracting Results...')\r\n    primerlib={}\r\n    primer1=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[1]/table/tbody/tr[2]/td[1]').text\r\n    primerlib[primer1]=[]\r\n    primer2=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[1]/table/tbody/tr[3]/td[1]').text\r\n    primerlib[primer2]=[]\r\n    primer3=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[2]/table/tbody/tr[2]/td[1]').text\r\n    primerlib[primer3]=[]\r\n    primer4=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[2]/table/tbody/tr[3]/td[1]').text\r\n    primerlib[primer4]=[]\r\n    primer5=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[3]/table/tbody/tr[2]/td[1]').text\r\n    primerlib[primer5]=[]\r\n    primer6=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[3]/table/tbody/tr[3]/td[1]').text\r\n    primerlib[primer6]=[]\r\n    primer7=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[4]/table/tbody/tr[2]/td[1]').text\r\n    primerlib[primer7]=[]\r\n    primer8=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[4]/table/tbody/tr[3]/td[1]').text\r\n    primerlib[primer8]=[]\r\n    
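# primers 9-20 below are scraped the same way: one Primer-BLAST result block per pair, rows 2 and 3 of each table (presumably forward/reverse)\r\n    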
primer9=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[5]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer9]=[]\r\n primer10=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[5]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer10]=[]\r\n primer11=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[6]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer11]=[]\r\n primer12=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[6]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer12]=[]\r\n primer13=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[7]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer13]=[]\r\n primer14=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[7]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer14]=[]\r\n primer15=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[8]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer15]=[]\r\n primer16=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[8]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer16]=[]\r\n primer17=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[9]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer17]=[]\r\n primer18=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[9]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer18]=[]\r\n primer19=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[10]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer19]=[]\r\n primer20=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[10]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer20]=[]\r\n\r\n print('Opening IDT Oligo Analyzer...')\r\n #Set up Web driver\r\n driver.get('https://www.idtdna.com/calc/analyzer/')\r\n driver.maximize_window()\r\n #wait.until(ec.presence_of_element_located((By.XPATH,'//*[@id=\"modal-holiday\"]/div/div/div[3]/a')))\r\n #dumbbutton=driver.find_element_by_xpath('//*[@id=\"modal-holiday\"]/div/div/div[3]/a')\r\n #dumbbutton.click()\r\n #wait.until(ec.visibility_of_element_located((By.ID,'UserName')))\r\n #sign into IDT\r\n username=driver.find_element_by_id('UserName')\r\n username.send_keys(e3.get())\r\n password=driver.find_element_by_id('Password')\r\n password.send_keys(e4.get())\r\n login=driver.find_element_by_id('login-button')\r\n login.click()\r\n #wait for the page to load\r\n wait= WebDriverWait(driver, 1000)\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"textarea-sequence\"]')))\r\n #grab the Text Box\r\n oligoinput=driver.find_element_by_xpath('//*[@id=\"textarea-sequence\"]')\r\n #Collect Hairpin, Homodimer, and Heterodimer Values for Each Primer\r\n print('Analyzing Primers...')\r\n x=0\r\n primerlist=[]\r\n for item in primerlib:\r\n primerlist.append(item)\r\n for primer in primerlib:\r\n #clear the textbox\r\n driver.find_element_by_xpath('//*[@id=\"textarea-sequence\"]').clear()\r\n #enter the Primer Seq\r\n oligoinput.send_keys(primer)\r\n #Melt Temp Analyze\r\n Tm=driver.find_element_by_id('analyze-button')\r\n Tm.click()\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[1]/div[3]/div/div/table/tbody/tr[5]/td[2]/span')))\r\n melted=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[1]/div[3]/div/div/table/tbody/tr[5]/td[2]/span').text\r\n melted=valuegrab(melted)\r\n primerlib[primer].append(melted)\r\n #Hairpin Analyze\r\n checkhp=driver.find_element_by_id('hairpin-button')\r\n checkhp.click()\r\n #wait for it to load the data\r\n 
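# the explicit WebDriverWait below polls the DOM until the delta-G cell renders (it reuses the 1000 s timeout configured above)\r\n        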
wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[2]/div[7]/div/div/table/tbody/tr[2]/td[3]')))\r\n #grab hairpin value\r\n hairpinvalue=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[2]/div[7]/div/div/table/tbody/tr[2]/td[3]').text\r\n hairpinvalue=float(hairpinvalue)\r\n primerlib[primer].append(hairpinvalue)\r\n #Homodimer Analyze\r\n checkhod=driver.find_element_by_xpath('//*[@id=\"rmenu\"]/div/div[6]/button')\r\n checkhod.click()\r\n #Grab Homodimer Value\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[3]/div[4]/span[1]')))\r\n homodimervalue=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[3]/div[4]/span[1]').text\r\n homodimervalue=valuegrab(homodimervalue)\r\n #Add Homodimer Value to Library\r\n primerlib[primer].append(homodimervalue)\r\n #Check out the Heterodimer Values\r\n if x%2 ==0:\r\n heterobutton=driver.find_element_by_xpath('//*[@id=\"rmenu\"]/div/div[8]/button')\r\n heterobutton.click()\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[4]/div[3]/div[4]/div/div/textarea')))\r\n heterotext=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[4]/div[3]/div[4]/div/div/textarea')\r\n heterotext.send_keys(primerlist[x+1])\r\n heterobutton=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[4]/div[3]/div[5]/div/div/button[2]')\r\n heterobutton.click()\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[4]/div[6]/span[1]')))\r\n heterovalue=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[4]/div[6]/span[1]').text\r\n heterovalue=valuegrab(heterovalue)\r\n primerlib[primer].append(heterovalue)\r\n x+=1\r\n today=date.today()\r\n print('Evaluating Primers...')\r\n #check if they're actually solid Primers\r\n x=0\r\n for primer in primerlib:\r\n gold=0\r\n if primerlib[primer][1] >=-1:\r\n gold+=1\r\n if primerlib[primer][2] >-4:\r\n gold+=1\r\n if x%2==0:\r\n if primerlib[primer][3] >-4:\r\n gold +=1\r\n primerlib[primer].append(gold)\r\n x+=1\r\n\r\n goldlist=[]\r\n for primer in primerlib:\r\n goldlist.append(primerlib[primer][-1])\r\n\r\n hitlist=[]\r\n x=0\r\n for value in range(0,20):\r\n if x%2==0:\r\n combo=goldlist[value]+goldlist[value+1]\r\n hitlist.append(combo)\r\n x+=1\r\n\r\n best=max(hitlist)\r\n bestlistpos=[]\r\n y=0\r\n for value in hitlist:\r\n if value == best:\r\n bestlistpos.append(y)\r\n y+=1\r\n\r\n print(\"Exporting Results...\")\r\n #Open the Workbook\r\n wb=openpyxl.Workbook()\r\n sheet1=wb.active\r\n #Make the Template for the Exported Excel Sheet\r\n sheet1.cell(column=1, row=1).value=str(today)+\" \"+job+\" QuickPrime Results\"\r\n sheet1.cell(column=1, row=2).value=\"Sequences:\"\r\n sheet1.cell(column=2, row=2).value=\"Melting Temp:\"\r\n sheet1.cell(column=3, row=2).value=\"Hairpin Delta G:\"\r\n sheet1.cell(column=4, row=2).value=\"Homo-dimer Delta G:\"\r\n sheet1.cell(column=5, row=2).value=\"Hetero-dimer Delta G:\"\r\n #Export the Data from the dictionary\r\n x=0\r\n for row in range(3,23):\r\n seqrow=sheet1.cell(column=1, row=row)\r\n seqrow.value=primerlist[x]\r\n x+=1\r\n x=0\r\n for row in range(3,23):\r\n temprow=sheet1.cell(column=2, row=row)\r\n libkey=primerlist[x]\r\n temprow.value=primerlib[libkey][0]\r\n x+=1\r\n x=0\r\n for row in range(3,23):\r\n hairrow=sheet1.cell(column=3, row=row)\r\n libkey=primerlist[x]\r\n hairrow.value=primerlib[libkey][1]\r\n x+=1\r\n x=0\r\n for row in range(3,23):\r\n selfrow=sheet1.cell(column=4, 
row=row)\r\n        libkey=primerlist[x]\r\n        selfrow.value=primerlib[libkey][2]\r\n        x+=1\r\n    x=0\r\n    for row in range(3,23):\r\n        if x%2==0:\r\n            hetrow=sheet1.cell(column=5, row=row)\r\n            libkey=primerlist[x]\r\n            hetrow.value=primerlib[libkey][3]\r\n        x+=1\r\n    for pos in bestlistpos:\r\n        pos=(pos*2)+3\r\n        sheet1.cell(column=1, row=pos).font=Font(color=colors.BLUE)\r\n        sheet1.cell(column=1, row=pos+1).font=Font(color=colors.BLUE)\r\n\r\n    #Save the Workbook\r\n    print(\"Saving Excel Sheet...\")\r\n    today=date.today()\r\n    filename=str(today)+\" \"+job+' QuickPrime Results.xlsx'\r\n    wb.save('D:/QuickPrimeFiles/OutFiles/'+filename)\r\n    os.startfile('D:/QuickPrimeFiles/OutFiles/'+filename)\r\n    print('Done!')\r\n\r\n\r\n\r\n#GUI Stuff\r\nmaster = tk.Tk()\r\ntk.Label(master, text=\"Enter Job Title:\").grid(row=0)\r\ntk.Label(master, text=\"Enter Blast Results Link:\").grid(row=1)\r\ntk.Label(master, text=\"IDT Username:\").grid(row=2)\r\ntk.Label(master, text=\"IDT Password:\").grid(row=3)\r\n\r\ne1 = tk.Entry(master)\r\ne2 = tk.Entry(master)\r\ne3 = tk.Entry(master)\r\ne4 = tk.Entry(master)\r\n\r\ne1.grid(row=0, column=1)\r\ne2.grid(row=1, column=1)\r\ne3.grid(row=2, column=1)\r\ne4.grid(row=3, column=1)\r\n\r\ntk.Button(master, text='Quit', command=master.quit).grid(row=5, column=0, sticky=tk.W, pady=10, padx=10)\r\ntk.Button(master, text='Begin QuickPrime', command=letsgo).grid(row=5, column=1, sticky=tk.W, pady=10, padx=10)\r\n\r\n\r\ntk.mainloop()\r\n","sub_path":"GUIQuickPrime.py","file_name":"GUIQuickPrime.py","file_ext":"py","file_size_in_byte":11312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"294142447","text":"import pytest\nimport os\nimport sys\nimport json\n\nfrom ipwb import indexer\n\nfrom . import testUtil as ipwbTest\n\n\ndef isValidSURT(surt):\n    return True  # The surt library does not yet have a way to check this\n\n\ndef isValidDatetime(dt):\n    return len(dt) == 14 and dt.isdigit()\n\n\ndef isValidJSON(jsonIn):\n    try:\n        j = json.loads(json.dumps(jsonIn))\n    except ValueError:\n        return False\n    return True\n\n\ndef checkCDXJFields(cdxjEntry):\n    (surt, dt, json) = cdxjEntry.split(' ', 2)\n    validSURT = isValidSURT(surt)\n    validDT = isValidDatetime(dt)\n    validJSON = isValidJSON(json)\n\n    return validSURT and validDT and validJSON\n\n\ndef checkIPWBJSONFieldPresence(jsonStr):\n    keys = json.loads(jsonStr)\n    return 'locator' in keys and 'mime_type' in keys and 'status_code' in keys\n\n\n@pytest.mark.ipfsDaemonInteraction\ndef test_push():\n    \"\"\"\n    Read WARC, manipulate content to ensure uniqueness, push to IPFS\n    WARC should result in two CDXJ entries with three space-delimited fields\n    each: surt URI, datetime, JSON\n    JSON should contain AT LEAST locator, mime_type, and status fields\n    \"\"\"\n    newWARCPath = ipwbTest.createUniqueWARC()\n    # use ipwb indexer to push\n    cdxjList = indexer.indexFileAt(newWARCPath, quiet=True)\n    cdxj = '\\n'.join(cdxjList)\n\n    firstEntry = cdxj.split('\\n')[0]\n    firstNonMetadataEntry = ''\n    for line in cdxj.split('\\n'):\n        if line[0] != '!':\n            firstNonMetadataEntry = line\n            break\n\n    assert checkCDXJFields(firstNonMetadataEntry)\n    firstEntryLastField = firstNonMetadataEntry.split(' ', 2)[2]\n    assert checkIPWBJSONFieldPresence(firstEntryLastField)\n","sub_path":"tests/test_randomized_add.py","file_name":"test_randomized_add.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"604098202","text":"\n# coding: 
utf-8\n# some parameters should be adjusted here\nclass Constants(object):\n    def __init__(self, n_vocab):\n        self.Lr = 0.0001\n        self.Embedding_size = 250\n        self.Content_represent = 250\n        self.Style_represent = 500\n        self.Ey_filters = [1, 2, 3, 4, 5]\n        self.Ey_num_filters = 100\n        self.D_filters = [2, 3, 4, 5, 6]\n        self.D_num_filters = 100\n        self.Ds_filters = [1, 2, 3, 4]\n        self.Ds_num_filters = 100\n        self.Hidden_size = 248\n        self.N_vocab = n_vocab\n        self.Temper = 0.0001\n        self.Max_len = 40\n        self.Min_len = 6 # 6 is the max window size of the filters\n    \n\n","sub_path":"ipynb/Constant.py","file_name":"Constant.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"84908051","text":"import logging\nimport argparse\nimport socket\nfrom src.tcp_protobuf_pb2 import Request, Response\nimport threading\n\n\nclass Client():\n    def __init__(self, server_ip, server_tcp_port):\n        self.server_ip = server_ip\n        self.server_tcp_port = server_tcp_port\n\n    def recv(self, sock):\n        message = b''\n        while True:\n            data = sock.recv(1024)\n            if not data:\n                break\n            message += data\n        # result is the Response message received from the server\n        result = Response()\n        result.ParseFromString(message)\n        return result\n\n    def send(self, sock, strbits):\n        # first send the payload length so the receiver knows where the message ends\n        sock.sendall((str(len(strbits)) + '\\n\\n').encode('utf-8'))\n        # then send the actual message content\n        sock.sendall(strbits)\n\n    # each operation opens its own TCP connection\n    def get(self, key):\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        sock.connect((self.server_ip, self.server_tcp_port))\n        # the wire format is defined in the proto file: a Request with three fields (operation, key, value)\n        message_bit = Request(operation='get', key=key).SerializeToString()\n        self.send(sock, message_bit)\n        result = self.recv(sock)\n        print(result)\n        sock.close()\n\n    def put(self, key, value):\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        sock.connect((self.server_ip, self.server_tcp_port))\n        message_bit = Request(operation='put', key=key, value=value).SerializeToString()\n        self.send(sock, message_bit)\n        result = self.recv(sock)\n        print(result)\n        sock.close()\n\n    def delete(self, key):\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        sock.connect((self.server_ip, self.server_tcp_port))\n        message_bit = Request(operation='delete', key=key).SerializeToString()\n        self.send(sock, message_bit)\n        result = self.recv(sock)\n        print(result)\n        sock.close()\n\n\ndef client_start(args):\n    print('Please press Ctrl+C to exit the client.')\n    client = Client(args.server_ip, args.server_tcp_port)\n    try:\n        while True:\n            # read an operation from the user\n            operation = input('input operation: ')\n            if operation == 'put':\n                key = input('input key: ')\n                value = input('input value: ')\n                client.put(key, value)\n            elif operation == 'get':\n                key = input('input key: ')\n                client.get(key)\n            elif operation == 'delete':\n                key = input('input key: ')\n                client.delete(key)\n            else:\n                print('operation error!')\n    except KeyboardInterrupt:\n        print('Close client!')\n\n\nif __name__ == '__main__':\n    # argument parsing\n    parser = argparse.ArgumentParser(description='Client.')\n    parser.add_argument('--server-ip', type=str, default='localhost', metavar='N', help='The IP of the dictionary server.')\n    parser.add_argument('--server-tcp-port', type=int, default=8000, metavar='N', help='The TCP port of the dictionary server.')\n    args = parser.parse_args()\n\n    logging.basicConfig(level=logging.INFO)\n    
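# hand off to the interactive loop defined above (Ctrl+C exits)\n    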
client_start(args)","sub_path":"18340013_���琮昊_Project/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"259203749","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n # https://www.bilibili.com/video/BV1VK411A7Gm\n # (n logn) time and O(1) memory\n def merge(self, h1, h2):\n dummy = tail = ListNode()\n while h1 and h2:\n if h1.val < h2.val:\n tail.next = h1\n h1 = h1.next\n else:\n tail.next = h2\n h2 = h2.next\n tail = tail.next\n tail.next = h1 or h2\n return dummy.next\n\n # fast and slow pointer\n def sortList(self, head: ListNode) -> ListNode:\n if not head or not head.next:\n return head\n pre = None\n slow = head\n fast = head\n while fast and fast.next:\n pre = slow\n slow = slow.next\n fast = fast.next.next\n pre.next = None\n return self.merge(self.sortList(head), self.sortList(slow))\n\n\nif __name__ == '__main__':\n l1 = ListNode(-1, ListNode(5, ListNode(3, ListNode(4, ListNode(0)))))\n\n solution = Solution()\n newList = solution.sortList(l1)\n print(\"\\nAfter:\")\n tmp_list = newList\n while tmp_list != None:\n print(str(tmp_list.val), end='->')\n tmp_list = tmp_list.next\n l1 = ListNode(4, ListNode(2, ListNode(1, ListNode(3))))\n\n newList = solution.sortList(l1)\n print(\"\\nAfter:\")\n tmp_list = newList\n while tmp_list != None:\n print(str(tmp_list.val), end='->')\n tmp_list = tmp_list.next","sub_path":"LinkedList/148-SortList.py","file_name":"148-SortList.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"516942763","text":"from graph import Graph\nfrom util import Stack\n\ndef earliest_ancestor(ancestors, starting_node):\n # set up empty graph\n g = Graph()\n # Create list for possible ancestors\n possible_ancestors = []\n\n # add 11 vertices to the graph\n for n in range(1, 12):\n g.add_vertex(n)\n\n # Add the edges\n for ancestor in ancestors:\n # ancestor at position 0 for first vertex \n # and position 1 for the connecting vertex\n g.add_edge(ancestor[0], ancestor[1])\n # Create a list of starting vertices to iterate through\n starting_nodes = [10, 2, 4, 11]\n # iterate through each starting node\n for id in starting_nodes:\n # create tempty stack\n path = Stack()\n # add vertex id to path stack\n path.push([id])\n # create empty set for visited vertices\n visited = set()\n\n # while path stack isn't empty\n while path.size() > 0:\n # pop vertex path from path stack\n vert = path.pop()\n # check if last vertex in path is in visited\n if vert[-1] not in visited:\n # check if vertex is the vertex we want\n if vert[-1] == starting_node:\n # append vertex into list of possible ancestors\n possible_ancestors.append(vert[0])\n else:\n # add vertex to visited list\n visited.add(vert[-1])\n # iterate through vertex's neighbors\n for n in g.get_neighbors(vert[-1]):\n # create list for second path\n second_path = list(vert)\n # append neighbor to second path\n second_path.append(n)\n # push second path into path stack\n path.push(second_path)\n # return -1 if ancestor is equal to starting node (no ancestor)\n if min(possible_ancestors) == starting_node:\n return -1\n # return 10 if 10 is in possible_ancestors list\n elif 10 in possible_ancestors:\n return 10\n # return the minimum of the possible ancestors list if list isn't empty\n elif 
len(possible_ancestors) > 0:\n return min(possible_ancestors)\n # return -1 for any other scenario (ex; starting node not found in graph)\n else:\n return -1\n \nif __name__ == \"__main__\":\n test_ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)]\n print(earliest_ancestor(test_ancestors, 6))\n print(earliest_ancestor(test_ancestors, 10))\n print(earliest_ancestor(test_ancestors, 9))","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"300845253","text":"from xml.etree import ElementTree as ET\n\nHGAP=10\nVHAP=20\nLAY_LEFT = 0\nLAY_RIGHT = 1\nLAY_ROOT = 2\n\nclass _node_parser:\n def parse_node(self, ctx, xml_root, xml_node, child, root, node):\n node.go_child(ctx, xml_root, xml_node, child, root);\n\n def parse_icon(self, ctx, xml_root, xml_node, child, root, node):\n node.icon = child.attrib['BUILTIN']\n \n def parse(self, ns, xml_root, xml_node, child, root, node):\n f = getattr(self, \"parse_\"+child.tag, None)\n if f:\n f(ns, xml_root, xml_node, child, root, node)\n\nclass _node_base:\n def __init__(self):\n self.childs = [];\n\n def go(self, ctx, xml_root, xml_node, root):\n self.id = xml_node.attrib['ID']\n self.text = xml_node.attrib['TEXT']\n ctx.root_stack.append(self);\n for child in xml_node.getchildren():\n ctx.parser.parse(ctx, xml_root, xml_node, child, root, self)\n\n def back(self, ctx, xml_root, xml_node, root):\n pass\n\nclass _node_root(_node_base):\n def __init__(self):\n self.childs = []\n self.left_childs = []\n self.right_childs = []\n\n def go_child(self, ctx, xml_root, xml_node, child, root):\n n = None;\n if child.attrib['POSITION'] == \"left\":\n n = _node_left()\n self.left_childs.append(n)\n else:\n n = _node_right()\n self.right_childs.append(n)\n ctx.ns.push(xml_node, child, self, n, True);\n self.childs.append(n)\n \n def back(self, ctx, xml_root, xml_node, root):\n ctx.html_string += '
' + self.text + '
\\n';\n str_obj = 'theObjMap.' + self.id;\n ctx.obj_string += str_obj + ' = theRoot;\\n';\n ctx.obj_string += str_obj + '.id = \"' + self.id + '\";\\n';\n str = ''\n for c in self.childs:\n str += 'theObjMap.' + c.id + ', '\n if len(str) > 0:\n str = str[0:len(str)-2]\n ctx.link_string += str_obj + '.child = ' + '[' + str + '];\\n';\n str = ''\n for c in self.left_childs:\n str += 'theObjMap.' + c.id + ', '\n if len(str) > 0:\n str = str[0:len(str)-2]\n ctx.link_string += str_obj + '.left_child = ' + '[' + str + '];\\n';\n str = ''\n for c in self.right_childs:\n str += 'theObjMap.' + c.id + ', '\n if len(str) > 0:\n str = str[0:len(str)-2]\n ctx.link_string += str_obj + '.right_child = ' + '[' + str + '];\\n';\n ctx.leaf_stack.append(self);\n\nclass _node_right(_node_base):\n def go_child(self, ctx, xml_root, xml_node, child, root):\n n = _node_right()\n ctx.ns.push(xml_node, child, self, n, True);\n self.childs.append(n);\n \n def back(self, ctx, xml_root, xml_node, root):\n ctx.html_string += '
' + self.text + '
\\n';\n str_obj = 'theObjMap.' + self.id;\n ctx.obj_string += str_obj + ' = new right_imp();\\n';\n ctx.obj_string += str_obj + '.id = \"' + self.id + '\";\\n';\n str = ''\n for c in self.childs:\n str += 'theObjMap.' + c.id + ', '\n if len(str) > 0:\n str = str[0:len(str)-2]\n ctx.link_string += str_obj + '.child = ' + '[' + str + '];\\n';\n ctx.leaf_stack.append(self);\n \nclass _node_left(_node_base):\n def go_child(self, ctx, xml_root, xml_node, child, root):\n n = _node_left()\n ctx.ns.push(xml_node, child, self, n, True);\n self.childs.append(n);\n \n def back(self, ctx, xml_root, xml_node, root):\n ctx.html_string += '
' + self.text + '
\\n';\n str_obj = 'theObjMap.' + self.id;\n ctx.obj_string += str_obj + ' = new left_imp();\\n';\n ctx.obj_string += str_obj + '.id = \"' + self.id + '\";\\n';\n str = ''\n for c in self.childs:\n str += 'theObjMap.' + c.id + ', '\n if len(str) > 0:\n str = str[0:len(str)-2]\n ctx.link_string += str_obj + '.child = ' + '[' + str + '];\\n';\n ctx.leaf_stack.append(self);\n\nclass _node_stack_node:\n def __init__(self, xml_root, xml_node, root, node, go):\n self.xml_root = xml_root;\n self.xml_node = xml_node;\n self.root = root;\n self.node = node;\n self.go = go;\n\nclass _node_stack:\n def __init__(self):\n self.stack = [];\n def push(self, xml_root,xml_node,root,node,go):\n self.stack.append(_node_stack_node(xml_root,xml_node,root,node,go));\n def leaf_first(self):\n while len(self.stack) > 0:\n node = self.stack[len(self.stack)-1];\n if node.go:\n yield node;\n node.go = False;\n else:\n yield node;\n self.stack.pop();\n\nclass xml_context:\n def __init__(self):\n self.ns = _node_stack();\n self.parser = _node_parser();\n self.the_root = _node_root();\n self.leaf_stack = [];\n self.root_stack = [];\n self.html_string = ''\n self.script_string = ''\n self.obj_string = ''\n self.link_string = ''\n\ndef xml_leaf_first(ctx, map):\n ctx.ns.push(map, map.getchildren()[0], None, ctx.the_root, True);\n for ns_node in ctx.ns.leaf_first():\n if ns_node.go:\n ns_node.node.go(ctx, ns_node.xml_root, ns_node.xml_node, ns_node.root)\n else:\n ns_node.node.back(ctx, ns_node.xml_root, ns_node.xml_node, ns_node.root)\n\ndef mind_read_xml(path, ctx):\n xml = ET.parse(path);\n map = xml.getroot();\n xml_leaf_first(ctx, map)\n ctx.script_string = ctx.obj_string + ctx.link_string;\n '''str = \"\";\n str2 = \"\";\n for n in ctx.root_stack:\n str += \"theObjMap.\" + n.id + \", \"\n str2 += n.text + \" \"\n print str2 + \"\\n\\n\"\n if len(str) > 0:\n str = str[0:len(str)-2]\n str = 'window.mindMap.root_iter = ['+str+'];\\n'\n ctx.script_string += str;'''\n str = \"\";\n for n in ctx.leaf_stack:\n str += \"theObjMap.\" + n.id + \", \"\n if len(str) > 0:\n str = str[0:len(str)-2]\n str = 'window.mindMap.leaf_iter = ['+str+'];\\n'\n ctx.script_string += str;\n","sub_path":"omind/mind_parser.py","file_name":"mind_parser.py","file_ext":"py","file_size_in_byte":6204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"382703029","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nfrom emprestar_app.model import ArcoLogado, Emprestar\nfrom gaebusiness.business import CommandExecutionException\nfrom gaegraph.business_base import CreateArc, DeleteArcs\nfrom tekton.gae.middleware.json_middleware import JsonResponse\nfrom emprestar_app import facade\n\n\ndef index():\n cmd = facade.list_emprestars_cmd()\n emprestar_list = cmd()\n short_form=facade.emprestar_short_form()\n emprestar_short = [short_form.fill_with_model(m) for m in emprestar_list]\n return JsonResponse(emprestar_short)\n\n\ndef save(_logged_user, **emprestar_properties):\n cmd = facade.save_emprestar_cmd(**emprestar_properties)\n pertence = cmd()\n CreateArc(arc_class=ArcoLogado, origin=_logged_user, destination=pertence)()\n return _save_or_update_json_response(cmd)\n\n\ndef update(emprestar_id, **emprestar_properties):\n cmd = facade.update_emprestar_cmd(emprestar_id, **emprestar_properties)\n return _save_or_update_json_response(cmd)\n\n\ndef delete(_logged_user, emprestar_id):\n pertence = Emprestar.get_by_id(emprestar_id)\n DeleteArcs(arc_class=ArcoLogado, 
origin=_logged_user, destination=pertence)()\n facade.delete_emprestar_cmd(emprestar_id)()\n return JsonResponse({'id': emprestar_id})\n\n\ndef edit(**emprestar_properties):\n emprestar_id = emprestar_properties['emprestar_id']\n pertence = Emprestar.get_by_id(emprestar_properties['emprestar_id'])\n emprestar_properties.pop('emprestar_id')\n pertence.populate(**emprestar_properties)\n pertence.put()\n return JsonResponse({'id': emprestar_id})\n\ndef _save_or_update_json_response(cmd):\n try:\n emprestar = cmd()\n except CommandExecutionException:\n return JsonResponse({'errors': cmd.errors})\n short_form=facade.emprestar_short_form()\n return JsonResponse(short_form.fill_with_model(emprestar))\n\n","sub_path":"tekton/backend/appengine/routes/emprestars/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"427211942","text":"import FWCore.ParameterSet.Config as cms\n\nfrom Configuration.Generator.PythiaUEZ2starSettings_cfi import *\n\nbfilter = cms.EDFilter(\n \"PythiaFilter\",\n MaxEta = cms.untracked.double(9999.),\n MinEta = cms.untracked.double(-9999.),\n ParticleID = cms.untracked.int32(521)\n )\n\ngenerator = cms.EDFilter(\n \"Pythia6GeneratorFilter\",\n ExternalDecays = cms.PSet(\n EvtGen = cms.untracked.PSet(\n use_default_decay = cms.untracked.bool(False),\n decay_table = cms.FileInPath('GeneratorInterface/ExternalDecays/data/DECAY_NOLONGLIFE.DEC'),\n particle_property_file = cms.FileInPath('GeneratorInterface/ExternalDecays/data/evtJpsiKKKY.pdl'),\n user_decay_file = cms.FileInPath('GeneratorInterface/ExternalDecays/data/Bu_Y4727K_JpsiPhi_mumuKK.dec'),\n list_forced_decays = cms.vstring('MyB+',\n 'MyB-'),\n operates_on_particles = cms.vint32(0)\n ),\n parameterSets = cms.vstring('EvtGen')\n ),\n pythiaPylistVerbosity = cms.untracked.int32(0),\n filterEfficiency = cms.untracked.double(6e-05),\n pythiaHepMCVerbosity = cms.untracked.bool(False),\n comEnergy = cms.double(7000.0),\n maxEventsToPrint = cms.untracked.int32(0),\n PythiaParameters = cms.PSet(\n pythiaUESettingsBlock,\n bbbarSettings = cms.vstring('MSEL = 1 !'),\n parameterSets = cms.vstring(\n 'pythiaUESettings', \n 'bbbarSettings')\n )\n )\n\nProductionFilterSequence = cms.Sequence(generator * bfilter)\n","sub_path":"genfragments/SevenTeV/PYTHIA6_Bp2Y4727K_BpFilter_TuneZ2_7TeV-pythia6-evtgen_cff.py","file_name":"PYTHIA6_Bp2Y4727K_BpFilter_TuneZ2_7TeV-pythia6-evtgen_cff.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"11862346","text":"from fastapi import APIRouter,WebSocket\n\nrouter = APIRouter(\n prefix=\"/room\",\n tags=[\"room\",]\n)\n\n\n@router.websocket(\"/ws\")\nasync def websocket_endpoint(websocket: WebSocket):\n await websocket.accept()\n while True:\n data = await websocket.receive_text()\n await websocket.send_text(f\"Message Text was: {data}\")\n","sub_path":"api/api/presentations/controllers/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"607790877","text":"# heuristics.py\n# ----------------\n# COMP3620/6320 Artificial Intelligence\n# The Australian National University\n# For full attributions, see attributions.txt on Wattle at the end of the\n# course\n\n\"\"\" This class contains heuristics which are used for the search procedures that\n you write in 
search_strategies.py.\n\n    The first part of the file contains heuristics to be used with the algorithms\n    that you will write in search_strategies.py.\n\n    In the second part you will write a heuristic for Q4 to be used with a\n    MultiplePositionSearchProblem.\n\"\"\"\n\n#-------------------------------------------------------------------------\n# A set of heuristics which are used with a PositionSearchProblem\n# You do not need to modify any of these.\n#-------------------------------------------------------------------------\n\nfrom search_problems import SearchProblem\n\n\ndef null_heuristic(pos, problem):\n    \"\"\" The null heuristic. It is fast but uninformative. It always returns 0.\n    (State, SearchProblem) -> int\n    \"\"\"\n    return 0\n\n\ndef manhattan_heuristic(pos, problem):\n    \"\"\" The Manhattan distance heuristic for a PositionSearchProblem.\n    ((int, int), PositionSearchProblem) -> int\n    \"\"\"\n    return abs(pos[0] - problem.goal_pos[0]) + abs(pos[1] - problem.goal_pos[1])\n\n\ndef euclidean_heuristic(pos, problem):\n    \"\"\" The Euclidean distance heuristic for a PositionSearchProblem\n    ((int, int), PositionSearchProblem) -> float\n    \"\"\"\n    return ((pos[0] - problem.goal_pos[0]) ** 2 + (pos[1] - problem.goal_pos[1]) ** 2) ** 0.5\n\n# Abbreviations\nnull = null_heuristic\nmanhattan = manhattan_heuristic\neuclidean = euclidean_heuristic\n\n#-------------------------------------------------------------------------\n# You have to implement the following heuristics for Q4 of the assignment.\n# It is used with a MultiplePositionSearchProblem\n#-------------------------------------------------------------------------\n\n# You can make helper functions here, if you need them\n\n\ndef bird_counting_heuristic(state: tuple, problem: SearchProblem) -> int:\n    \"\"\" Returns the count of remaining yellow birds as the heuristic\n\n    ((tuple, (tuple, tuple, ...)), SearchProblem) -> int\n\n    @type problem: SearchProblem\n    @type state: (tuple, (tuple, tuple, ...))\n    \"\"\"\n    position, yellow_birds = state\n    heuristic_value = len(yellow_birds)\n\n    return heuristic_value\n\nbch = bird_counting_heuristic\n\n\ndef better_bird_in_hand_than_two_in_the_bush_admissible(state, problem):\n    \"\"\" PDB Heuristic, considers each goal as an individual subproblem and\n    returns the max of the sub-problems as the current heuristic value\n\n    ((tuple, (tuple, tuple, ...)), SearchProblem) -> int\n\n    @type problem: SearchProblem\n    @type state: (tuple, (tuple, tuple, ...))\n    \"\"\"\n    position, yellow_birds = state\n    heuristic_value = 0\n    for yb in yellow_birds:\n        rb_yb_manh_dist = abs(position[0] - yb[0]) + abs(position[1] - yb[1])\n        if heuristic_value < rb_yb_manh_dist:\n            heuristic_value = rb_yb_manh_dist\n    return heuristic_value\n\nbbih_adm = better_bird_in_hand_than_two_in_the_bush_admissible\n\n\ndef better_bird_in_hand_than_two_in_the_bush_inadmissible(state, problem):\n    position, yellow_birds = state\n    heuristic_value = 0\n    for yb in yellow_birds:\n        rb_yb_manh_dist = abs(position[0] - yb[0]) + abs(position[1] - yb[1])\n        heuristic_value += rb_yb_manh_dist\n    return heuristic_value\n\n\nbbih_inadm = better_bird_in_hand_than_two_in_the_bush_inadmissible\n\n\ndef all_your_birds_belong_to_me(state, problem):\n\n    position, yellow_birds = state\n    heuristic_value = 0\n\n    \"\"\" *** YOUR CODE HERE *** \"\"\"\n\n    return heuristic_value\n\naybbtm = all_your_birds_belong_to_me\n","sub_path":"code/heuristics.py","file_name":"heuristics.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"88522232","text":"import rospy\nfrom std_msgs.msg import Int16\nimport time\n\n\ndef main():\n    rospy.init_node('motor_left_publisher')\n\n    pub_motor_left = rospy.Publisher('motor/dps/left', Int16, queue_size=1)\n\n    msg = Int16()\n    # dps 35 low, max 460\n    while(True):\n        msg.data = input(\"speed: \")\n        print(msg.data)\n        pub_motor_left.publish(msg)\n\n        time.sleep(0.5)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/odm.py","file_name":"odm.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"551430678","text":"import re\n\nmy_dict = {}\n\nwith open(\"text_6.txt\", \"r\", encoding=\"utf-8\") as content:\n    for line in content.readlines():\n        res_1 = line.split()\n        res_2 = [el for el in res_1 if el != \"-\"]\n        regex_num = re.compile('\\d+')\n        res_3 = regex_num.findall(line)\n        time = 0\n        for el in res_3:\n            time = time + int(el)\n        my_dict[res_1[0]] = time\nprint(my_dict)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Lesson5ex6.py","file_name":"Lesson5ex6.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"119909226","text":"#!/usr/bin/python3\nimport csv\nimport glob\nimport os\nimport re\nimport datetime\nimport itertools\nimport json\nimport numpy as np\nimport subprocess\nimport sys\nimport socket\nimport copy\nfrom bs4 import BeautifulSoup\nfrom distutils.dir_util import copy_tree\n\nfrom sofa_print import *\n\ndef ds_preprocess(cfg):\n    from sofa_preprocess import sofa_preprocess\n    from sofa_analyze import sofa_analyze\n\n    save_logdir = cfg.logdir\n    # ds_logpath = cfg.logdir + \"ds_finish/\"\n    # os.chdir(ds_logpath)\n\n    nodes_record_dir = []\n    for dir in filter(lambda x: os.path.isdir(x), os.listdir('.')):\n        if dir.find('_sofalog') == -1:\n            continue\n        nodes_record_dir.append(dir)\n\n    sofa_timebase_min = sys.maxsize\n    for i in range(len(nodes_record_dir)):\n        time_fd = open('%s/sofa_time.txt' % nodes_record_dir[i])\n        unix_time = time_fd.readline()\n        unix_time.rstrip()\n        unix_time = float(unix_time)\n\n        # get minimum timebase among sofalog directories\n        sofa_timebase_min = min(sofa_timebase_min, unix_time)\n\n    for i in range(len(nodes_record_dir)):\n        time_fd = open('%s/sofa_time.txt' % nodes_record_dir[i])\n        unix_time = time_fd.readline()\n        unix_time.rstrip()\n        unix_time = float(unix_time)\n        cfg.cpu_time_offset = 0\n        if (unix_time > sofa_timebase_min):\n            basss = float(sofa_timebase_min) - float(unix_time)\n            if basss < -28700:\n                basss += 28800\n            cfg.cpu_time_offset = basss\n        # cfg.cpu_time_offset = float(sofa_timebase_min) - float(unix_time)\n        print(\"%s, %f\" % (nodes_record_dir[i], cfg.cpu_time_offset))\n\n        cfg.logdir = './' + str(nodes_record_dir[i]) + '/'\n        sofa_preprocess(cfg)\n        sofa_analyze(cfg)\n\n    # pid2y_pos_dic = ds_connect_preprocess(cfg)\n    # dds_calc_topic_latency(cfg)\n    #ds_dds_create_span(cfg)\n\ndef ds_viz(cfg):\n    nodes_record_dir = []\n    for dir in filter(lambda x: os.path.isdir(x), os.listdir('.')):\n        if dir.find('_sofalog') == -1:\n            continue\n        nodes_record_dir.append(dir)\n\n    local = os.path.basename(os.path.dirname(cfg.logdir))\n    idx = nodes_record_dir.index(local)\n    nodes_record_dir.pop(idx)\n\n    master = BeautifulSoup(open(os.path.join(cfg.logdir, 
'index.html')), 'html.parser')\n with open(os.path.join(local, 'timeline.js')) as f:\n sofa_fig_highchart = f.read()\n sidenav = master.find('div', {'class': 'sidenav'})\n subtitle = master.new_tag('figcaption', **{'class': 'sidenav-element-title'})\n subtitle.string = socket.gethostname()\n sidenav.insert(0, subtitle)\n\n for i, dir in enumerate(nodes_record_dir, 2):\n hostname = re.sub('_sofalog$', '', dir)\n subtitle = master.new_tag('figcaption', **{'class': 'sidenav-element-title'})\n subtitle.string = hostname\n sidenav.append(subtitle)\n \n # sofa figure\n sofa_fig = BeautifulSoup('''\n \n
\n
\n ''')\n\n sofa_fig.a['name'] = 'e' + str(i)\n sofa_fig.div['id'] = 'container' + str(i)\n new_sofa_fig_highchart = sofa_fig_highchart.replace('container',\n 'container' + str(i))\n sofa_fig_sidenav = BeautifulSoup('''\n Functions and Events\n ''')\n sofa_fig_sidenav.a['href'] = '#e' + str(i)\n sidenav.append(sofa_fig_sidenav.a)\n\n report = master.new_tag('script', src=os.path.join(dir, 'report.js'))\n timeline = master.new_tag('script', src=os.path.join(dir, 'timeline.js'))\n\n master.body.append(sofa_fig.a)\n master.body.append(report)\n master.body.append(timeline)\n \n copied_sofalog_dir = os.path.join(local, dir)\n copy_tree(dir, copied_sofalog_dir)\n with open(os.path.join(copied_sofalog_dir, 'timeline.js'), 'w') as f:\n f.write(new_sofa_fig_highchart)\n \n # Plotly\n sofa_plotly = master.find('a', attrs={'name':'n'})\n sofa_plotly = copy.copy(sofa_plotly)\n sofa_plotly['name'] = 'n' + str(i)\n sofa_plotly.div['id'] = 'main_net' + str(i)\n sofa_plotly.script.string = sofa_plotly.script.string.replace('netbandwidth.csv',\n os.path.join(dir, 'netbandwidth.csv'))\n sofa_plotly.script.string = sofa_plotly.script.string.replace('main_net', 'main_net' + str(i))\n sofa_plotly_sidenav = BeautifulSoup('''\n Network Utilization\n ''')\n sofa_plotly_sidenav.a['href'] = '#n' + str(i)\n sidenav.append(sofa_plotly_sidenav.a)\n\n master.body.append(sofa_plotly)\n\n print(master.prettify())\n\n with open(os.path.join(cfg.logdir, 'index.html'), 'w') as f:\n f.write(master.prettify())","sub_path":"plugins/ros2_performance_test/sofa_ros2_ds.py","file_name":"sofa_ros2_ds.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"210223040","text":"def setup():\n global MEGAMAN_POSITION, PAGE, LEVEL, TIMER, RESET_TIMER, DEATHS, GRAVITY, LEFT_PRESSED, FLOOR\n global RIGHT_PRESSED, UP_PRESSED, stars, rectx, recty, title_background, instructions_background, logo\n global zero_deaths, megaman_spawn_points, on_screen_bullet, bulletX, bulletY, bullet_size, bullet_timer, draw_bullet\n global unkillable, unkillable_enemies, unkillable_enemies_size, megaman_size, unkillable_enemies_spawn_points\n global in_air, airtime, floor_collision, saw1_X, saw1_Y, saw1_up, saw_size, secret, unkillable_enemy1_up, unkillable_enemy2_up\n size(1280, 480)\n import random\n stars = []\n add_star = 0\n # while loop is used instead of a for loop due to the lack of while loops in the \n # program and the lack of places where a while loop would be optimal\n while add_star < 201:\n stars.append([random.randint(1, 1280), random.randint(1, 350)])\n add_star += 1\n noStroke()\n FLOOR = [300, 300]\n MEGAMAN_POSITION = [50, FLOOR[0]]\n PAGE = 1\n LEVEL = 1\n TIMER = [151, 101]\n RESET_TIMER = TIMER[:]\n DEATHS = 0\n GRAVITY = 5\n in_air = False\n airtime = 0\n floor_collision = False\n LEFT_PRESSED, RIGHT_PRESSED, UP_PRESSED = False, False, False\n bulletX = MEGAMAN_POSITION[0]\n bulletY = MEGAMAN_POSITION[1]\n bullet_timer = 0\n bullet_size = 10\n rectx = 200\n recty = 100\n saw1_X = 1000\n saw1_Y = 100\n saw1_up = False\n unkillable_enemy1_up, unkillable_enemy2_up = False, False\n saw_size = 100\n megaman_spawn_points = [50, FLOOR[0]]\n title_background = loadImage('title_screen.jpg')\n title_background.resize(1280, 480)\n instructions_background = loadImage('instructions_background.png')\n instructions_background.resize(1280, 480)\n logo = loadImage('megaman_logo.png')\n logo.resize(400, 180)\n on_screen_bullet = False\n draw_bullet = False\n 
zero_deaths = loadImage('zero_deaths.jpeg')\n    zero_deaths.resize(75, 75)\n    megaman_size = 50\n    unkillable_enemies_size = 50\n    unkillable = loadImage('unkillable.png')\n    unkillable.resize(unkillable_enemies_size, unkillable_enemies_size)\n    unkillable_enemies = [[800, FLOOR[0]], [700, 250], [1000, 300]]\n    unkillable_enemies_spawn_points = [[800, FLOOR[0]], [700, 250], [1000, 300]]\n    secret = False\n    screen = 1\n    \n# Used to track user inputs and move megaman accordingly\ndef keyPressed():\n    global LEFT_PRESSED, RIGHT_PRESSED, UP_PRESSED, MEGAMAN_POSITION, on_screen_bullet, in_air, secret\n    if keyCode == RIGHT:\n        RIGHT_PRESSED = True\n    elif keyCode == LEFT:\n        LEFT_PRESSED = True\n    if keyCode == UP:\n        in_air = True\n    if keyCode == 32:\n        on_screen_bullet = True\n    if keyCode == 77 and PAGE == 6:\n        secret = True\n'''\nTESTING ONLY\n    if keyCode == DOWN:\n        MEGAMAN_POSITION[1] += 3\n'''\n\n\ndef keyReleased():\n    global LEFT_PRESSED, RIGHT_PRESSED, UP_PRESSED\n    if keyCode == RIGHT:\n        RIGHT_PRESSED = False\n    elif keyCode == LEFT:\n        LEFT_PRESSED = False\n\n\n# Used to check the mouse's location. This is used on the intro screen \n# to either take the player to the main game or to the instructions\ndef mousePressed():\n    global rectx, recty, PAGE\n    if PAGE == 1 and mouseX > width/4-(rectx/2) and mouseX < width/4+(rectx/2) and mouseY > height-200 and mouseY < height-recty:\n        PAGE = 3\n    if PAGE == 1 and mouseX > width-2*rectx and mouseX < width-rectx and mouseY > height-200 and mouseY < height-recty:\n        PAGE = 2\n    if PAGE == 2 and mouseX > width/2-rectx/2 and mouseX < width/2+rectx/2 and mouseY > height-125 and mouseY < height-125+recty:\n        PAGE = 3\n    if PAGE == 2 and mouseX > 1075 and mouseX < 1175 and mouseY > 200 and mouseY < 210:\n        PAGE = 6\n\n\ndef page1():\n    background(title_background)\n    image(logo, width/2-200, height-400)\n    # Code for start button. Changes color to a darker shade of green if the\n    # mouse hovers over it\n    fill(0, 255, 0)\n    rect(width/4-(rectx/2), height-200, rectx, recty, 5)\n    textSize(16)\n    fill(255)\n    text(\"Click here to Start\", rectx+50, height-recty-40)\n\n    if mouseX > width/4-(rectx/2) and mouseX < width/4+(rectx/2) and mouseY > height-200 and mouseY < height-recty:\n        fill(0,128,0)\n        rect(width/4-(rectx/2), height-200, rectx, recty, 5)\n        fill(255)\n        textSize(16) \n        text(\"Click here to Start\", rectx+50, height-recty-40)\n    \n    # Code for Instructions button. 
Changes color to a darker shade of red\n # if the mouse hovers over it\n fill(255, 0, 0)\n rect(width-2*rectx, height-200, rectx, recty, 5) \n textSize(14) \n fill(255)\n text(\"Click here for Instructions\", width/2+rectx+55, height-recty-40)\n if mouseX > width-2*rectx and mouseX < width-rectx and mouseY > height-200 and mouseY 1075 and mouseX < 1175 and mouseY > 200 and mouseY < 210:\n fill(0)\n text(\"(UNKILLABLE)\", 1075, 210)\n\n \n# Code for start button in the instructions menu\n fill(255,255,0)\n rect(width/2-rectx/2, height-125, rectx, recty, 5) \n fill(218,112,214)\n text(\"Click here to Start\", width/2-rectx+130, height/2+recty+75)\n if mouseX > width/2-rectx/2 and mouseX < width/2+rectx/2 and mouseY > height-125 and mouseY < height-125+recty:\n fill(255, 219, 88)\n rect(width/2-rectx/2, height-125, rectx, recty, 5) \n fill(218,112,214)\n text(\"Click here to Start\", width/2-rectx+130, height/2+recty+75)\n\n\ndef draw_stars():\n import random\n fill(255)\n for star in range(len(stars)):\n ellipse(stars[star][0], stars[star][1], 5, 5)\n \n for star in range(len(stars)):\n stars[star][0] += 0.1\n \n #remove stars\n for star in range(len(stars)-1, 0, -1):\n if stars[star-1][0] >= width + 5:\n stars.pop(star-1)\n \n #add new star every second\n if frameCount % 60 == 0:\n stars.append([0, random.randint(1, height)])\n\n\ndef level1_spikes(x, y, mid):\n global MEGAMAN_POSITION, DEATHS, unkillable_enemies, TIMER, bullet_timer, on_screen_bullet, draw_bullet\n noFill()\n rect(x, y-2*mid, 1.35*x, 2*mid)\n fill(211,211,211)\n for spike in range(10):\n triangle(x, y, x+2*mid, y, x+mid, y-mid*2)\n x += 2*mid\n\n # Hitbox for spikes. NOTE: Since the player is not able to clip through the \n # floor, the collision continues below the spikes all the way to infinity\n if MEGAMAN_POSITION[0] > x/3.5 and MEGAMAN_POSITION[0] < x and MEGAMAN_POSITION[1] > y/1.25: \n level1_reset()\n\n\ndef level1_saw(x, y):\n global MEGAMAN_POSITION, DEATHS, TIMER, saw1_Y, saw1_up, bullet_timer, on_screen_bullet, draw_bullet\n # draw saw\n fill(211,211,211)\n ellipse(x, y, saw_size, saw_size)\n # saw movement\n if saw1_up == False:\n saw1_Y += 1\n if saw1_Y >= 300:\n saw1_up = True\n if saw1_up == True:\n saw1_Y -= 1\n if saw1_Y < 50:\n saw1_up = False\n \n # saw hitbox\n if MEGAMAN_POSITION[0]+megaman_size > saw1_X and MEGAMAN_POSITION[0] < saw1_X + saw_size and MEGAMAN_POSITION[1] < saw1_Y + saw_size/2 and MEGAMAN_POSITION[1] > saw1_Y - saw_size:\n level1_reset()\n\n\ndef unkillable_enemy1_hitbox():\n # If the player hits the enemy, they die, the level resets, and the death counter increments\n global MEGAMAN_POSITION, DEATHS, TIMER, unkillable_enemies, bullet_timer, on_screen_bullet, draw_bullet\n if MEGAMAN_POSITION[0]+megaman_size/1.25 >= unkillable_enemies[0][0] and MEGAMAN_POSITION[0]/0.99 <= unkillable_enemies[0][0]+unkillable_enemies_size and MEGAMAN_POSITION[1] > unkillable_enemies[0][1]-unkillable_enemies_size and MEGAMAN_POSITION[1] < unkillable_enemies[0][1]+unkillable_enemies_size:\n level1_reset()\n \n # If the bullet hits this enemy, the bullet despawns and the enemy is left untouched\n if bulletX >= unkillable_enemies[0][0] and bulletX <= unkillable_enemies[0][0]+unkillable_enemies_size and bulletY <= unkillable_enemies[0][1]+bullet_size and bulletY > unkillable_enemies[0][1]-bullet_size:\n bullet_timer = 0\n on_screen_bullet = False\n draw_bullet = False\n \n\ndef unkillable_enemy2_hitbox():\n global MEGAMAN_POSITION, DEATHS, TIMER, unkillable_enemies, bullet_timer, on_screen_bullet, 
draw_bullet\n if MEGAMAN_POSITION[0]+megaman_size/1.25 >= unkillable_enemies[1][0] and MEGAMAN_POSITION[0]/0.99 <= unkillable_enemies[1][0]+unkillable_enemies_size and MEGAMAN_POSITION[1] > unkillable_enemies[1][1]-unkillable_enemies_size and MEGAMAN_POSITION[1] < unkillable_enemies[1][1]+unkillable_enemies_size:\n level2_reset()\n\n if bulletX >= unkillable_enemies[1][0] and bulletX <= unkillable_enemies[1][0]+unkillable_enemies_size and bulletY <= unkillable_enemies[1][1]+bullet_size and bulletY > unkillable_enemies[1][1]-bullet_size:\n bullet_timer = 0\n on_screen_bullet = False\n draw_bullet = False\n \n\ndef unkillable_enemy3_hitbox():\n global MEGAMAN_POSITION, DEATHS, TIMER, unkillable_enemies, bullet_timer, on_screen_bullet, draw_bullet\n if MEGAMAN_POSITION[0]+megaman_size/1.25 >= unkillable_enemies[2][0] and MEGAMAN_POSITION[0]/0.99 <= unkillable_enemies[2][0]+unkillable_enemies_size and MEGAMAN_POSITION[1] > unkillable_enemies[2][1]-unkillable_enemies_size and MEGAMAN_POSITION[1] < unkillable_enemies[2][1]+unkillable_enemies_size:\n level2_reset()\n \n if bulletX >= unkillable_enemies[2][0] and bulletX <= unkillable_enemies[2][0]+unkillable_enemies_size and bulletY <= unkillable_enemies[2][1]+bullet_size and bulletY > unkillable_enemies[2][1]-bullet_size:\n bullet_timer = 0\n on_screen_bullet = False\n draw_bullet = False \n \n \ndef level1():\n global LEVEL, PAGE, TIMER, MEGAMAN_POSITION, unkillable_enemies, bullet_timer, on_screen_bullet, draw_bullet\n background(0)\n draw_stars()\n fill(255)\n text(\"DEATHS:\" + str(DEATHS), 10, 15)\n text(\"TIMER:\" + str(int(TIMER[0])), 100, 15)\n rect(0, 350, width, height)\n fill(255,0,0)\n rect(MEGAMAN_POSITION[0], MEGAMAN_POSITION[1], megaman_size, megaman_size)\n level1_spikes(150, 350, 10)\n level1_saw(saw1_X, saw1_Y)\n # Unkillable enemy and its hitbox\n image(unkillable, unkillable_enemies[0][0], unkillable_enemies[0][1])\n unkillable_enemies[0][0] -= 1\n unkillable_enemy1_hitbox()\n TIMER[0] -= 0.1\n \n if TIMER[0] <= 0:\n level1_reset()\n \n if draw_bullet == True:\n fill(255)\n rect(bulletX, bulletY, bullet_size, bullet_size)\n \n if MEGAMAN_POSITION[0] > 1200:\n LEVEL = 2\n draw_bullet = False\n on_screen_bullet = False\n bullet_timer = 0\n MEGAMAN_POSITION = megaman_spawn_points[:]\n\n \ndef level2():\n global LEVEL, PAGE, TIMER, MEGAMAN_POSITION, unkillable_enemies, unkillable_enemy1_up, unkillable_enemy2_up, bullet_timer, on_screen_bullet, draw_bullet\n background(0)\n draw_stars()\n fill(255)\n text(\"DEATHS:\" + str(DEATHS), 10, 15)\n text(\"TIMER:\" + str(int(TIMER[1])), 100, 15)\n rect(0, 350, width, height)\n fill(255,0,0)\n rect(MEGAMAN_POSITION[0], MEGAMAN_POSITION[1], megaman_size, megaman_size)\n image(unkillable, unkillable_enemies[1][0], unkillable_enemies[1][1])\n image(unkillable, unkillable_enemies[2][0], unkillable_enemies[2][1])\n \n # Moves the first unkillable enemy up and down\n if unkillable_enemy1_up == False:\n unkillable_enemies[1][1] += 4\n if unkillable_enemies[1][1] >= 300:\n unkillable_enemy1_up = True\n if unkillable_enemy1_up == True:\n unkillable_enemies[1][1] -= 4\n if unkillable_enemies[1][1] < 10:\n unkillable_enemy1_up = False\n\n # Moves the second unkillable enemy up and down\n if unkillable_enemy2_up == False:\n unkillable_enemies[2][1] += 4\n if unkillable_enemies[2][1] >= 300:\n unkillable_enemy2_up = True\n if unkillable_enemy2_up == True:\n unkillable_enemies[2][1] -= 6\n if unkillable_enemies[2][1] < 10:\n unkillable_enemy2_up = False\n\n unkillable_enemy2_hitbox()\n 
unkillable_enemy3_hitbox()\n TIMER[1] -= 0.1\n \n if TIMER[1] <= 0:\n level2_reset()\n \n if draw_bullet == True:\n fill(255)\n rect(bulletX, bulletY, bullet_size, bullet_size)\n \n ''' \n if MEGAMAN_POSITION[0] > 1200:\n LEVEL = 3\n draw_bullet = False\n on_screen_bullet = False\n bullet_timer = 0\n ''' \n \ndef level1_reset():\n global DEATHS, TIMER, MEGAMAN_POSITION, unkillable_enemies, unkillable_enemies_spawn_points, megaman_spawn_points, bullet_timer, on_screen_bullet, draw_bullet\n DEATHS += 1\n TIMER[0] = RESET_TIMER[0]\n MEGAMAN_POSITION = megaman_spawn_points[:]\n unkillable_enemies[0][0] = unkillable_enemies_spawn_points[0][0]\n bullet_timer = 0\n on_screen_bullet = False\n draw_bullet = False\n\n\ndef level2_reset():\n global DEATHS, TIMER, MEGAMAN_POSITION, unkillable_enemies, unkillable_enemies_spawn_points, megaman_spawn_points, bullet_timer, on_screen_bullet, draw_bullet\n DEATHS += 1\n TIMER[1] = RESET_TIMER[1]\n MEGAMAN_POSITION = megaman_spawn_points[:]\n unkillable_enemies[1][1] = unkillable_enemies_spawn_points[1][1]\n unkillable_enemies[2][1] = unkillable_enemies_spawn_points[2][1]\n bullet_timer = 0\n on_screen_bullet = False\n draw_bullet = False\n \n \ndef physics():\n global LEVEL, PAGE, MEGAMAN_POSITION, bulletX, bulletY, on_screen_bullet, bullet_timer, draw_bullet\n global in_air, airtime\n if RIGHT_PRESSED == True:\n MEGAMAN_POSITION[0] += 3\n if LEFT_PRESSED == True and MEGAMAN_POSITION[0] > 0:\n MEGAMAN_POSITION[0] -= 3\n if in_air == True:\n airtime += 0.2\n if airtime > 10:\n MEGAMAN_POSITION[1] += 1\n if LEVEL == 1 and MEGAMAN_POSITION[1] > FLOOR[0]: \n airtime = 0\n in_air = False\n MEGAMAN_POSITION[1] = FLOOR[0]\n if LEVEL == 2 and MEGAMAN_POSITION[1] > FLOOR[1]: \n airtime = 0\n in_air = False\n MEGAMAN_POSITION[1] = FLOOR[0]\n else:\n MEGAMAN_POSITION[1] -= 2\n\n if on_screen_bullet == False:\n bulletX = MEGAMAN_POSITION[0]\n bulletY = MEGAMAN_POSITION[1]\n \n if on_screen_bullet == True:\n draw_bullet = True\n bulletX += 8\n bullet_timer += 1\n if bullet_timer > 100:\n bulletX = MEGAMAN_POSITION[0]\n bulletY = MEGAMAN_POSITION[1]\n draw_bullet = False\n on_screen_bullet = False\n bullet_timer = 0\n\n\ndef page3():\n physics() \n if LEVEL == 1:\n level1() \n if LEVEL == 2:\n level2()\n\ndef page6():\n global PAGE\n background(0)\n fill(255)\n text(\"Hey uh you're not supposed to be here so just reset the program and go back ok?\", width/2-400, height/2)\n if secret == True:\n PAGE = 4 #Warps to boss fight\n\ndef draw():\n global DEATHS\n if PAGE == 1:\n page1()\n DEATHS = 0\n elif PAGE == 2:\n page2()\n elif PAGE == 3:\n page3()\n elif PAGE == 4:\n pass\n elif PAGE == 6:\n page6()\n","sub_path":"beta/beta.pyde","file_name":"beta.pyde","file_ext":"pyde","file_size_in_byte":16018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"491339267","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.10.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\nfrom typing import Tuple, List\nfrom functools import partial\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, random_split, Dataset, RandomSampler\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.nn import functional as F\nimport pytorch_lightning as pl\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning import loggers as pl_loggers\nfrom sklearn import model_selection, 
preprocessing\nfrom sklearn.metrics import roc_auc_score\nimport numpy as np\nimport torchvision.models as models\nfrom collections import Counter\nimport pickle\nimport os\nimport transformers\nfrom tqdm import tqdm\nfrom transformers import BertTokenizer, BertModel, AdamW,BertPreTrainedModel\nimport pandas as pd\nos.environ[\"TORCH_HOME\"] = \"/media/hdd/Datasets\"\n# -\n\nimport torchsnooper as tp\n\n# # Verifying the data\n\ndata_path = \"/media/hdd/Datasets/jigsaw/\"\n\nos.listdir(data_path)\n\ndf = pd.read_csv(data_path+\"train.csv\", engine = \"python\")\n\ndf.head(3)\n\ndf.shape\n\n\n# # Create model\n\n# +\nclass LitModel(pl.LightningModule):\n    def __init__(self, num_classes, num_train_steps, learning_rate=2e-4):\n        super().__init__()\n        \n        # log hyperparameters\n        self.save_hyperparameters()\n        self.learning_rate = learning_rate\n        self.bert = transformers.BertModel.from_pretrained(\n            \"bert-base-uncased\" , return_dict = False\n        )\n        self.bert_drop = nn.Dropout(.3)\n        self.num_classes= num_classes\n        self.out = nn.Linear(self.bert.config.hidden_size, self.num_classes)\n        self.num_train_steps = num_train_steps\n        self.acc = pl.metrics.PrecisionRecallCurve(num_classes=self.num_classes)\n        \n#     @tp.snoop()\n    def forward(self, ids, mask = None , token_type_ids = None):\n        _, x = self.bert(ids, mask, token_type_ids)\n        x = self.bert_drop(x)\n        x = self.out(x)\n        x = torch.sigmoid(x)\n        return x\n#     @tp.snoop()\n\n    def training_step(self, train_batch, batch_idx):\n#         i,m,to,ta= train_batch['ids'] , train_batch['mask'],train_batch['token_type_ids'], train_batch['targets']\n        x,y = train_batch\n        logits = self(x)\n        loss = nn.BCEWithLogitsLoss()(logits,y)\n#         print(loss)\n#         acc = self.er(logits,ta.view(-1,1))\n        \n#         self.log('train_acc_step', self.acc())\n        self.log('train_loss', loss)\n        return loss\n#         return acc , loss\n    \n    def test_step(self, test_batch, batch_idx):\n        x,y = test_batch\n        logits = self(x)\n        loss = nn.BCEWithLogitsLoss()(logits,y)\n#         acc = self.er(logits,ta.view(-1,1))\n#         self.log('test_acc_step', acc)\n        self.log('test_loss', loss)\n        return loss\n#         return acc , loss\n\n    def configure_optimizers(self):\n        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n        return optimizer\n\n\n# -\n\n# # Load data\n\n# +\n# id \tcomment_text \ttoxic \tsevere_toxic \tobscene \tthreat \tinsult \tidentity_hate\n\n# +\nclass SentiDs:\n    def __init__(self, dataframe, max_len = 64):\n        self.tokenizer = transformers.BertTokenizer.from_pretrained(\n            \"bert-base-uncased\", do_lower_case = False)\n        #run once\n#         self.X = []\n#         self.Y = []\n#         for i, (row) in tqdm(dataframe.iterrows()):\n#             x, y = self.row_to_tensor(self.tokenizer,row)\n#             self.X.append(x)\n#             self.Y.append(y)\n        \n#         with open(\"x_saved.pkl\",\"wb+\") as f:\n#             pickle.dump(self.X,f)\n        \n#         with open(\"y_saved.pkl\",\"wb+\") as f:\n#             pickle.dump(self.Y,f)\n\n        #comment above and run these next time\n        \n        with open(\"x_saved.pkl\",\"rb+\") as f:\n            self.X = pickle.load(f)\n        \n        with open(\"y_saved.pkl\",\"rb+\") as f:\n            self.Y = pickle.load(f)\n        print(\"Loaded\")\n        \n        self.max_len= max_len\n        \n    @staticmethod\n    def row_to_tensor(tokenizer: BertTokenizer, row: pd.Series) -> Tuple[torch.LongTensor, torch.LongTensor]:\n        tokens = tokenizer.encode(row[\"comment_text\"], add_special_tokens=True)\n        if len(tokens) > 120:\n            tokens = tokens[:119] + [tokens[-1]]\n        x = torch.LongTensor(tokens)\n        y = torch.FloatTensor(row[[\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]])\n        return x, y\n    \n    def __len__(self):\n        return len(self.X)\n    \n    def __getitem__(self, idx):\n        return self.X[idx], self.Y[idx]\n\n\n# -\n\ndef collate_fn(batch: List[Tuple[torch.LongTensor, torch.LongTensor]]) \\\n        -> Tuple[torch.LongTensor, torch.LongTensor]:\n    x, y = list(zip(*batch))\n    x = pad_sequence(x, batch_first=True, padding_value=0)\n    y = torch.stack(y)\n    return x,y\n\n\nclass CSVDataModule(pl.LightningDataModule):\n    def __init__(self, batch_size,data_dir):\n        super().__init__()\n        self.data_dir = data_dir\n        self.batch_size = batch_size\n\n    def setup(self, stage=None):\n        # build dataset\n        df = pd.read_csv(self.data_dir+\"train.csv\", engine = \"python\")\n        print(df.dtypes)\n        \n        # split dataset\n        self.train, self.test = model_selection.train_test_split(df, test_size = 0.2, random_state = 42)\n        print(len(self.train) , len(self.test))\n        \n    def train_dataloader(self):\n        md = SentiDs(self.train)\n        train_sampler = RandomSampler(self.train)\n        return DataLoader(md, batch_size=self.batch_size, num_workers=12, collate_fn=collate_fn,sampler = train_sampler)\n\n    def test_dataloader(self):\n        md = SentiDs(self.test)\n        test_sampler = RandomSampler(self.test)\n        return DataLoader(md, batch_size=self.batch_size, num_workers=12, collate_fn=collate_fn,sampler = test_sampler)\n\n\ndef on_batch_end(self):\n    if self.sched is not None:\n        self.sched.step()\n\n\n# # Model\n\nEPOCHS = 1\nbatch_size = 20\nnum_classes = 6\nn_train_steps = int(len(df)/batch_size*EPOCHS)\n\ndm = CSVDataModule(batch_size=batch_size, data_dir= \"/media/hdd/Datasets/jigsaw/\")\ndm.setup()\n\nmodel = LitModel(num_classes = num_classes , num_train_steps = n_train_steps)\n\nlogger = pl_loggers.CSVLogger(\"logs\", name=\"eff-b5\")\n\ntrainer = pl.Trainer(auto_select_gpus=True, gpus=1,\n                     precision=16, profiler=False,max_epochs=EPOCHS,\n                     callbacks = [pl.callbacks.ProgressBar()],\n                     automatic_optimization=True,enable_pl_optimizer=True,\n                     accelerator='ddp',\n                     plugins='ddp_sharded',\n                     logger = logger)\n\ntrainer.fit(model, dm)\n\ntrainer.test()\n\ntrainer.save_checkpoint('model1.ckpt')\n\n\n\n# # Inference\n\nbest_checkpoints = trainer.checkpoint_callback.best_model_path\n\npre_model = LitModel.load_from_checkpoint(checkpoint_path= best_checkpoints).to(\"cuda\")\n\npre_model.eval()\npre_model.freeze()\n\ntokenizer_inf= transformers.BertTokenizer.from_pretrained(\n    \"bert-base-uncased\", do_lower_case = False)\n\nmapping = [\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]\nmapping_d = {i:mapping[i] for i in range(len(mapping))}\n\ninp = \"you are an idiot dude\"\ninp = tokenizer_inf.encode(inp, add_special_tokens=True); print(inp)\npr = torch.Tensor(inp).unsqueeze(0).long();pr\n# print(tokenizer_inf.pad_token_id)\npr = pad_sequence(pr, batch_first=True, padding_value=tokenizer_inf.pad_token_id).to(\"cuda\")\noutput = pre_model(pr); output\n\nmapping_d[int(torch.argmax(output))]\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"applications/jigsawToxicCommentClassification/textClass_multiLabel.py","file_name":"textClass_multiLabel.py","file_ext":"py","file_size_in_byte":7681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"612732713","text":"import numpy as np\nimport os\n\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass FruitDataset(Dataset):\n    def __init__(self, path):\n        self.path = path\n        self.class_labels = os.listdir(path)\n        self.class_counts = [len(os.listdir(path + '/' + d))\n                             for d in os.listdir(path)]\n        self.cum_class_counts = np.cumsum(self.class_counts)\n        self.transform = transforms.ToTensor()\n\n    def __len__(self):\n        return sum(self.class_counts)\n\n    def __getitem__(self, item):\n        # Correct class is the one for which `item` divided by cumulative sum\n        # of class counts and floored is 0 for the first time. We want to keep\n        # the class label as integer\n        y = np.sum((item // self.cum_class_counts) > 0)\n        label = self.class_labels[y]\n\n        # Pick correct filename\n        if y == 0:\n            filename = os.listdir(self.path + '/' + label)[item]\n        else:\n            filename = os.listdir(\n                self.path + '/' + label\n            )[item - self.cum_class_counts[y - 1]]\n\n        img = Image.open(self.path + '/' + label + '/' + filename)\n        img = self.transform(img)\n        return {'image': img, 'y': y, 'filename': filename}\n","sub_path":"task_1_convnet/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"178890376","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# =============================================================================\n# Filename: edit_post.py\n# Author: Steve Tautonico\n# Date Created: 5/1/2019\n# Date Last Modified: 5/1/2019\n# Python Version: 3.6 - 3.7\n# =============================================================================\n\"\"\"Handles editing posts\"\"\"\n# =============================================================================\n# Imports\n# =============================================================================\nfrom flask import Blueprint, render_template, redirect, abort, flash, request, url_for\nfrom flask_login import current_user, login_required\n\nfrom app import db\nfrom app.models import Post\nfrom app.forms import EditPost\n\nblueprint = Blueprint(\"edit_post\", __name__)\n\n\n@blueprint.route('/edit_post/<id>', methods=[\"GET\", \"POST\"])\n@login_required\ndef edit_post(id):\n    post = Post.query.filter_by(id=id).first()\n    if not post:\n        abort(404)\n    if post.user_id != current_user.id:\n        flash(\"You can't edit someone else's post\", \"danger\")\n        return redirect(url_for(\"post.post\", id=id))\n    form = EditPost()\n    if form.validate_on_submit():\n        post.title = form.title.data\n        post.description = form.description.data\n        db.session.commit()\n        flash(\"Post edited successfully\", \"success\")\n        return redirect(url_for('post.post', id=id))\n    elif request.method == \"GET\":\n        form.title.data = post.title\n        form.description.data = post.description\n    return render_template(\"edit_post.html\", title=\"Edit Post\", form=form, post=post)","sub_path":"app/controllers/edit_post.py","file_name":"edit_post.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"215353822","text":"import adsk.core, adsk.fusion, adsk.cam, traceback\n\nfrom . import Fusion360CommandBase\n\ndef getInputs(command, inputs):\n    selectionInput = None\n    for inputI in inputs:\n        global commandId\n        if inputI.id == command.parentCommandDefinition.id + '_selection':\n            selectionInput = inputI\n        elif inputI.id == command.parentCommandDefinition.id + '_plane':\n            planeInput = inputI\n        elif inputI.id == command.parentCommandDefinition.id + '_spacing':\n            spacingInput = inputI\n            spacing = spacingInput.value\n        elif inputI.id == command.parentCommandDefinition.id + '_edge':\n            edgeInput = inputI\n\n    objects = getSelectedObjects(selectionInput)\n    plane = getSelectedObjects(planeInput)[0]\n\n    edge = False\n    # edge = adsk.fusion.BRepEdge.cast(edgeInput.selection(0).entity)\n\n    # if not objects or len(objects) == 0:\n    # TODO this probably requires much better error handling\n    # return\n    # return(objects, plane, edge, spacing, subAssy)\n    return plane # (objects, plane, edge, spacing)\n\ndef getSelectedObjects(selectionInput):\n    objects = []\n    for i in range(0, selectionInput.selectionCount):\n        selection = selectionInput.selection(i)\n        selectedObj = selection.entity\n        if adsk.fusion.BRepBody.cast(selectedObj) or \\\n           adsk.fusion.BRepFace.cast(selectedObj) or \\\n           adsk.fusion.Occurrence.cast(selectedObj):\n            objects.append(selectedObj)\n    return objects\n\ndef selectFile():\n    # return \"/Users/ryosuzuki/Desktop/test.svg\"\n\n    app = adsk.core.Application.get()\n    ui = app.userInterface\n\n    fileDialog = ui.createFileDialog()\n    fileDialog.isMultiSelectEnabled = False\n    fileDialog.title = \"Select SVG file\"\n    fileDialog.filter = \"*.svg\"\n    # fileDialog.initialDirectory = \"\"\n    dialogResult = fileDialog.showOpen()\n\n    if (dialogResult == adsk.core.DialogResults.DialogOK):\n        filepath = fileDialog.filename\n        return filepath\n\ndef draw(selectedPlane, selectedFile):\n    app = adsk.core.Application.get()\n    ui = app.userInterface\n    product = app.activeProduct\n    rootComp = 
product.rootComponent\n extrudes = rootComp.features.extrudeFeatures\n design = adsk.fusion.Design.cast(product)\n\n rootComp = design.rootComponent\n extrudes = rootComp.features.extrudeFeatures\n sketches = rootComp.sketches\n\n planes = rootComp.constructionPlanes\n planeInput = planes.createInput()\n offset = adsk.core.ValueInput.createByReal(0)\n planeInput.setByOffset(selectedPlane, offset)\n # planeInput.setByOffset(basePlane, offsetValue)\n planeOne = planes.add(planeInput)\n\n sketch = sketches.add(planeOne)\n svg = sketch.importSVG(selectedFile, -30, -30, 0.01)\n\n materialLib = app.materialLibraries.itemByName(\"Fusion 360 Appearance Library\")\n appearance = materialLib.appearances.itemByName(\"Plastic - Matte (Yellow)\") # \"Paint - Enamel Glossy (Yellow)\"\n\n toolBodies = adsk.core.ObjectCollection.create()\n for i in range(sketch.profiles.count):\n prof = sketch.profiles.item(i)\n distance = adsk.core.ValueInput.createByReal(0.1)\n extInput = extrudes.createInput(prof, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)\n extInput.setDistanceExtent(False, distance)\n extrude1 = extrudes.add(extInput)\n for body in extrude1.bodies:\n body.appearance = appearance\n body.name = \"sheet\"\n toolBodies.add(body)\n\n\ndef healthState(object):\n health = object.healthState\n if health == adsk.fusion.FeatureHealthStates.ErrorFeatureHealthState or health == adsk.fusion.FeatureHealthStates.WarningFeatureHealthState:\n message = planeOne.errorOrWarningMessage\n\ndef componentsFromBodies(items):\n components = []\n for item in items:\n originalBody = item[1]\n copiedBody = originalBody.copyToComponent(originalBody.parentComponent)\n outputBody = originalBody.createComponent()\n\n component = {\n 'output_body': outputBody,\n 'copied_body': copiedBody,\n 'mid_face': item[0],\n 'end_face': item[2]\n }\n components.append(component)\n return components\n\n\n\nclass TestCommand(Fusion360CommandBase.Fusion360CommandBase):\n def onPreview(self, command, inputs):\n pass\n\n def onDestroy(self, command, inputs, reason_):\n pass\n\n def onInputChanged(self, command, inputs, changedInput):\n pass\n\n def onExecute(self, command, inputs):\n # (objects, plane, edge, spacing) = getInputs(command, inputs)\n plane = getInputs(command, inputs)\n filepath = selectFile()\n if (plane):\n draw(plane, filepath)\n\n return\n\n\n app = adsk.core.Application.get()\n ui = app.userInterface\n product = app.activeProduct\n design = adsk.fusion.Design.cast(product)\n rootComp = design.rootComponent\n\n targetBody = rootComp.bRepBodies.item(0)\n spacing = 10\n quantity = 10\n plane = targetBody.parentComponent.xZConstructionPlane\n thickness = 1\n\n results = slice(targetBody, spacing, quantity, plane, thickness)\n\n components = componentsFromBodies(results)\n\n # futil.combine_feature(point[1], tool_bodies, adsk.fusion.FeatureOperations.CutFeatureOperation)\n\n combineFeatures = targetBody.parentComponent.features.combineFeatures\n\n combineTools = adsk.core.ObjectCollection.create()\n for tool in tool_bodies:\n combineTools.add(tool)\n\n # Create Combine Feature\n combine_input = combine_features.createInput(target_body, combine_tools)\n combine_input.operation = operation\n combine_features.add(combine_input)\n\n def onCreate(self, command, inputs):\n selectionPlaneInput = inputs.addSelectionInput(command.parentCommandDefinition.id + '_plane', 'Select Base Face', 'Select Face to mate to')\n selectionPlaneInput.setSelectionLimits(1,1)\n selectionPlaneInput.addSelectionFilter('PlanarFaces')\n\n selectionInput 
= inputs.addSelectionInput(command.parentCommandDefinition.id + '_selection', 'Select other faces', 'Select bodies or occurrences')\n selectionInput.setSelectionLimits(1,0)\n selectionInput.addSelectionFilter('PlanarFaces')\n\n selectionEdgeInput = inputs.addSelectionInput(command.parentCommandDefinition.id + '_edge', 'Select Direction (edge)', 'Select an edge to define spacing direction')\n selectionEdgeInput.setSelectionLimits(1,1)\n selectionEdgeInput.addSelectionFilter('LinearEdges')\n\n app = adsk.core.Application.get()\n product = app.activeProduct\n design = adsk.fusion.Design.cast(product)\n unitsMgr = design.unitsManager\n spacingInput = inputs.addValueInput(command.parentCommandDefinition.id + '_spacing', 'Component Spacing', unitsMgr.defaultLengthUnits, adsk.core.ValueInput.createByReal(2.54))\n\n # # Add construction plane by angle\n # angle = adsk.core.ValueInput.createByString('30.0 deg')\n # planeInput.setByAngle(sketchLineOne, angle, prof)\n # planes.add(planeInput)\n\n # # Add construction plane by two planes\n # planeInput.setByTwoPlanes(prof, planeOne)\n # planes.add(planeInput)\n\n # # Add construction plane by tangent\n # cylinderFace = extrude.sideFaces.item(0)\n # planeInput.setByTangent(cylinderFace, angle, rootComp.xYConstructionPlane)\n # planes.add(planeInput)\n\n # # Add construction plane by two edges\n # planeInput.setByTwoEdges(sketchLineOne, sketchLineTwo)\n # planes.add(planeInput)\n\n # # Add construction plane by three points\n # planeInput.setByThreePoints(sketchPointOne, sketchPointTwo, sketchPointThree)\n # planes.add(planeInput)\n\n # # Add construction plane by tangent at point\n # planeInput.setByTangentAtPoint(cylinderFace, sketchPointOne)\n # planes.add(planeInput)\n\n # # Add construction plane by distance on path\n # distance = adsk.core.ValueInput.createByReal(1.0)\n # planeInput.setByDistanceOnPath(sketchLineOne, distance)\n # planes.add(planeInput)\n\n\ndef slice(targetBody, spacing, qty, basePlane, thickness):\n targetComp = targetBody.parentComponent\n # Feature Collections\n planes = targetComp.constructionPlanes\n sketches = targetComp.sketches\n thickenFeatures = targetComp.features.thickenFeatures\n slices = []\n thickness /= 2\n\n for i in range(qty):\n planeInput = planes.createInput()\n offset = adsk.core.ValueInput.createByReal(i * spacing)\n planeInput.setByOffset(basePlane, offset)\n plane = planes.add(planeInput)\n\n sketch = sketches.add(plane)\n for curve in sketch.sketchCurves:\n curve.isConstruction = True\n sketch.projectCutEdges(targetBody)\n\n for profile in sketch.profiles:\n surfaces = adsk.core.ObjectCollection.create()\n patches = targetComp.features.patchFeatures\n patchInput = patches.createInput(profile, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)\n patchFeature = patches.add(patchInput)\n\n for face in patchFeature.faces:\n point = face.pointOnFace\n containment = targetBody.pointContainment(point)\n if containment == adsk.fusion.PointContainment.PointInsidePointContainment:\n surfaces.add(face)\n thickness = adsk.core.ValueInput.createByReal((thickness/2))\n thickenInput = thickenFeatures.createInput(surfaces, thickness, True,adsk.fusion.FeatureOperations.NewBodyFeatureOperation)\n thickenFeatures = thickenFeatures.add(thickenInput)\n newBody = thickenFeatures.bodies[0]\n ret = face.evaluator.getNormalAtPoint(face.pointOnFace)\n direction = ret[1]\n # Not currently working or used\n # end_face = find_end_face(thickenFeatures, direction)\n\n # slices.append((face, newBody, end_face))\n 
slices.append((face, newBody))\n\n else:\n patchFeature.deleteMe()\n\n return slices\n","sub_path":"TestCommand.py","file_name":"TestCommand.py","file_ext":"py","file_size_in_byte":9478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"167960936","text":"from r2.lib.configparse import ConfigValue\nfrom r2.lib.js import Module\nfrom r2.lib.plugin import Plugin\n\nfrom reddit_dfp import queue\n\nclass Dfp(Plugin):\n needs_static_build = False\n\n config = {\n ConfigValue.int: [\n \"dfp_network_code\",\n \"dfp_selfserve_salesperson_id\",\n \"dfp_selfserve_trafficker_id\",\n \"dfp_selfserve_template_id\",\n ],\n ConfigValue.str: [\n \"dfp_project_id\",\n \"dfp_client_id\",\n \"dfp_service_account_email\",\n \"dfp_cert_fingerprint\",\n \"dfp_service_version\",\n ],\n }\n\n def declare_queues(self, queues):\n from r2.config.queues import MessageQueue\n\n queues.declare({\n queue.DFP_QUEUE: MessageQueue(bind_to_self=True),\n })\n\n def add_routes(self, mc):\n mc(\"/api/dfp/link\", controller=\"link\", action=\"link_from_id\")\n\n def load_controllers(self):\n from reddit_dfp.controllers.linkcontroller import LinkController\n from reddit_dfp.hooks import hooks\n\n hooks.register_all()\n","sub_path":"reddit_dfp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"200574144","text":"# -*- encoding: utf-8 -*-\n# Copyright 2016 Vinzor Co.,Ltd.\n#\n# Schedule view\n#\n# 2016/3/1 fengyc : Init\n\nimport json\nfrom flask import render_template, abort, request, jsonify\nfrom flask.ext.login import login_required, current_user\n\nfrom ..auth.principal import admin_permission\nfrom . import schedule\nfrom ..celery_sqlalchmey_scheduler_models import DatabaseSchedulerEntry, CrontabSchedule, IntervalSchedule\nfrom ..models import DesktopTask, DesktopType, DesktopState, TaskState, StageResult, Course\nfrom sqlalchemy import or_\nfrom .. 
import db\nfrom sqlalchemy import asc\nfrom ..jinja_filters import datetime_format\nfrom ..log.utils import UserActionLogger\n\nua_logger = UserActionLogger()\n\n\n@schedule.route('/tasks', methods=['GET'])\n@login_required\ndef tasks():\n    \"\"\" Show all running tasks\n    \"\"\"\n    # Permission check\n    if not admin_permission.can():\n        abort(403)\n\n    return render_template('schedule/tasks.html')\n\n\n@schedule.route('/task_table', methods=['GET'])\n@login_required\ndef task_table():\n    \"\"\" Fetch the tasks needed for the requested page of task_table\n    \"\"\"\n    # Permission check\n    if not admin_permission.can():\n        abort(403)\n\n    # Sort attribute corresponding to each column index\n    col_map = {\"0\": \"id\",\n               \"1\": \"updated_at\",\n               \"2\": \"action\",\n               \"3\": \"state\",\n               \"4\": \"stage\",\n               \"5\": \"retries\",\n               \"6\": \"enabled\",\n               \"7\": \"result\",\n               \"8\": \"id\",\n               \"9\": \"id\"}\n\n    sEcho = request.args.get('sEcho')\n    iDisplayStart = request.args.get(\"iDisplayStart\")\n    iDisplayLength = request.args.get(\"iDisplayLength\")\n    sSearch = request.args.get(\"sSearch\")\n    iSortCol = request.args.get(\"iSortCol_0\")\n    sSortDir = request.args.get(\"sSortDir_0\")\n    if iSortCol is None:\n        iSortCol = '1'\n    if sSortDir is None:\n        sSortDir = \"desc\"\n    sort_col = col_map[iSortCol]\n\n    if sSearch == '' or sSearch is None:\n        query = DesktopTask.query\n        # query = DesktopTask.query.order_by(DesktopTask.result_order, sort_col)  # pin tasks whose result is ERROR to the top\n    else:\n        query = DesktopTask.query.filter(or_(\n            # DesktopTask.id.like(\"%\"+sSearch+\"%\"),\n            DesktopTask.action.like(\"%\"+sSearch+\"%\"),\n            DesktopTask.state.like(\"%\"+sSearch+\"%\"),\n            DesktopTask.stage.like(\"%\"+sSearch+\"%\"),\n            DesktopTask.retries.like(\"%\"+sSearch+\"%\"),\n            # DesktopTask.enabled.like(\"%\"+sSearch+\"%\"),\n            DesktopTask.result.like(\"%\"+sSearch+\"%\")))\n            # Course.query.filter_by(id=json.loads(DesktopTask.context).course_id).first().name.like(\"%\"+sSearch+\"%\")))\n        # query.order_by(DesktopTask.result_order, sort_col)  # pin tasks whose result is ERROR to the top\n\n        courses = Course.query.filter(Course.name.like(\"%\"+sSearch+\"%\")).all()\n        for course in courses:\n            query_task_for_course = DesktopTask.query.filter(or_(\n                DesktopTask.context.like('%\"course\": '+str(course.id)+',%'),\n                DesktopTask.context.like('%\"course\": '+str(course.id)+'}%')))\n            query = query.union(query_task_for_course)\n\n    sort_col = getattr(DesktopTask, sort_col)\n    if sSortDir == \"desc\":\n        query = query.order_by(sort_col.desc())\n    else:\n        query = query.order_by(sort_col.asc())\n\n    page = int(iDisplayStart) // int(iDisplayLength)\n    pagination = query.paginate(page+1, int(iDisplayLength), False)\n    task_list = pagination.items\n    task_json_list = []\n    for item in task_list:\n        task_json_list.append(\n            {\n                'id': item.id,\n                'updated_at': datetime_format(item.updated_at),\n                'idAndAction': {\n                    'id':item.id,\n                    'action':item.action\n                },\n                'state': TaskState.get_state_chs(item.state),\n                'stage': item.stage,\n                'retries':item.retries,\n                'enabled':item.enabled,\n                'result':item.result,\n                'context':item.context,\n                'id_enabled_result':{\n                    'id':item.id,\n                    'enabled':item.enabled,\n                    'result':item.result\n                }\n            }\n        )\n    data = {\"sEcho\": sEcho,\n            \"iTotalRecords\": str(pagination.total),\n            \"iTotalDisplayRecords\": str(pagination.total),\n            \"aaData\": task_json_list\n            }\n    db.session.remove()\n    return jsonify(data)\n\n\n@schedule.route('/tasks/<int:id>', methods=['GET'])\n@login_required\ndef task_detail(id):\n    \"\"\" Show task detail\n    \"\"\"\n    # Permission check\n    if not admin_permission.can():\n        abort(403)\n\n    task = DesktopTask.query.filter_by(id=id).first()\n    if task is None:\n        abort(404)\n    stage_results = task.stage_results\n    task_context_json = json.loads(task.context)\n    return render_template('schedule/task_detail.html', stage_results=stage_results, task_context_json=task_context_json)\n\n\n@schedule.route('/tasks/', methods=['DELETE'])\n@login_required\ndef delete_task():\n    # Permission check\n    if not admin_permission.can():\n        abort(403)\n\n    result_json = {\n        \"status\": \"success\",\n        \"data\": {\n            \"success_list\": [],\n            \"fail_list\": []\n        }\n    }\n    # The request body contains the list of task ids to delete\n    tasks = request.json\n    for id in tasks:\n        task = DesktopTask.query.filter_by(id=int(id)).first()\n        if task:\n            stage_results = StageResult.query.filter_by(task_id = task.id).all()\n            for stage_result in stage_results:\n                db.session.delete(stage_result)\n            task_id = task.id  # capture before the row is deleted\n            db.session.delete(task)\n            db.session.commit()\n            ua_logger.info(current_user, \"删除任务: %s\" % task_id)\n            result_json[\"data\"][\"success_list\"].append(id)\n        else:\n            result_json[\"data\"][\"fail_list\"].append(id)\n    return jsonify(result_json)\n\n\n@schedule.route('/tasks/<action>', methods=['PUT'])\n@login_required\ndef tasks_action(action):\n    # Permission check\n    if not admin_permission.can():\n        abort(403)\n\n    result_json = {\n        \"status\": \"success\",\n        \"data\": {\n            \"success_list\": [],\n            \"fail_list\": []\n        }\n    }\n    tasks = request.json\n    for id in tasks:\n        task = DesktopTask.query.filter_by(id=int(id)).first()\n        if task:\n            if action == \"resume\":\n                # Continue the task from the fail stage\n                task.resume()\n                ua_logger.info(current_user, \"重做任务: %s\" % task.id)\n            elif action == \"reset\":\n                # Reset the task to the initial state\n                task.reset()\n                ua_logger.info(current_user, \"重置任务: %s\" % task.id)\n            elif action == \"disable\":\n                # Disable the task\n                ua_logger.info(current_user, \"禁用任务: %s\" % task.id)\n                task.disable()\n            elif action == \"enable\":\n                # Enable the task\n                ua_logger.info(current_user, \"启用任务: %s\" % task.id)\n                task.enable()\n            result_json[\"data\"][\"success_list\"].append(id)\n        else:\n            result_json[\"data\"][\"fail_list\"].append(id)\n    return jsonify(result_json)\n\n\n\n\n\n\n","sub_path":"src/web/app/schedule/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
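The task_table view in the record above answers the legacy DataTables 1.9 server-side protocol: the client sends sEcho, iDisplayStart, iDisplayLength, sSearch, iSortCol_0 and sSortDir_0, and expects sEcho, iTotalRecords, iTotalDisplayRecords and aaData in return. A minimal framework-free sketch of that response contract (the helper name is hypothetical; rows and total stand in for the SQLAlchemy pagination results):

    def datatable_response(args, rows, total):
        """Shape already-paginated rows into a DataTables 1.9 reply."""
        # DataTables sends the paging window as offset/length; the view above
        # turns it into a 1-based page: iDisplayStart // iDisplayLength + 1.
        return {
            "sEcho": args.get("sEcho"),          # echoed back verbatim
            "iTotalRecords": str(total),         # row count before filtering
            "iTotalDisplayRecords": str(total),  # after filtering (same here, as above)
            "aaData": rows,                      # one dict per rendered table row
        }

The view reports the same total for both count fields, which is why the sketch does too; a stricter implementation would run a second, unfiltered count query.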
+{"seq_id":"30367677","text":"from __future__ import annotations\nfrom typing import Optional, List, Any\nimport abc\n\nfrom . import interface\n\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n    from .main import Storage\n\n\nclass QueryBuilder(interface.QueryBuilder):\n    def __init__(self, storage: Storage):\n        self.storage = storage\n        self.conditions: List[str] = []\n        self.params: List[Any] = []\n        self.offset: Optional[int] = None\n        self.limit: Optional[int] = None\n        self.order_conditions: List[str] = []\n\n    def equals(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        return f\"{field} = ${len(self.params)}\"\n\n    def not_equals(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        return f\"{field} != ${len(self.params)}\"\n\n    def greater(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        return f\"{field} > ${len(self.params)}\"\n\n    def greater_or_equals(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        return f\"{field} >= ${len(self.params)}\"\n\n    def less(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        return f\"{field} < ${len(self.params)}\"\n\n    def less_or_equals(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        return f\"{field} <= ${len(self.params)}\"\n\n    def contains(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        return f\"${len(self.params)} = ANY({field})\"\n\n    def like(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        return f\"{field} ILIKE '%' || ${len(self.params)} || '%'\"\n\n    def in_(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        return f\"{field} = ANY(${len(self.params)})\"\n\n    def not_in_(self, field: str, value: Any) -> str:\n        self.params.append(value)\n        # '!= ANY(array)' is true whenever the array holds one differing\n        # element; NOT IN needs '!= ALL(array)'.\n        return f\"{field} != ALL(${len(self.params)})\"\n\n    def and_(self, *conditions) -> str:\n        result = \"(\"\n        for i in range(len(conditions)):\n            if i != 0:\n                result += \" AND \"\n            result += conditions[i]\n        result += \")\"\n        return result\n\n    def or_(self, *conditions) -> str:\n        result = \"(\"\n        for i in range(len(conditions)):\n            if i != 0:\n                result += \" OR \"\n            result += conditions[i]\n        result += \")\"\n        return result\n\n    def add(self, condition: str) -> None:\n        self.conditions.append(condition)\n\n    def paginate(self, offset: int, limit: int) -> None:\n        self.offset = offset\n        self.limit = limit\n\n    def order(self, field: str, value: str) -> None:\n        self.order_conditions.append(f\"{field} {value}\")\n\n    def _format_query(self, query: str, count_only: bool = False) -> str:\n        if len(self.conditions) > 0:\n            query += \" WHERE \"\n            for i in range(len(self.conditions)):\n                if i != 0:\n                    query += \" AND \"\n                query += self.conditions[i]\n        if not count_only:\n            if len(self.order_conditions) > 0:\n                query += \" ORDER BY \"\n                for i in range(len(self.order_conditions)):\n                    if i != 0:\n                        query += \", \"\n                    query += self.order_conditions[i]\n            if self.offset is not None:\n                query += f\" OFFSET {self.offset}\"\n            if self.limit is not None:\n                query += f\" LIMIT {self.limit}\"\n        return query\n\n    async def count(self) -> int:\n        pass\n\n    async def fetch(self) -> List[Any]:\n        pass\n\n    async def fetch_one(self) -> Any:\n        pass","sub_path":"storage/query_builder.py","file_name":"query_builder.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
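The builder in the record above renders each condition to a Postgres-style $n placeholder as it appends the bound value to self.params. A hypothetical session showing how the pieces compose (storage is stubbed with None, which __init__ merely stores; _format_query is called directly only for illustration):

    qb = QueryBuilder(storage=None)
    qb.add(qb.and_(qb.equals("status", "active"),
                   qb.like("name", "smith")))
    qb.order("created_at", "DESC")
    qb.paginate(offset=20, limit=10)
    print(qb._format_query("SELECT * FROM users"))
    # -> SELECT * FROM users WHERE (status = $1 AND name ILIKE '%' || $2 || '%')
    #    ORDER BY created_at DESC OFFSET 20 LIMIT 10   (printed as one line)
    print(qb.params)  # ['active', 'smith']

Because every condition method mutates self.params as a side effect, the placeholder numbering is only correct if conditions are rendered in the order they are composed; a builder instance should not be shared across queries.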
+{"seq_id":"470002433","text":"from django.conf import settings\nimport WRCCUtils, WRCCData, AcisWS\nimport copy\n\ntoday = WRCCUtils.set_back_date(0)\ntoday_year = today[0:4]\ntoday_month = today[5:7]\ntoday_day = today[8:10]\nbegin_10yr = WRCCUtils.set_back_date(3660)\nyesterday = 
WRCCUtils.set_back_date(1)\nfourtnight = WRCCUtils.set_back_date(14)\n\n\n\ndef create_kml_file(area_type, overlay_state):\n kml_file_name = overlay_state + '_' + area_type + '.kml'\n kml_file_path = settings.TMP_URL + kml_file_name\n status = WRCCUtils.generate_kml_file(area_type, overlay_state, kml_file_name, settings.TEMP_DIR)\n if status != 'Success':\n return 'ERROR: ' + status\n return kml_file_path\n\ndef set_GET(request):\n try:\n rm = request.method\n except:\n def Get(key, default):\n if key in request.keys():\n return request[key]\n else:\n return default\n return Get\n\n if rm == 'GET':\n Get = getattr(request.GET, 'get')\n elif rm == 'POST':\n Get = getattr(request.POST, 'get')\n return Get\n\ndef set_GET_list(request):\n try:\n rm = request.method\n except:\n def Get(key, default):\n if key in request.keys():\n val = request[key]\n if isinstance(request[key],basestring):\n val = request[key].replace(' ','').split(',')\n return val\n else:\n return default\n return Get\n\n if rm == 'GET':\n Getlist = getattr(request.GET, 'getlist')\n elif rm == 'POST':\n Getlist = getattr(request.POST, 'getlist')\n return Getlist\n\ndef set_min_max_dates(initial):\n sd = '9999-99-99';ed = '9999-99-99'\n sd_fut = sd; ed_fut = ed\n data_type = 'station'\n if 'location' in initial.keys():\n data_type = 'grid'\n if initial['app_name'] in ['monthly_spatial_summary', 'temporal_summary']:\n data_type = 'grid'\n if 'data_type' in initial.keys() and initial['data_type'] == 'grid':\n data_type = 'grid'\n if 'station_id' in initial.keys():\n stn_json = settings.MEDIA_DIR + '/json/US_station_id.json'\n stn_id, stn_name = WRCCUtils.find_id_and_name(initial['station_id'],stn_json)\n els = []\n if 'variable' in initial.keys():\n els = [initial['variable']]\n if 'variables' in initial.keys():\n els = initial['variables']\n vd, no_vd_els = WRCCUtils.find_valid_daterange(stn_id,el_list=els,max_or_min='min')\n sd = vd[0];ed = vd[1]\n #sd_fut = sd;ed_fut = ed\n elif data_type == 'grid':\n sd = WRCCData.GRID_CHOICES[str(initial['grid'])][3][0][0]\n #ed = WRCCUtils.advance_date(sd,10*365,'forward')\n ed = WRCCData.GRID_CHOICES[str(initial['grid'])][3][0][1]\n #sd_fut = sd;ed_fut = ed\n if len(WRCCData.GRID_CHOICES[initial['grid']][3]) == 2:\n sd_fut = WRCCData.GRID_CHOICES[initial['grid']][3][1][0]\n #ed_fut = WRCCUtils.advance_date(sd,10*365,'forward')\n ed_fut = WRCCData.GRID_CHOICES[initial['grid']][3][1][1]\n else:\n if 'data_type' in initial.keys() and initial['data_type'] == 'station':\n sd = '1850-01-01'\n ed = today\n return sd, ed, sd_fut, ed_fut\n\ndef set_initial(request,app_name):\n '''\n Set html form\n Args:\n request: django request object\n app_name: application, one of\n single_lister, multi_lister, station_finder\n map_overlay,\n sf_download\n spatial_summary, temporal_summary\n monthly_summary, climatology\n data_comparison, liklihood,\n data_download\n Returns:\n two dictionaries\n initial: form input\n '''\n initial = {}\n initial['app_name'] = app_name\n Get = set_GET(request)\n Getlist = set_GET_list(request)\n #Set area type: station_id(s), location, basin,...\n area_type = None\n if app_name in ['single_lister','climatology','monthly_summary', 'seasonal_summary','single_year']:\n initial['area_type'] = Get('area_type','station_id')\n elif app_name in ['data_comparison']:\n initial['area_type'] = 'location'\n else:\n initial['area_type'] = Get('area_type','state')\n #Set todays date parameters\n initial['today_year'] = today_year\n initial['today_month'] = today_month\n initial['today_day'] 
= today_day\n #Set area depending on area_type\n if app_name == 'data_comparison':\n location = Get('location',None)\n station_id = Get('station_id',None)\n if location is None and station_id is not None:\n #Link from station finder,\n #set location to station lon, lat if we are\n stn_id, stn_name = WRCCUtils.find_id_and_name(station_id,settings.MEDIA_DIR + '/json/US_station_id.json')\n meta = AcisWS.StnMeta({'sids':stn_id,'meta':'ll'})\n ll = None\n ll = str(meta['meta'][0]['ll'][0]) + ',' + str(meta['meta'][0]['ll'][1])\n initial['location'] = ll\n else:\n initial[str(initial['area_type'])] = Get(str(initial['area_type']), WRCCData.AREA_DEFAULTS[str(initial['area_type'])])\n else:\n initial[str(initial['area_type'])] = Get(str(initial['area_type']), WRCCData.AREA_DEFAULTS[str(initial['area_type'])])\n initial['area_type_label'] = WRCCData.DISPLAY_PARAMS[initial['area_type']]\n initial['area_type_value'] = initial[str(initial['area_type'])]\n\n #Set data type and map parameters\n if initial['area_type'] in ['station_id','station_ids']:\n initial['autofill_list'] = 'US_' + initial['area_type']\n initial['data_type'] = 'station'\n elif initial['area_type'] in ['location','locations']:\n initial['data_type'] = 'grid'\n elif initial['area_type'] in ['basin','county_warning_area','county','climate_division','state','shape']:\n initial['autofill_list'] = 'US_' + initial['area_type']\n initial['data_type'] = Get('data_type','station')\n if app_name in ['temporal_summary','monthly_spatial_summary','data_comparison']:\n initial['data_type'] = 'grid'\n if app_name in ['station_finder','sf_download']:\n initial['data_type'] = 'station'\n #Grid\n if app_name not in ['station_finder', 'sf_download']:\n initial['grid'] = Get('grid','1')\n #Set up map parameters\n initial['overlay_state'] = Get('overlay_state','nv').lower()\n initial['host'] = settings.HOST\n #Create kml files for oerlay state\n for at in ['basin', 'county', 'county_warning_area', 'climate_division']:\n kml_file_path = create_kml_file(at, initial['overlay_state'])\n if initial['area_type'] == at:\n initial['kml_file_path'] = kml_file_path\n #If station_finder download, we need to set the station_ids\n #and override the original area type fields\n if app_name == 'sf_download':\n #delete old are type\n del initial[str(initial['area_type'])]\n #set new area params\n initial['station_ids'] = str(Get('station_ids_string',''))\n initial['station_ids_string'] = initial['station_ids']\n initial['area_type'] = 'station_ids'\n initial['area_type_label'] = 'Station IDs'\n initial['area_type_value'] = initial['station_ids']\n initial['station_json'] = Get('station_json','')\n\n #If station finder set hidden var station_ids_string for results\n if app_name == 'station_finder':\n initial['station_ids_string'] = str(Get('station_ids_string',''))\n #Set variable(s)--> always as list if multiple\n if app_name == 'map_overlay':\n initial['variables'] = Get('variables','maxt,mint,pcpn').split(',')\n initial['variables_str'] = ','.join(initial['variables'])\n elif app_name in ['monthly_spatial_summary','monthly_summary','data_comparison', 'seasonal_summary','single_year']:\n initial['variable'] = Get('variable',None)\n if initial['variable'] is not None and len(initial['variable'].split(',')) > 1:\n initial['variable'] = str(initial['variable'].split(',')[0])\n if initial['variable'] is None:\n #Link from station finder\n initial['variable'] = Get('variables','pcpn')\n if len(initial['variable'].split(',')) > 1:\n initial['variable'] = 
str(initial['variable'].split(',')[0])\n else:\n els = Getlist('variables',None)\n if not els:\n els = Get('variables',None)\n if not els:\n els = ['maxt','mint','pcpn']\n elif isinstance(els, basestring):\n els = els.replace(' ','').split(',')\n elif isinstance(els, list) and len(els) == 1 and len(els[0].split(',')) > 1:\n els = els[0].replace(' ','').split(',')\n elif isinstance(els, basestring):\n els = els.replace(' ','').split(',')\n initial['variables'] = [str(el) for el in els]\n initial['variables_str'] = ','.join(initial['variables'])\n #Set units\n initial['units'] = Get('units','english')\n\n #Set degree days\n if app_name not in ['station_finder', 'monthly_summary', 'climatology', 'data_comparison']:\n initial['add_degree_days'] = Get('add_degree_days', 'F')\n if initial['units'] == 'metric':\n initial['degree_days'] = Get('degree_days', 'gdd13,hdd21').replace(', ', ',')\n else:\n initial['degree_days'] = Get('degree_days', 'gdd55,hdd70').replace(', ',',')\n\n #Set dates\n sd, ed, sd_fut, ed_fut = set_min_max_dates(initial)\n initial['min_date'] = WRCCUtils.format_date_string(sd,'-')\n initial['max_date'] = WRCCUtils.format_date_string(ed,'-')\n initial['min_date_fut'] = WRCCUtils.format_date_string(sd_fut,'-')\n initial['max_date_fut'] = WRCCUtils.format_date_string(ed_fut,'-');\n if app_name in ['monthly_summary','climatology']:\n initial['start_year'] = Get('start_year', None)\n if initial['start_year'] is None:\n #Link from station finder\n initial['start_year'] = Get('start_date', '9999')[0:4]\n if initial['start_year'] == '9999':initial['start_year'] = sd[0:4]\n initial['end_year'] = Get('end_year', None)\n if initial['end_year'] is None:\n #Link from station finder\n initial['end_year'] = Get('end_date', '9999')[0:4]\n if initial['end_year'] == '9999':\n ey = str(int(initial['start_year']) + 10)\n if int(ey) >= int(sd[0:4]) and int(ey) <= int(ed[0:4]):\n initial['end_year'] = ey\n elif int(ey) >=int(sd_fut[0:4]) and int(ey) < int(ed_fut[0:4]):\n initial['end_year'] = ey\n else:\n initial['end_year'] = ed[0:4]\n initial['min_year'] = Get('min_year',sd[0:4])\n initial['max_year'] = Get('max_year', ed[0:4])\n initial['min_year_fut'] = sd_fut[0:4]\n initial['max_year_fut'] = ed_fut[0:4]\n elif app_name == 'monthly_spatial_summary':\n initial['year'] = Get('year',str(int(ed[0:4]) - 1))\n initial['min_year'] = Get('min_year',sd[0:4])\n initial['max_year'] = Get('max_year', ed[0:4])\n initial['min_year_fut'] = sd_fut[0:4]\n initial['max_year_fut'] = ed_fut[0:4]\n initial['season'] = Get('season','1')\n elif app_name in ['seasonal_summary', 'single_year']:\n initial['start_year'] = Get('start_year',sd[0:4])\n initial['end_year'] = Get('end_year',ed[0:4])\n initial['start_month'] = Get('start_month', '1')\n initial['start_day'] = Get('start_day', '1')\n initial['min_year_fut'] = sd_fut[0:4]\n initial['max_year_fut'] = ed_fut[0:4]\n if app_name == 'seasonal_summary':\n initial['min_year'] = Get('min_year',sd[0:4])\n initial['max_year'] = Get('max_year', ed[0:4])\n initial['end_month'] = Get('end_month', '1')\n initial['end_day'] = Get('end_day', '31')\n if app_name in ['single_year']:\n '''\n if initial['start_year'].lower() != 'por':\n initial['min_year'] = initial['start_year']\n else:\n initial['min_year'] = Get('min_year',sd[0:4])\n if initial['end_year'].lower() != 'por':\n initial['max_year'] = initial['end_year']\n else:\n initial['max_year'] = Get('max_year', ed[0:4])\n '''\n initial['min_year'] = Get('min_year',sd[0:4])\n initial['max_year'] = Get('max_year', ed[0:4])\n 
#Plotting vars\n initial['show_climatology'] = Get('show_climatology','F')\n initial['show_percentile_5'] = Get('show_percentile_5','F')\n initial['show_percentile_10'] = Get('show_percentile_10','F')\n initial['show_percentile_25'] = Get('show_percentile_25','F')\n initial['target_year'] = Get('target_year_figure', None)\n if initial['target_year'] is None:\n if initial['start_year'].lower()!= 'por':\n initial['target_year'] = Get('target_year_form',initial['start_year'])\n else:\n initial['target_year'] = Get('target_year_form',initial['min_year'])\n if initial['variable'] in ['pcpn','snow','evap','pet']:\n initial['calculation'] = Get('calculation','cumulative')\n else:\n initial['calculation'] = Get('calculation','values')\n else:\n initial['start_date'] = Get('start_date', WRCCUtils.format_date_string(fourtnight,'-'))\n initial['end_date'] = Get('end_date', WRCCUtils.format_date_string(yesterday,'-'))\n #data windows and flags\n sw = '01-01'; ew = '01-31'\n if 'start_date' in initial.keys() and 'end_date' in initial.keys():\n if initial['start_date'] and initial['end_date']:\n sw, ew = WRCCUtils.set_start_end_window(initial['start_date'],initial['end_date'])\n if app_name in ['single_lister', 'multi_lister','map_overlay']:\n initial['start_window'] = Get('start_window', sw)\n initial['end_window'] = Get('end_window',ew)\n initial['temporal_resolution'] = Get('temporal_resolution','dly')\n initial['show_flags'] = Get('show_flags', 'F')\n initial['show_observation_time'] = Get('show_observation_time', 'F')\n if app_name in ['station_finder']:\n initial['start_window'] = Get('start_window', sw)\n initial['end_window'] = Get('end_window',ew)\n #data summaries\n if app_name in ['monthly_spatial_summary','temporal_summary', 'seasonal_summary']:\n initial['data_summary'] = Get('data_summary', 'temporal_summary')\n elif app_name in ['spatial_summary','multi_lister','map_overlay']:\n initial['data_summary'] = Get('data_summary', 'spatial_summary')\n else:\n initial['data_summary'] = Get('data_summary', 'none')\n\n if app_name in ['temporal_summary', 'monthly_spatial_summary','seasonal_summary', 'sf_download']:\n if 'variable' in initial.keys() and initial['variable'] in ['pcpn','snow','evap','pet']:\n initial['temporal_summary'] = Get('temporal_summary', 'sum')\n else:\n initial['temporal_summary'] = Get('temporal_summary', 'mean')\n else:\n initial['temporal_summary'] = Get('temporal_summary', 'mean')\n if app_name in ['single_lister', 'multi_lister','spatial_summary','sf_download','map_overlay']:\n initial['spatial_summary'] = Get('spatial_summary', 'mean')\n\n #download options\n if app_name in ['single_lister','multi_lister']:\n initial['data_format'] = Get('data_format', 'html')\n else:\n initial['data_format'] = Get('data_format', 'xl')\n\n\n if app_name in ['single_lister','multi_lister','station_finder','sf_download','map_overlay']:\n initial['output_format'] = Get('output_format', 'verbose')\n initial['delimiter'] = Get('delimiter', 'space')\n initial['output_file_name'] = Get('output_file_name', 'Output')\n initial['user_name'] = Get('user_name', 'Your Name')\n initial['user_email'] = Get('user_email', 'Your Email')\n\n #Set app specific params\n if app_name == 'monthly_spatial_summary':\n initial['area_reduce'] = Get('area_reduce','climate_division')\n initial['area_statistic'] = Get('area_statistic','mean')\n if app_name in ['multi_lister','spatial_summary','station_finder']:\n initial['feature_id'] = 1\n if app_name in ['monthly_summary','climatology','sf_link']:\n 
initial['max_missing_days'] = Get('max_missing_days', '5')\n if app_name in ['station_finder','map_overlay','sf_download']:\n initial['variables_constraints'] = Get('variables_constraints', 'all')\n initial['dates_constraints'] = Get('dates_constraints', 'all')\n initial['display'] = Get('display', 'map')\n all_meta = ['name','state','ll','elev','ids','networks','valid_daterange']\n #all_meta = ['name','state','ll','elev','ids','networks']\n initial['metadata_keys'] = Getlist('metadata_keys',all_meta)\n initial['metadata_keys_str'] = ','.join(initial['metadata_keys'])\n initial['metadata_names'] = [WRCCData.DISPLAY_PARAMS[meta] for meta in initial['metadata_keys']]\n initial['metadata_names_str'] = ','.join(initial['metadata_names'])\n if app_name in ['monthly_summary','sf_link']:\n initial['start_month'] = Get('start_month','01')\n if initial['variable'] in ['pcpn','snow','evap','pet']:\n initial['statistic'] = Get('statistic','msum')\n else:\n initial['statistic'] = Get('statistic','mave')\n initial['less_greater_or_between'] = Get('less_greater_or_between','b')\n initial['threshold_low_for_between'] = Get('threshold_low_for_between',0.01)\n initial['threshold_high_for_between'] = Get('threshold_high_for_between',0.1)\n initial['threshold_for_less_than'] = Get('threshold_for_less_than',1)\n initial['threshold_for_greater_than'] = Get('threshold_for_greater_than',1)\n initial['departures_from_averages'] = Get('departures_from_averages','F')\n initial['frequency_analysis'] = Get('frequency_analysis','F')\n #Set initial plot options\n initial['chart_summary'] = Get('chart_summary','individual')\n #initial['plot_months'] = Get('plot_months','0,1')\n if app_name == 'monthly_summary':\n initial['base_temperature'] = Get('base_temperature','65')\n initial['statistic_period'] = Get('statistic_period','monthly')\n if app_name in ['climatology','sf_link']:\n initial['summary_type'] = Get('summary_type', 'all')\n if app_name == 'temporal_summary':\n initial['show_plot_opts'] = Get('show_plot_opts','T')\n initial['image_size'] = Get('image_size', 'medium')\n initial['level_number'] = Get('level_number', '5')\n initial['cmap'] = Get('cmap', 'rainbow')\n initial['cmaps'] = WRCCData.CMAPS\n initial['map_ol'] = Get('map_ol', 'state')\n initial['interpolation'] = Get('interpolation', 'cspline')\n initial['projection'] = Get('projection', 'lcc')\n #Ploting options for all pages that have charts\n if app_name in ['monthly_summary', 'spatial_summary','seasonal_summary', 'single_year','data_comparison','map_overlay']:\n if app_name in ['spatial_summary','monthly_summary','single_year','map_overlay']:\n if app_name in ['spatial_summary','monthly_spatial_summary','map_overlay']:\n shown_indices = ','.join([str(idx) for idx in range(len(initial['variables']))])\n elif app_name == 'single_year':\n try:\n shown_indices = str(int(initial['target_year']) - int(initial['min_year']))\n except:\n shown_indices = '0'\n else:\n shown_indices = '0'\n initial['chart_indices_string'] = Get('chart_indices_string',shown_indices)\n initial['chart_type'] = Get('chart_type','spline')\n initial['show_running_mean'] = Get('show_running_mean','F')\n if app_name in ['monthly_summary', 'seasonal_summary']:\n initial['running_mean_years'] = Get('running_mean_years',5)\n else:\n initial['running_mean_days'] = Get('running_mean_days',9)\n initial['show_average'] = Get('show_average','F')\n if app_name in ['monthly_summary']:\n initial['show_range'] = Get('show_range','F')\n initial['form_options'] = 
WRCCData.SCENIC_FORM_OPTIONS[app_name]\n return initial\n\ndef set_map_plot_options(request):\n initial = {}\n Get = set_GET(request)\n initial['image_size'] = Get('image_size', 'medium')\n initial['level_number'] = Get('level_number', '5')\n initial['cmap'] = Get('cmap', 'rainbow')\n initial['cmaps'] = WRCCData.CMAPS\n initial['map_ol'] = Get('map_ol', 'state')\n initial['interpolation'] = Get('interpolation', 'cspline')\n initial['projection'] = Get('projection', 'lcc')\n return initial\n\n\ndef set_form(request, clean=True):\n '''\n Coverts request input to usable form input:\n Deals with unicode issues\n and autofill options for identifiers\n NOTE: variables should always be a list (also when clean = False)\n If Clean == True,\n We also clean up some form fields for submission:\n date fields, convert to yyyymmdd\n window fields, convert to mmdd\n name strings are converted to ids\n Combine elemenst weith degree days\n '''\n try:\n req_method = request.method\n except:\n if isinstance(request,dict):\n req_method = 'dict'\n else:req_method = None\n form= {}\n form['req_method'] = req_method\n #Convert request object to python dictionary\n if req_method == 'dict':\n form = copy.deepcopy(request)\n #Special case variables, always needs to be list\n if 'variable' in request.keys() and not 'variables' in request.keys():\n form['variables'] = [form['variable']]\n if 'variables' in request.keys():\n form['variables'] = WRCCUtils.convert_variables_to_list(request['variables'])\n elif req_method == 'POST':\n for key, val in request.POST.items():\n form[str(key)]= val\n #form = dict((str(x),str(y)) for x,y in request.POST.items())\n #Special case variables, always needs to be list\n if 'variable' in request.POST.keys() and not 'variables' in request.POST.keys():\n form['variables'] = [str(request.POST['variable'])]\n if 'variables' in request.POST.keys():\n #form['variables'] = WRCCUtils.convert_variables_to_list(request.POST['variables'])\n els = request.POST.getlist('variables',request.POST.get('variables','').split(','))\n form['variables'] = [str(el) for el in els]\n if 'metadata_keys' in request.POST.keys():\n form['metadata_keys'] = request.POST.getlist('metadata_keys',request.POST.get('metadata_keys','').split(','))\n elif req_method == 'GET':\n #form = dict((str(x),str(y)) for x,y in request.GET.items())\n for key, val in request.GET.items():\n form[str(key)]= val\n #Special case variables, always needs to be list\n if 'variable' in request.GET.keys() and not 'variables' in request.GET.keys():\n form['variables'] = [str(request.GET['variable'])]\n if 'variables' in request.GET.keys():\n #form['variables'] = WRCCUtils.convert_variables_to_list(request.GET['variables'])\n form['variables'] = request.GET.get('variables','').split(',')\n if 'metadata_keys' in request.GET.keys():\n form['metadata_keys'] = request.GET.getlist('metadata_keys',request.GET.get('metadata_keys','').split(','))\n else:\n form = {}\n\n #set data type for single apps\n if 'data_type' not in form.keys():\n if 'station_id' in form.keys():\n form['data_type'] = 'station'\n if 'location' in form.keys():\n form['data_type'] = 'grid'\n if 'app_name' in form.keys() and form['app_name'] in ['temporal_summary','monthly_spatial_summary']:\n form['data_type'] = 'grid'\n #Convert unicode to string\n if 'variables' in form.keys():\n form['variables'] = [str(el) for el in form['variables']]\n if 'csrfmiddlewaretoken' in form.keys():\n del form['csrfmiddlewaretoken']\n if 'formData' in form.keys():\n del form['formData']\n if 
'form_options' in form.keys():\n del form['form_options']\n\n if not clean:\n return form\n #Clean up form for submission\n\n #Get element list for vd\n el_list = None\n if 'variable' in form.keys() and not 'variables' in form.keys():\n el_list = [form['variable']]\n if 'variables' in form.keys() and not 'variable' in form.keys():\n if isinstance(form['variables'],basestring):\n el_list = form['variables'].replace(', ',',').split(',')\n else:\n el_list = form['variables']\n\n #Get valid daterange\n vd = ['9999-99-99', '9999-99-99']\n #Clean Dates and windows\n for key in ['start_date', 'end_date', 'start_year', 'end_year','start_window','end_window']:\n if key not in form.keys():\n continue\n if form[key].lower() == 'por':\n if str(key) in ['start_date']:\n k=key; idx = 0;sd = 'por'; ed = form['end_date']\n if str(key) in ['end_date']:\n k=key; idx = 1;ed = 'por'; sd = form['start_date']\n if str(key) in ['start_year']:\n k='start_date'; idx = 0;sd = 'por'\n if form['end_year'].lower() == 'por':ed = 'por'\n else:ed = str(int(form['end_year']) -1) + '-12-31'\n if str(key) in ['end_year']:\n k='end_date'; idx = 1;ed = 'por'\n if form['start_year'].lower() == 'por':sd = 'por'\n else:sd = form['start_year'] + '-01-01'\n\n if 'station_id' in form.keys():\n stn_id, stn_name = WRCCUtils.find_id_and_name(str(form['station_id']),settings.MEDIA_DIR +'json/US_station_id.json')\n vd, no_vd_els = WRCCUtils.find_valid_daterange(stn_id, start_date=sd, end_date=ed, el_list=el_list, max_or_min='max')\n form[k] = vd[idx]\n if key == 'start_year' and form['start_year'].lower() == 'por':\n if vd[0] != '9999-99-99':form['start_year'] = vd[0][0:4]\n if key == 'end_year' and form['end_year'].lower() == 'por':\n if vd[1] != '9999-99-99':form['end_year'] = vd[1][0:4]\n else:\n form[str(key)] = str(form[key]).replace('-','').replace(':','').replace('/','').replace(' ','')\n else:\n form[str(key)] = str(form[key]).replace('-','').replace(':','').replace('/','').replace(' ','')\n\n #Convert user input of area names to ids\n for key in ['station_id','county', 'basin', 'county_warning_area', 'climate_division']:\n if not key in form.keys():\n continue\n ID,name = WRCCUtils.find_id_and_name(form[key],settings.MEDIA_DIR +'json/US_' + key + '.json')\n form[key] = ID\n form['user_area_id'] = str(name) + ', ' + str(ID)\n if not 'user_area_id' in form.keys():\n try:\n form['user_area_id'] = form[form['area_type']]\n except:\n try:\n form['user_area_id'] = form[form['data_type']]\n except:\n pass\n #station_ids is special case\n if 'station_ids' in form.keys():\n stn_ids = ''\n stn_list = form['station_ids'].rstrip(',').split(',')\n #Remove leading spaces from list items\n stn_list = [v.lstrip(' ').rstrip(' ') for v in stn_list]\n stn_ids, stn_names = WRCCUtils.find_ids_and_names(stn_list,settings.MEDIA_DIR +'json/US_' + 'station_id' + '.json')\n form['station_ids'] = stn_ids\n uai = ''\n stn_names_list = stn_names.split(',')\n for idx, stn_id in enumerate(stn_ids.split(',')):\n uai+=str(stn_names[idx]) + ', ' + str(stn_id) + ';'\n form['user_area_id'] = uai\n #set data summary if needed\n if 'data_summary' not in form.keys():\n if 'temporal_summary' in form.keys():\n form['data_summary'] = 'temporal_summary'\n if 'spatial_summary' in form.keys():\n form['data_summary'] = 'spatial_summary'\n #Combine variables\n if 'add_degree_days' in form.keys() and form['add_degree_days'] == 'T':\n for dd in form['degree_days'].replace(' ','').split(','):\n '''\n if form['units'] == 'metric':\n el_strip, base_temp = 
WRCCUtils.get_el_and_base_temp(dd)\n form['variables'].append(el_strip + str(WRCCUtils.convert_to_english('base_temp',base_temp)))\n else:\n form['variables'].append(dd)\n '''\n form['variables'].append(dd)\n return form\n","sub_path":"DJANGOUtils.py","file_name":"DJANGOUtils.py","file_ext":"py","file_size_in_byte":28858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"62054796","text":"\"\"\"A python script to compare SALT surfaces\n\"\"\"\nfrom optparse import OptionParser\nimport os\nimport JLA_library as JLA\nimport numpy as numpy\nfrom astropy.table import Table\nfrom scipy import interpolate\n\n# See wavelength.h\nwave_B=4302.57\nwave_V=5428.55\n\ndef reduced_lambda(x):\n return (x-wave_B) / (wave_V - wave_B)\n\ndef CCM(wave,R_V=3.1):\n val=[]\n for w in wave:\n x=10000.0/w\n if x < 1.1 and x > 0.3:\n a=0.574*x**1.61\n b=-0.527*x**1.61\n elif x< 3.3 and x > 1.1:\n y=x-1.82\n a = 1 + 0.17699*y - 0.50447*y**2 - 0.02427*y**3 + 0.72085*y**4 + 0.01979*y**5 - 0.77530*y**6 + 0.32999*y**7\n b = 1.41338*y + 2.28305*y**2 + 1.07233*y**3 - 5.38434*y**4 - 0.62251*y**5 + 5.30260*y**6 - 2.09002*y**7\n else:\n # We drop the other terms as we do not reach that far in the UV\n a = 1.752 - 0.316*x - 0.104 / ((x-4.67)**2 + 0.341)\n b =-3.091 + 1.825*x + 1.206 / ((x-4.62)**2 + 0.263)\n\n val.append(a + b / R_V)\n return numpy.array(val)\n\n#def Fitz99(wave):\n # For R_V=3.1\n # See http://iopscience.iop.org/article/10.1086/316293/pdf\n# from scipy.interpolate import CubicSpline\n# knots=numpy.array([26500, 12200, 6000, 5470, 4670, 4110, 2700, 2600])\n# values=numpy.array([0.265, 0.829, 2.688, 3.055, 3.806, 4.315, 6.265, 6.591])\n# cs = CubicSpline(knots[::-1], values[::-1]) \n# return cs(wave)\n\ndef Fitz99_Spline(wave,R_V):\n # See http://iopscience.iop.org/article/10.1086/316293/pdf\n # for lambda > 2700 Angstroms\n from scipy.interpolate import CubicSpline\n knots=numpy.array([26500., 12200., 6000., 5470., 4670., 4110., 2700., 2600.])\n # Ojo!\n # There is a sign error in the last row of Table 4\n values=numpy.array([0.265, 0.829, -0.426+1.0044*R_V, -0.050+1.0016*R_V, 0.701+1.0016*R_V, 1.208+1.0032*R_V-0.00033*R_V**2, 6.265, 6.591])\n # R_V=3.1\n #values=numpy.array([0.265, 0.829, 2.688, 3.055, 3.806, 4.315, 6.265, 6.591])\n cs = CubicSpline(knots[::-1], values[::-1])\n return cs(wave)\n\n\ndef Fitz99(wave,R_V=3.1):\n # See http://iopscience.iop.org/article/10.1086/316293/pdf\n # and earlier references\n x0=4.596\n gam=0.99\n c2=-0.824+4.717/R_V\n c1=2.030-3.007*c2\n c3=3.23\n c4=0.41\n val=[]\n \n for w in wave:\n x=10000./w\n D=x**2 / ((x**2-x0**2)**2+gam**2*x**2)\n if x > 5.9:\n F=0.5392*(x-5.9)**2 + 0.0564*(x-5.9)**3\n else:\n F=0.0\n val.append(c1+c2*x+c3*D+c4*F)\n \n \n return numpy.array(val)\n\n\nclass readSALTsurface:\n \"\"\"A SALT surface\"\"\"\n def __init__(self,f, salt3Model):\n if salt3Model:\n prefix='salt3'\n else:\n prefix='salt2'\n\n template_0=Table(numpy.genfromtxt(f+prefix+'_template_0.dat',dtype=[('phase',float),\n ('wave',float),\n ('flux',float)]))\n\n template_1=Table(numpy.genfromtxt(f+prefix+'_template_1.dat',dtype=[('phase',float),\n ('wave',float),\n ('flux',float)]))\n\n salt2_lc_relative_variance_0=Table(numpy.genfromtxt(f+prefix+'_lc_relative_variance_0.dat',dtype=[('phase',float),\n ('wave',float),\n ('variance',float)]))\n\n salt2_lc_relative_variance_1=Table(numpy.genfromtxt(f+prefix+'_lc_relative_variance_1.dat',dtype=[('phase',float),\n ('wave',float),\n ('variance',float)]))\n\n 
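        # NB (added annotation, not from the original source): the relative
        # variance and covariance tables loaded here are combined downstream
        # (see the var1/var2 expressions in compareSALTsurfaces) into one
        # relative light-curve variance for a supernova of stretch x1 at
        # phase p and wavelength w:
        #     var(p, w) = V0(p, w) + x1**2 * V1(p, w) + 2 * x1 * C01(p, w)
        # which is then scaled by the lc dispersion (\"error snake\") table.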
salt2_lc_relative_covariance_01=Table(numpy.genfromtxt(f+prefix+'_lc_relative_covariance_01.dat',dtype=[('phase',float),\n ('wave',float),\n ('covariance',float)]))\n\n\n salt2_spec_variance_0=Table(numpy.genfromtxt(f+prefix+'_spec_variance_0.dat',dtype=[('phase',float),\n ('wave',float),\n ('variance',float)]))\n\n salt2_spec_variance_1=Table(numpy.genfromtxt(f+prefix+'_spec_variance_1.dat',dtype=[('phase',float),\n ('wave',float),\n ('variance',float)]))\n\n salt2_spec_covariance_01=Table(numpy.genfromtxt(f+prefix+'_spec_covariance_01.dat',dtype=[('phase',float),\n ('wave',float),\n ('covariance',float)]))\n\n salt2_lc_dispersion_scaling=Table(numpy.genfromtxt(f+prefix+'_lc_dispersion_scaling.dat',dtype=[('phase',float),\n ('wave',float),\n ('scale',float)]))\n\n \n # Extract the coefficients of the colour law and the region over it was fitted.\n colour_law=f+prefix+'_color_correction.dat'\n \n self.colour_law=numpy.genfromtxt(colour_law,dtype=[('coeff',float)],\n skip_header=1,skip_footer=3)\n\n cl=open(colour_law)\n lines = cl.readlines()\n cl.close()\n for line in lines:\n if line.split()[0]==\"Salt2ExtinctionLaw.min_lambda\":\n self.min_lambda=float(line.split()[1])\n if line.split()[0]==\"Salt2ExtinctionLaw.max_lambda\":\n self.max_lambda=float(line.split()[1])\n \n \n \n\n self.colour_law_error=numpy.genfromtxt(f+prefix+'_color_dispersion.dat',dtype=[('wave',float),\n ('sig',float)], \n skip_header=3)\n \n\n self.surfaceName=f\n self.template_0={}\n self.template_1={}\n self.salt2_lc_relative_variance_0={}\n self.salt2_lc_relative_variance_1={}\n self.salt2_lc_relative_covariance_01={}\n self.salt2_spec_variance_0={}\n self.salt2_spec_variance_1={}\n self.salt2_spec_covariance_01={}\n self.salt2_lc_dispersion_scaling={}\n self.colourlaw=[]\n\n for phase in numpy.unique(template_0['phase']):\n selection=(template_0['phase']==phase)\n self.template_0['%5.1f' % phase]=template_0[selection]['wave','flux']\n\n for phase in numpy.unique(template_1['phase']):\n selection=(template_1['phase']==phase)\n self.template_1['%5.1f' % phase]=template_1[selection]['wave','flux']\n\n for phase in numpy.unique(salt2_lc_dispersion_scaling['phase']):\n selection=(salt2_lc_dispersion_scaling['phase']==phase)\n self.salt2_lc_dispersion_scaling['%5.1f' % phase]=salt2_lc_dispersion_scaling[selection]['wave','scale']\n\n for phase in numpy.unique(salt2_lc_relative_variance_0['phase']):\n selection=(salt2_lc_relative_variance_0['phase']==phase)\n self.salt2_lc_relative_variance_0['%5.1f' % phase]=salt2_lc_relative_variance_0[selection]['wave','variance']\n #self._checkVariance(self.salt2_lc_relative_variance_0['%5.1f' % phase],phase,'variance_1')\n \n for phase in numpy.unique(salt2_lc_relative_variance_1['phase']):\n selection=(salt2_lc_relative_variance_1['phase']==phase)\n self.salt2_lc_relative_variance_1['%5.1f' % phase]=salt2_lc_relative_variance_1[selection]['wave','variance']\n #self._checkVariance(self.salt2_lc_relative_variance_1['%5.1f' % phase],phase,'variance_1')\n\n for phase in numpy.unique(salt2_lc_relative_covariance_01['phase']):\n selection=(salt2_lc_relative_covariance_01['phase']==phase)\n self.salt2_lc_relative_covariance_01['%5.1f' % phase]=salt2_lc_relative_covariance_01[selection]['wave','covariance']\n #self._checkVariance(self.salt2_lc_relative_covariance_01['%5.1f' % phase],phase,'covariance_01')\n\n for phase in numpy.unique(salt2_spec_variance_0['phase']):\n selection=(salt2_spec_variance_0['phase']==phase)\n self.salt2_spec_variance_0['%5.1f' % 
phase]=salt2_spec_variance_0[selection]['wave','variance']\n\n for phase in numpy.unique(salt2_spec_variance_1['phase']):\n selection=(salt2_spec_variance_1['phase']==phase)\n self.salt2_spec_variance_1['%5.1f' % phase]=salt2_spec_variance_1[selection]['wave','variance']\n\n for phase in numpy.unique(salt2_spec_covariance_01['phase']):\n selection=(salt2_spec_covariance_01['phase']==phase)\n self.salt2_spec_covariance_01['%5.1f' % phase]=salt2_spec_covariance_01[selection]['wave','covariance']\n\n return\n\n def _checkVariance(self, error, phase, kind):\n # Add a check to see if we are exceeding limits\n if \"covariance\" in kind:\n print(\"Maximal for phase %5.1f is %5.3e\" % (phase, numpy.max(error['covariance'])))\n else:\n print(\"Maximal for phase %5.1f is %5.3e\" % (phase, numpy.max(error['variance'])))\n\n return\n \n \n\ndef derivative(alpha,surface,reduced_wave):\n d=alpha\n for exponent,coeff in enumerate(surface.colour_law['coeff']):\n d+=(exponent+2)*coeff*reduced_wave**(exponent+1)\n return d\n \ndef colourLaw(alpha,surface,reduced_wave):\n d=alpha*reduced_wave\n for exponent,coeff in enumerate(surface.colour_law['coeff']):\n d+=coeff*reduced_wave**(exponent+2)\n return d\n\n\ndef compareSALTsurfaces(surface):\n\n import matplotlib.pyplot as plt\n\n # ----------- Read in the configuration file ------------\n\n params=JLA.build_dictionary(options.config)\n \n # ----------- Read in the SALT models -------------------\n\n # If the SALT model is SALT3 - only for the second surface\n \n surface1=readSALTsurface(JLA.get_full_path(params['model1']),False)\n surface2=readSALTsurface(JLA.get_full_path(params['model2']),options.salt3)\n\n if surface.prefix is None:\n name=\"%s-%s\" % (surface1.surfaceName.split('/')[-3],surface2.surfaceName.split('/')[-3])\n else:\n name=surface.prefix\n \n \n # ----------- Plot the surfaces ----------------------\n fig1=plt.figure()\n first=True\n model1='/'.join(params['model1'].split('/')[-3:-1])\n model2='/'.join(params['model2'].split('/')[-3:-1])\n\n for axes,x1 in enumerate([-3,0,3]):\n ax1=fig1.add_subplot(3,1,axes+1)\n if first:\n ax1.set_title(\"model1:%s \\nvs\\n model2:%s\" % (model1,model2), fontsize=8)\n\n phase =\"%5.1f\" % options.phase\n wave1=surface1.template_0[phase]['wave']\n wave2=surface2.template_0[phase]['wave']\n flux1=surface1.template_0[phase]['flux'] + x1 * surface1.template_1[phase]['flux']\n flux2=surface2.template_0[phase]['flux'] + x1 * surface2.template_1[phase]['flux']\n\n ax1.plot(wave1,flux1,color='b',label=\"model 1\")\n ax1.plot(wave2,flux2,color='k',label=\"model 2\")\n if first:\n ax1.legend()\n first=False\n\n ax1.text(7000,0.3,\"C=0 x1=%2d @ %d days\" % (x1,options.phase))\n\n # The variance spectra are computed in the bin middles\n phase1=\"%5.1f\" % (options.phase-0.5)\n phase2=\"%5.1f\" % (options.phase+0.5)\n\n # We use the LC variance files\n # The variance and error_snake are sampled at -19.5, -18.5 days, etc.\n # The flux is sampled at -20, -19 days, etc.\n \n var1=(surface1.salt2_lc_relative_variance_0[phase1]['variance']+surface1.salt2_lc_relative_variance_0[phase2]['variance'])/2.0 \\\n + x1**2. 
* (surface1.salt2_lc_relative_variance_1[phase1]['variance']+surface1.salt2_lc_relative_variance_1[phase2]['variance'])/2.0 \\\n + 2*x1 * (surface1.salt2_lc_relative_covariance_01[phase1]['covariance']+surface1.salt2_lc_relative_covariance_01[phase2]['covariance'])/2.0\n \n var2=(surface2.salt2_lc_relative_variance_0[phase1]['variance']+surface2.salt2_lc_relative_variance_0[phase2]['variance'])/2.0 \\\n + x1**2. * (surface2.salt2_lc_relative_variance_1[phase1]['variance']+surface2.salt2_lc_relative_variance_1[phase2]['variance'])/2.0 \\\n + 2*x1 * (surface2.salt2_lc_relative_covariance_01[phase1]['covariance']+surface2.salt2_lc_relative_covariance_01[phase2]['covariance'])/2.0\n\n errorSnake1=(surface1.salt2_lc_dispersion_scaling[phase1]['scale']+surface1.salt2_lc_dispersion_scaling[phase2]['scale'])/2.0\n errorSnake2=(surface2.salt2_lc_dispersion_scaling[phase1]['scale']+surface2.salt2_lc_dispersion_scaling[phase2]['scale'])/2.0\n \n # Resample the variance and error snake using the wavelengths the flux is sampled at\n var1_int=interpolate.interp1d(surface1.salt2_lc_relative_variance_0[phase1]['wave'], var1,fill_value=\"extrapolate\")(wave1)\n var2_int=interpolate.interp1d(surface2.salt2_lc_relative_variance_0[phase1]['wave'], var2,fill_value=\"extrapolate\")(wave2)\n\n errorSnake1_int=interpolate.interp1d(surface1.salt2_lc_dispersion_scaling[phase1]['wave'], errorSnake1,fill_value=\"extrapolate\")(wave1)\n errorSnake2_int=interpolate.interp1d(surface2.salt2_lc_dispersion_scaling[phase1]['wave'], errorSnake2,fill_value=\"extrapolate\")(wave2)\n\n # We have not included the uncertainty from salt2_color_dispersion.dat\n # From the weight matrix of fullfit.cc\n # *w += sqr(model.ModelRelativeError(filter,*d,true)*(*mf));\n # return (f0*sqrt(model_relative_var)/ftot)*error_scaling;\n # These are the relate uncertainties in the light curve points\n # The spectral uncertainty is cast differently - todo - look into the code to see how they relative lc uncertainty\n # differs from the spectral uncertainty\n \n \n error1=numpy.sqrt(numpy.abs(flux1)*var1_int)*errorSnake1_int\n error2=numpy.sqrt(numpy.abs(flux2)*var2_int)*errorSnake2_int\n\n ax1.fill_between(wave1, flux1+error1,flux1-error1,alpha=0.4,label='SALT2 error',color='b')\n ax1.fill_between(wave2, flux2+error2,flux2-error2,alpha=0.4,label='SALT2 error',color='k')\n ax1.set_ylim(-0.1,0.6)\n\n \n if options.subtract:\n ax1b = ax1.twinx()\n # Resample if the sampling differs\n if len(wave1) != len(wave2):\n flux_a=interpolate.interp1d(wave1,flux1)(wave2)\n flux_b=interpolate.interp1d(wave2,flux2)(wave2)\n ax1b.plot(wave2,(flux_a-flux_b)/flux_a*100.,color='r',alpha=0.4)\n ax1.plot(wave2,(flux_a-flux_b),color='g',alpha=0.4)\n else:\n ax1b.plot(wave1,(flux1-flux2)/flux1*100.,color='r',alpha=0.4)\n ax1.plot(wave1,(flux1-flux2),color='g',alpha=0.4)\n\n ax1b.plot([2000,9200],[0,0],'m--',marker=None)\n ax1b.set_ylim(-20,20)\n ax1b.tick_params(axis='y', labelcolor='r')\n ax1b.set_ylabel('% Difference', color='r')\n \n ax1.set_xlabel(\"wavelength ($\\AA$)\")\n ax1.set_ylabel(\"flux density\")\n \n plt.savefig(\"%s_SED.png\" % (name))\n\n\n # Examine the variance and covariances\n fig2=plt.figure(figsize=(15,8))\n ax2_1=fig2.add_subplot(321)\n ax2_3=fig2.add_subplot(323)\n ax2_5=fig2.add_subplot(325)\n phaseRange=[-15.5,-10.5,-5.5,0.5,5.5,10.5,15.5,20.5,25.5,30.5,35.5]\n offset=0.0\n for p in phaseRange:\n phaseKey=\"%5.1f\" % p\n wave1=surface1.salt2_lc_relative_variance_0[phaseKey]['wave']\n 
ax2_1.plot(wave1,surface1.salt2_lc_relative_variance_0[phaseKey]['variance']+offset,label=\"%s +%3.1f\" % (phaseKey,offset))\n        ax2_3.plot(wave1,surface1.salt2_lc_relative_variance_1[phaseKey]['variance']+offset,label=\"%s +%3.1f\" % (phaseKey,offset))\n        ax2_5.plot(wave1,surface1.salt2_lc_relative_covariance_01[phaseKey]['covariance']+offset,label=\"%s +%3.1f\" % (phaseKey,offset))\n        offset+=0.1\n\n    ax2_1.set_ylim(-0.2,1.5)\n    ax2_3.set_ylim(-0.2,1.5)\n    ax2_5.set_ylim(-1,1.5)\n    ax2_1.set_title(\"model1: %s\\n variance_0, variance_1, covariance_01\" % (model1), fontsize=8)\n    ax2_1.legend()\n\n    ax2_1=fig2.add_subplot(322)\n    ax2_3=fig2.add_subplot(324)\n    ax2_5=fig2.add_subplot(326)\n    offset=0.0\n    for p in phaseRange:\n        phaseKey=\"%5.1f\" % p\n        wave2=surface2.salt2_lc_relative_variance_0[phaseKey]['wave']\n        ax2_1.plot(wave2,surface2.salt2_lc_relative_variance_0[phaseKey]['variance']+offset,label=phaseKey)\n        ax2_3.plot(wave2,surface2.salt2_lc_relative_variance_1[phaseKey]['variance']+offset,label=phaseKey)\n        ax2_5.plot(wave2,surface2.salt2_lc_relative_covariance_01[phaseKey]['covariance']+offset,label=phaseKey)\n        offset+=0.1\n\n    ax2_1.set_ylim(-0.2,1.5)\n    ax2_3.set_ylim(-0.2,1.5)\n    ax2_5.set_ylim(-1,1.5)\n    ax2_1.set_title(\"model2: %s\\n variance_0, variance_1, covariance_01\" % (model2), fontsize=8)\n\n    plt.savefig(\"%s_variance.png\" % (name))\n\n
    # ----------- Plot the colour laws ----------------------\n\n    # See salt2extinction.cc\n    # Note the extrapolation\n\n    # /*\n    # ========================================================\n    # VERSION 1\n    # ========================================================\n    # if(l_B<=l<=l_R)\n    #    ext = exp( color * constant * ( alpha*l + params(0)*l^2 + params(1)*l^3 + ... ))\n    #        = exp( color * constant * P(l) )\n    #    alpha = 1-params(0)-params(1)-...\n    # if(l>l_R)\n    #    ext = exp( color * constant * ( P(l_R) + P'(l_R)*(l-l_R) ) )\n    # if(l<l_B)\n    #    ext = exp( color * constant * ( P(l_B) + P'(l_B)*(l-l_B) ) )\n    # */\n\n    fig3=plt.figure()\n    ax3=fig3.add_subplot(111)\n\n    # Reduced wavelengths and the limits of the colour-law fit\n    reduced_wave1=reduced_lambda(wave1)\n    reduced_wave2=reduced_lambda(wave2)\n    wave1_min_reduced=reduced_lambda(surface1.min_lambda)\n    wave1_max_reduced=reduced_lambda(surface1.max_lambda)\n    wave2_min_reduced=reduced_lambda(surface2.min_lambda)\n    wave2_max_reduced=reduced_lambda(surface2.max_lambda)\n\n    # Model 1\n    alpha1=1.0\n\n    for coeff in surface1.colour_law['coeff']:\n        alpha1-=coeff\n\n    p1=numpy.zeros(len(reduced_wave1))\n\n    # Compute derivatives for extrapolations\n    p1_derivative_min=derivative(alpha1,surface1,wave1_min_reduced)\n    p1_derivative_max=derivative(alpha1,surface1,wave1_max_reduced)\n\n    # Compute colour law at the points of extrapolations\n    p1_wave_min_reduced=colourLaw(alpha1,surface1,wave1_min_reduced)\n    p1_wave_max_reduced=colourLaw(alpha1,surface1,wave1_max_reduced)\n\n    for index,rl in enumerate(reduced_wave1):\n        if rl < wave1_min_reduced:\n            p1[index]=p1_wave_min_reduced+p1_derivative_min*(rl-wave1_min_reduced)\n        elif rl > wave1_max_reduced:\n            p1[index]=p1_wave_max_reduced+p1_derivative_max*(rl-wave1_max_reduced)\n        else:\n            p1[index]=colourLaw(alpha1,surface1,rl)\n\n
    # Model 2\n    alpha2=1.0\n\n    for coeff in surface2.colour_law['coeff']:\n        alpha2-=coeff\n\n    p2=numpy.zeros(len(reduced_wave2))\n\n    # Compute derivatives for extrapolations\n    p2_derivative_min=derivative(alpha2,surface2,wave2_min_reduced)\n    p2_derivative_max=derivative(alpha2,surface2,wave2_max_reduced)\n\n    # Compute colour law at the points of extrapolations\n    p2_wave_min_reduced=colourLaw(alpha2,surface2,wave2_min_reduced)\n    p2_wave_max_reduced=colourLaw(alpha2,surface2,wave2_max_reduced)\n\n    for index,rl in enumerate(reduced_wave2):\n        if rl < wave2_min_reduced:\n            p2[index]=p2_wave_min_reduced+p2_derivative_min*(rl-wave2_min_reduced)\n        elif rl > wave2_max_reduced:\n            p2[index]=p2_wave_max_reduced+p2_derivative_max*(rl-wave2_max_reduced)\n        else:\n            p2[index]=colourLaw(alpha2,surface2,rl)\n\n
    # See Fig.3 of B14.\n    # p1 and p2 are the log (colour law)\n\n    C=-0.1\n\n    # Allowing for the fact that the SEDs might have finer sampling\n    # We should use interpolation instead.\n    if len(p1) > len(surface1.colour_law_error['sig']):\n        p1_s2=p1[0::2]\n    else:\n        p1_s2=p1\n\n    A1_wave=p1_s2*C\n    A1_wave_err_plus=(p1_s2+surface1.colour_law_error['sig'])*C\n    A1_wave_err_minus=(p1_s2-surface1.colour_law_error['sig'])*C\n    ax3.fill_between(surface1.colour_law_error['wave'], A1_wave_err_plus, A1_wave_err_minus, alpha=0.4,label='model1+err',color='b')\n    ax3.plot(surface1.colour_law_error['wave'], A1_wave,label='model1')\n\n    A2_wave=p2*C\n    ax3.plot(wave2, A2_wave, label='model2',color='k')\n
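    # Hedged recap (added annotation, not in the original script): the colour
    # law evaluated above is the polynomial P(l) = alpha*l + c0*l**2 + ... on
    # the reduced wavelength l, with alpha = 1 - sum(c_i), continued linearly
    # outside the fitted range. The nested helper below restates that rule in
    # one place, using only the module's own colourLaw() and derivative().
    def colour_law_extrapolated(alpha, surface, rl, rl_min, rl_max):
        if rl_min <= rl <= rl_max:
            return colourLaw(alpha, surface, rl)      # inside the fitted range
        edge = rl_min if rl < rl_min else rl_max      # nearer boundary
        # First-order Taylor continuation about that boundary
        return colourLaw(alpha, surface, edge) + derivative(alpha, surface, edge) * (rl - edge)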
    # Plot CCM R_V=3.1\n    E_BV=0.1\n    R_V=3.1\n    a_wave=E_BV * R_V * CCM(wave1, R_V)\n    a_B=E_BV * R_V * CCM(numpy.array([wave_B]),R_V)\n    ax3.plot(wave1,a_wave-a_B,label='CCM R_V=3.1')\n\n    # CCM R_V=1.0\n    R_V=1.0\n    a_wave=E_BV * R_V * CCM(wave1, R_V)\n    a_B=E_BV * R_V * CCM(numpy.array([wave_B]),R_V)\n    ax3.plot(wave1,a_wave-a_B,label='CCM R_V=1.0')\n\n    # F99 R_V=3.1\n\n    # Fitz99 UV extinction - note the limited wavelength range\n    # https://iopscience.iop.org/article/10.1086/316293/pdf\n    # http://adsabs.harvard.edu/abs/1990ApJS...72..163F\n    E_BV=0.1\n    R_V=3.1\n    wave=numpy.arange(2000,2700,1.0)\n    ax3.plot(wave, E_BV*(Fitz99(wave,R_V)-1.0),label=\"Fitz99 UV R_V=%3.1f\" % R_V)\n    # Fitz99 - Spline function in the optical\n    # https://iopscience.iop.org/article/10.1086/316293/pdf\n    wave=numpy.arange(2700,9200,1.0)\n    a_B=E_BV*(Fitz99_Spline(numpy.array([wave_B]),R_V))\n    ax3.plot(wave,E_BV*Fitz99_Spline(wave,R_V)-a_B,label=\"Fitz99 Spline R_V=%3.1f\" % R_V)\n\n    ax3.legend()\n    ax3.set_xlabel(\"wavelength ($\\AA$)\")\n    ax3.set_ylim(-0.3,0.8)\n    ax3.set_title(\"model1:%s\\n vs\\n model2:%s\" % (model1,model2), fontsize=8)\n    plt.savefig(\"%s_colourlaw.png\" % (name))\n\n    # ----------- Plot examples of the impact of colour ----------------------\n    # Assume x1=0\n    # Note\n    # The colour laws p1 and p2 have the absorption in the B band subtracted\n    # The units are magnitudes\n    # Are we correctly applying the colour law?\n\n    plt.show()\n    plt.close()\n\n    return\n\nif __name__ == '__main__':\n\n    parser = OptionParser()\n\n    parser.add_option(\"-c\", \"--config\", dest=\"config\", default=\"SALTmodels.config\",\n                      help=\"Directories containing the SALT models\")\n\n    parser.add_option(\"-s\", \"--subtract\", dest=\"subtract\", default=True,action=\"store_false\",\n                      help=\"Subtract one surface from another\")\n\n    parser.add_option(\"-p\", \"--phase\", dest=\"phase\", default=0.0, type=float,\n                      help=\"Lightcurve phase\")\n\n    parser.add_option(\"-P\", \"--prefix\", dest=\"prefix\", default=None,\n                      help=\"prefix for output file names\")\n\n    parser.add_option(\"--phase2\", dest=\"phase2\", default=0.5, type=float,\n                      help=\"Lightcurve phase\")\n\n    parser.add_option(\"-3\", \"--salt3\", dest=\"salt3\", default=False, action=\"store_true\",\n                      help=\"SALT3 model\")\n\n    (options, args) = parser.parse_args()\n\n\n    compareSALTsurfaces(options)\n","sub_path":"scripts/jla_compare_SALTsurfaces.py","file_name":"jla_compare_SALTsurfaces.py","file_ext":"py","file_size_in_byte":23885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
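For orientation, the script above is driven entirely by the OptionParser flags declared in its __main__ block. A representative invocation using only those flags (the config file contents and the model directories it points to are site-specific and not shown in this record):

    python jla_compare_SALTsurfaces.py --config SALTmodels.config --phase 0.0 --prefix salt2_vs_salt3 --salt3

Note that --subtract is declared with default=True and action="store_false", so passing -s turns the percentage-difference panel off rather than on.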
 # setting up user agent\n opts = Options()\n opts.add_argument(\"user-agent=Amherst College SURF 2018, contact salfeld2018@Amherst.edu with any questions.\")\n\n self.archives = archives\n\n self.base = base_urls\n\n self.crawled_url = set()\n\n self.driver = webdriver.Chrome(chrome_options=opts)\n\n self.excluded = [\"www.npr.org/podcasts/\", \"www.npr.org/event/music/\", \"slideshow\", \"audio\", \"https://www.npr.org/sections/pictureshow/\"]\n\n self.url_queue = deque()\n\n self.mal_url = []\n\n self.wrongs = []\n\n def check_link(self, link): # check an individual link against robots.txt\n\n rp = robotparser.RobotFileParser()\n rp.set_url(\"https://www.npr.org/robots.txt\")\n rp.read()\n\n if rp.can_fetch(\"*\", link):\n return True\n else:\n return False\n\n def check_transcript(self):\n\n try:\n self.driver.find_element_by_class_name(\"transcript\")\n return False\n except Exception:\n return True\n\n def get_urls_from_base(self, base):\n\n if self.check_link(base):\n\n self.driver.get(base)\n archive_list = self.driver.find_element_by_class_name(\"archivelist\").find_elements_by_class_name(\"title\")\n\n for item in archive_list:\n temp = item.find_element_by_tag_name(\"a\").get_attribute(\"href\")\n if self.check_link(temp):\n self.url_queue.append(temp)\n\n else:\n\n print(f\"--- error with base url {base}\")\n\n def get_urls_from_scroll(self, scroll_link):\n\n try:\n\n self.driver.get(scroll_link)\n\n WebDriverWait(self.driver, 10).until(EC.presence_of_all_elements_located((By.TAG_NAME, \"h2\")))\n\n scroll_article_list = self.driver.find_elements_by_class_name(\"item-info\")\n\n for scroll_item in scroll_article_list:\n\n s_link = scroll_item.find_element_by_tag_name(\"h2\").find_element_by_tag_name(\"a\").get_attribute(\"href\")\n\n # keep the link only if robots.txt allows it and it matches none of the excluded patterns\n if self.check_link(s_link) and all(exclude not in s_link for exclude in self.excluded):\n\n self.url_queue.append(s_link)\n\n else:\n\n self.wrongs.append(s_link)\n print(f\"--- skipping excluded or disallowed link {s_link}\")\n\n except Exception:\n\n print(f\"--- error with getting scroll link {scroll_link}!\")\n\n def infinite_scroll(self, archive_url):\n\n idx = 16\n\n for i in range(1000):\n\n prev_count = len(self.url_queue)\n\n print(f\"~~~ scroll {i+1} ~~~\")\n\n scroll_link = archive_url.format(idx + i)\n print(scroll_link)\n\n if self.check_link(scroll_link):\n\n self.driver.implicitly_wait(10) # to make more human pt.1\n\n self.get_urls_from_scroll(scroll_link)\n else:\n print(f\"--- access denied to scroll link {scroll_link}\")\n\n idx += 15\n\n print(f\"appended {len(self.url_queue)-prev_count} links\")\n\n def get_content(self, url):\n\n try:\n\n self.driver.get(url)\n\n self.driver.implicitly_wait(15) # to make more human pt.2\n\n if self.check_transcript():\n\n # headline\n try:\n headline = self.driver.find_element_by_class_name(\"storytitle\").find_element_by_tag_name(\"h1\").text\n except Exception:\n return url, \"n.a.\", \"-\"\n\n # article\n article_collect = []\n try:\n contents = self.driver.find_element_by_id(\"storytext\").find_elements_by_tag_name(\"p\")\n\n for content in contents:\n article_collect.append(content.text)\n\n article = \" \".join([ar for ar in article_collect])\n\n return url, headline, article\n\n except Exception:\n return url, \"-\", \"n.a.\"\n\n except Exception:\n\n self.wrongs.append(url)\n print(f\"--- error with accessing url {url}\")\n\n def start(self):\n\n # collecting urls\n for i in range(4):\n\n print(f\"... 
retrieving data from {self.base[i]}\")\n\n print(\"=====part 1=====\")\n self.get_urls_from_base(self.base[i])\n print(self.url_queue)\n print(len(self.url_queue))\n\n print(\"\\n=====part 2=====\")\n self.infinite_scroll(self.archives[i])\n print(self.url_queue)\n print(len(self.url_queue))\n print(\"================\\n\\n\")\n\n # collecting url, title and article content\n print(\"... now collecting contents\\n\\n\")\n\n counter = 0\n empty_titles = []\n empty_articles = []\n\n full_list = deque()\n\n while len(self.url_queue) > 0: #and counter < 200:\n\n current_url = self.url_queue.popleft()\n\n if current_url not in self.crawled_url:\n\n try:\n url, title, article = self.get_content(current_url)\n\n if title == \"n.a.\":\n empty_titles.append(url)\n elif article == \"n.a.\":\n empty_articles.append(url)\n else:\n temp = [url, title, article]\n full_list.append(temp)\n self.crawled_url.add(current_url)\n counter += 1\n\n except Exception:\n pass\n\n else:\n\n self.wrongs.append(current_url)\n\n # save\n print(\"... saving\\n\\n\")\n\n # save main pickle\n pickle_main_out = open(\"./data/npr.pkl\", \"wb\")\n desc = f\"{counter} npr news articles stored in the following pickle format: [[url, title, content], [url, title, content], [], ...]\"\n pickle.dump((full_list, desc), pickle_main_out)\n pickle_main_out.close()\n\n # save side pickle\n pickle_side_out = open(\"./data/npr_side.pkl\", \"wb\")\n desc2 = f\"three lists of urls of {len(empty_titles)} empty_titles, {len(empty_articles)} empty articles, and {len(self.wrongs)} articles that includes the to_excludes \"\n pickle.dump(((empty_titles, empty_articles, self.wrongs), desc2), pickle_side_out)\n pickle_side_out.close()\n\n # summary\n print(f\"summary:\\n\\t{counter} full articles obtained; excluded {len(self.wrongs)} wrong articles, {len(empty_titles)} empty titled articles, and {len(empty_articles)} empty content articles\")\n\n\nif __name__ == \"__main__\":\n\n npr = nprCrawler()\n npr.start()","sub_path":"news_source/webscrapers/full_npr.py","file_name":"full_npr.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"512852869","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 15 20:16:38 2018\n\n@author: Zhewei Zhang\n\n\nfind the when and which neurons in the hidden layer of the network\n\"\"\"\n\n## load model\nimport os\nimport yaml\nimport copy\nimport glob\nimport torch\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nimport tkinter as tk\nfrom tkinter.filedialog import askopenfilenames as file_selection\n\nfrom toolkits_bhv import shape_config\n\npath = os.getcwd()\nos.chdir('../seqrnn_multitask')\nfrom model import GRUNetwork\nos.chdir(path)\n\nnumNeuron = 128\ninput_setting = shape_config()\nchoice_pos = input_setting['choice']\ninput_size = input_setting['input_size']\n\n\n# In[]: tools finding the when and which units\nclass neurons_inNetwork:\n \"\"\"\n \n \"\"\"\n def __init__(self, value, threshold):\n self.threshold = threshold\n self.value = value\n self.pos = []\n self.neg = []\n self.neuron_constructer()\n \n def __getitem__(self,key):\n return self.dict[key]\n \n def __setitem__(self,key,value):\n self.dict[key] = value\n \n def neuron_constructer(self):\n index = np.argsort(self.value)[::-1]\n # Get neurons with positive values and their sum is more than a half of \n # the sum of positive values. 
At the same time, we minimize the number\n # of neurons selected.\n pos_v = self.value[np.where(self.value > 0)]\n pos_v_cumsum = np.sort(pos_v)[::-1].cumsum()\n necePosProportion = self.threshold*pos_v.sum()\n numPosSelected = np.where(pos_v_cumsum >= necePosProportion)[0][0]+1\n self.pos = index[:numPosSelected]\n \n # Get neurons with negative values whose summed magnitude is more than\n # the threshold fraction of the total negative weight. At the same time,\n # we minimize the number of neurons selected.\n neg_v_abs = np.abs(self.value[np.where(self.value < 0)])\n neg_v_cumsum = np.sort(neg_v_abs)[::-1].cumsum()\n neceNegProportion = self.threshold*neg_v_abs.sum()\n numNegSelected = np.where(neg_v_cumsum >= neceNegProportion)[0][0]+1\n self.neg = index[-numNegSelected:]\n\ndef model_select():\n \"\"\"\n GUI, select the model files\n \"\"\"\n root = tk.Tk()\n root.withdraw()\n file_paths = file_selection(initialdir = '../save/RT',\n parent=root, \n title='Choose the model',\n filetypes=[(\"model files\", \".pt\")]\n )\n return file_paths\n\ndef model_loading(file_path):\n # load the model parameters\n # the exact values do not matter here; they are only needed to build\n # the network before loading the trained weights\n model_parameters = {\n \"nhid\": 128,\n \"nlayers\": 1,\n \"input_size\": input_size,\n \"batch_size\": 1,\n \"clip\": 0.25,\n \"lr\": 0.6\n }\n # load model\n rnn_model = GRUNetwork(model_parameters[\"input_size\"],\n model_parameters[\"nhid\"],\n model_parameters[\"batch_size\"],\n model_parameters[\"nlayers\"],\n model_parameters[\"lr\"],\n )\n rnn_model.load_state_dict(torch.load(file_path))\n \n # get the connection weight of the trained model\n ho_weight = rnn_model.decoder.weight.detach().numpy()\n w_hr, w_hi, w_hn = rnn_model.rnn.weight_hh_l0.chunk(3, 0)\n \n return ho_weight, w_hr, w_hi, w_hn\n \n \ndef when_which_constructer(ho_weight, neuron_proportion): \n \"\"\"\n when neuron: when should I make a choice\n which neuron: which target should I choose\n \"\"\"\n when_value = ho_weight[choice_pos[1]:choice_pos[-1],:].mean(axis = 0) - ho_weight[choice_pos[0],:] \n which_value = ho_weight[choice_pos[1],:] - ho_weight[choice_pos[2],:]\n\n when = neurons_inNetwork(when_value, neuron_proportion)\n which = neurons_inNetwork(which_value, neuron_proportion)\n\n return when, which\n\ndef datasaving(filepath, when, which, rnn_weight, ho_weight, prop):\n # save the results\n path, model = os.path.split(filepath)\n saving_name = path + '/WhenWhich_' + model[:-3] + '_threshold' + str(prop) +'.yaml'\n \n data = {'model':model,'when':when,'which':which, 'threshold':prop,\n 'rnn_weight':rnn_weight,'output_weight':ho_weight}\n \n with open(saving_name, 'w') as outfile:\n yaml.dump(data, outfile, default_flow_style=False)\n print('lesion file saved')\n print('file path, file name:',saving_name)\n \n\n# In[]: \n# tools calculating geodesic distance, maximum flow, and connection pattern\n\ndef wh_loading(saving_name):\n # load the file storing the identity of when/which units\n f = open(saving_name)\n yaml_load = lambda x: yaml.load(x, Loader=yaml.Loader)\n y = yaml_load(f)\n f.close()\n return y\n\n\ndef cal_mat_thresholded(mat, prob_largest = 0.3):\n # keep the connections whose absolute values fall in the largest\n # prob_largest fraction of the matrix\n \n num_largest = np.round(mat.size*prob_largest).astype(int)\n value_sorted = np.sort(np.abs(mat), axis = None)[::-1]\n\n mat_binary = np.abs(mat)>=value_sorted[num_largest]\n \n return mat_binary\n\n\ndef get_others(when, which, zero_node):\n others = []\n for i in range(numNeuron):\n if not(any(i == when) or any(i == which) or any(i == zero_node)):\n others.append(i)\n \n return others\n
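\n# Hedged usage sketch (added; not in the original module): for\n# value = [0.5, 0.3, -0.4, 0.1, -0.2] and threshold = 0.5 the positive\n# weights sum to 0.9, so the single largest unit already passes the\n# cutoff (0.5 >= 0.45); the same rule on the |negative| weights picks unit 2:\n# sel = neurons_inNetwork(np.array([0.5, 0.3, -0.4, 0.1, -0.2]), 0.5)\n# sel.pos -> array([0]), sel.neg -> array([2])\n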
\ndef grouping(type1,type2,value):\n value_group = []\n for i in type1:\n for j in type2:\n if i!=j:\n value_group.append(value[i,j])\n return value_group\n\ndef interaction(when, which, others, value):\n \"\"\"\n classify the values between each pair of groups\n \"\"\"\n when_when = grouping(when, when, value)\n when_which = grouping(when, which,value)\n which_which = grouping(which,which,value)\n \n control_1 = grouping(when, others,value)\n control_2 = grouping(which,others,value)\n control = control_1 + control_2\n \n value_mean = {'control': np.mean(control),\n 'when_when': np.mean(when_when),\n 'when_which': np.mean(when_which),\n 'which_which':np.mean(which_which)\n }\n \n value_sem = {'control': stats.sem(control),\n 'when_when': stats.sem(when_when),\n 'when_which': stats.sem(when_which),\n 'which_which':stats.sem(which_which)\n }\n \n num = {'control': len(control), 'when_when': len(when_when),\n 'when_which':len(when_which), 'which_which':len(which_which)\n }\n\n return value_mean, value_sem, num\n\ndef gesDistanceInv(graph, when, which, zero_node):\n \"\"\"\n get the shortest path length\n \"\"\"\n g = copy.deepcopy(graph)\n \n # geodesic distance between two nodes\n path_length = nx.all_pairs_dijkstra_path_length(g) # for weighted network\n gd = 1e10 + np.zeros([numNeuron, numNeuron])\n for i in path_length:\n for key, value in i[1].items():\n gd[i[0],key] = value\n \n # using the inverse of the distance\n gd = 1/gd\n others = get_others(when, which, zero_node)\n # get the inverse of the distance between the groups \n dInv_mean, dInv_sem, dInv_num = interaction(when, which, others, gd)\n \n return dInv_mean, dInv_sem, dInv_num\n\ndef maxFlow(graph, when, which, zero_node):\n \"\"\"\n maximum flow between two nodes\n \"\"\"\n g = copy.deepcopy(graph)\n max_flow = np.zeros([numNeuron,numNeuron])\n for i in range(numNeuron):\n for j in range(numNeuron):\n if i==j or np.any(zero_node==i) or np.any(zero_node==j):\n continue\n max_flow[i,j] = nx.maximum_flow_value(g,i,j)\n \n others = get_others(when, which, zero_node)\n \n flow_mean, flow_sem, flow_num = interaction(when, which, others, max_flow)\n \n return flow_mean, flow_sem, flow_num\n\ndef gesdes_plot(df_dInv):\n # plot the inverse of the geodesic distance between the groups\n nFile, nGroup = df_dInv.label.count(), 4\n gdInv = np.zeros((nFile, nGroup))\n for i in range(nFile):\n gdInv[i,:] = np.array([df_dInv['mean'][i]['control'], \n df_dInv['mean'][i]['when_when'], \n df_dInv['mean'][i]['which_which'],\n df_dInv['mean'][i]['when_which'],\n ])\n fig = plt.figure()\n plt.boxplot(gdInv)\n plt.ylabel('1 / geodesic distance')\n plt.xticks(np.arange(1, nGroup+1), # one tick per group: the boxplot has nGroup boxes\n ('when/which-others','when-when','which-which','when-which'))\n fig.savefig('../figs/invGeoDis.eps', format='eps', dpi=1000)\n plt.show()\n\ndef maxflow_plot(df_mflow, title = ''):\n # plot the maximum flow between the groups\n nFile, nGroup = df_mflow.label.count(), 4\n maxflow = np.zeros((nFile, nGroup))\n for i in range(df_mflow.label.count()):\n maxflow[i,:] = np.array([df_mflow['mean'][i]['control'], \n df_mflow['mean'][i]['when_when'], \n df_mflow['mean'][i]['which_which'], \n df_mflow['mean'][i]['when_which']])\n fig2 = plt.figure()\n plt.boxplot(maxflow)\n plt.ylabel('maximum flow')\n plt.xticks(np.arange(1, nGroup+1), # one tick per group: the boxplot has nGroup boxes\n ('when/which-others','when-when','which-which','when-which'))\n fig2.savefig('../figs/maxFlow.eps', format='eps', dpi=1000)\n plt.show()\n
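\ndef _toy_graph_metrics():\n \"\"\"\n Hedged illustration (added; not part of the original analysis):\n nx.all_pairs_dijkstra_path_length and nx.maximum_flow_value are the two\n primitives used above; 'weight' drives distances, 'capacity' drives flows.\n \"\"\"\n g = nx.DiGraph()\n g.add_edge(0, 1, weight=1.0, capacity=2.0)\n g.add_edge(1, 2, weight=2.0, capacity=1.0)\n dist = dict(nx.all_pairs_dijkstra_path_length(g))\n flow = nx.maximum_flow_value(g, 0, 2)\n return dist[0][2], flow # (3.0, 1.0): path cost 1+2, bottleneck capacity 1\n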
\ndef conpattern_plot(df_output):\n \"\"\"\n plot connection pattern of when/which units\n \"\"\"\n fig_w, fig_h = (10, 7)\n plt.rcParams.update({'figure.figsize': (fig_w, fig_h)})\n \n nFile, nOutput = df_output.label.count(), 3\n # connection pattern of when/which units\n when_p, when_n = np.zeros((nFile, nOutput)), np.zeros((nFile, nOutput))\n which_p, which_n = np.zeros((nFile, nOutput)), np.zeros((nFile, nOutput))\n for i in range(nFile):\n when = df_output.when.iloc[i]\n which = df_output.which.iloc[i]\n # output connection matrix \n output_C = df_output.output.iloc[i][choice_pos[:-1], :] \n \n when_p[i,:] = output_C[:, when.pos].mean(axis=1)\n when_n[i,:] = output_C[:, when.neg].mean(axis=1)\n which_p[i,:] = output_C[:, which.pos].mean(axis=1)\n which_n[i,:] = output_C[:, which.neg].mean(axis=1)\n \n # plot\n f = plt.figure()\n titles = ['when units','which units']\n for i, units_connection in enumerate([[when_p, when_n],[which_p, which_n]]):\n plt.subplot(2,1,i+1)\n pos, neg = units_connection[0], units_connection[1]\n plt.errorbar(range(nOutput), y = np.mean(pos, axis=0),\n yerr = stats.sem(pos,axis=0), label = 'pos')\n plt.errorbar(range(nOutput), y = np.mean(neg, axis=0),\n yerr = stats.sem(neg,axis=0), label = 'neg')\n plt.legend()\n plt.title(titles[i])\n plt.xticks(range(nOutput),('fixation', 'left target','right target'))\n \n f.savefig('../figs/weight pattern.eps', format='eps', dpi=1000)\n plt.show()\n\ndef graph_construct(rnn_weight, prob_largest = 0.3):\n \"\"\"\n construct a structure graph\n \"\"\"\n \n connectivity = np.abs(rnn_weight)\n # get the index matrix marking the largest 30% of the connections\n net_mat = cal_mat_thresholded(connectivity, prob_largest = prob_largest)\n # construct a directed weighted graph\n graph = nx.DiGraph()\n norms = np.mean(connectivity[net_mat!=0])\n for i in range(net_mat.shape[0]):\n for j in range(net_mat.shape[1]):\n if net_mat[i,j] != 0:\n graph.add_edge(i, j,\n weight = norms/np.abs(connectivity[i,j]),\n capacity = np.abs(connectivity[i,j]) \n )\n net_mat[i,j] = norms/np.abs(connectivity[i,j])\n \n # find the nodes that are not connected with any other\n zero_node = np.intersect1d(np.where(net_mat.sum(axis=0)==0),\n np.where(net_mat.sum(axis=1)==0))\n return graph, zero_node\n\n\ndef graph_extract(file_paths):\n # the inverse of the geodesic distance\n df_dInv = pd.DataFrame([], columns = {'label','mean','num'})\n # the maximum flow\n df_mflow = pd.DataFrame([], columns = {'label','mean','num'})\n # the output connection pattern\n df_output = pd.DataFrame([], columns = {'label','when','which','output'})\n \n for i, file in enumerate(file_paths):\n print(file)\n y = wh_loading(file)\n when = y['when']\n which = y['which']\n when_units = np.append(when.pos, when.neg)\n which_units = np.append(which.pos,which.neg)\n # using the recurrent connection to construct a directed weighted graph\n rnn_weight = y['rnn_weight'].detach().numpy()\n graph, island = graph_construct(rnn_weight) \n # get the inverse distance between the groups\n dInv_mean, dInv_sem, dInv_num = gesDistanceInv(graph, when_units, \n which_units, island)\n\n df_output.loc[i] = {'label': file,'when':when,'which':which,\n 'output': y['output_weight']}\n \n df_dInv.loc[i] = {'label': file, 'mean': dInv_mean,'num':dInv_num}\n\n print('calculating maximum flow takes a long time, please be patient')\n flow_mean, flow_sem, flow_num = maxFlow(graph, when_units, \n which_units, island)\n df_mflow.loc[i] = {'label': file, 'mean': flow_mean,'num':flow_num}\n \n return df_output, df_dInv, df_mflow\n\ndef wh_select():\n print(\"start\")\n 
print(\"select the model files\")\n root = tk.Tk()\n root.withdraw()\n file_path = file_selection(initialdir = '../save/RT',\n parent=root,\n title='Choose the when/which file',\n filetypes=[(\"model files\", \"*0.5.yaml\")]\n )\n print('*'*49)\n \n return file_path\n\n# In[]: tools about basic bhv analysis\n\nfrom toolkits_bhv import bhv_extract\n\ndef groups_files(file_paths):\n \n # group the files based on the lesion type\n group = []\n for nfile in file_paths:\n path, file = os.path.split(nfile)\n group.append(file.split('-')[0])\n \n files_pd = pd.DataFrame([list(file_paths),group],['name','lesion'])\n files_pd = files_pd.T\n files_groups = files_pd.name.groupby([files_pd.lesion])\n ncondition = files_pd.lesion.nunique()\n condition = files_pd.lesion.unique()\n\n return files_groups, condition, ncondition\n\ndef lesionBhv_selection(file_paths = None):\n print(\"start\")\n print(\"select the files\")\n root = tk.Tk()\n root.withdraw()\n if file_paths == None:\n file_paths = file_selection(initialdir ='../log/RT_lesion',\n parent = root,\n title = 'Choose a file',\n filetypes=[(\"HDF5 files\", \"*-0.5-*.hdf5\")]\n )\n print('*'*49)\n \n return file_paths\n\ndef lesion_extract(file_paths = None):\n files_groups, condition, ncondition = groups_files(file_paths)\n \n df = pd.DataFrame([], columns = {'label','rt','rt_sem','cr','cr_sem',\n 'fr','fr_sem','cr_log','cr_log_sem',\n 'bias','bias_sem'})\n \n for i, files_group in enumerate(files_groups):\n df_basic, _, _ = bhv_extract(files_group[1])\n\n df.loc[i] = {'label': files_group[0],\n 'rt': df_basic.rt_mean.mean(),\n 'rt_sem': df_basic.rt_mean.sem(),\n 'cr': df_basic.cr.mean(),\n 'cr_sem': df_basic.cr.sem(),\n 'fr': df_basic.fr.mean(),\n 'fr_sem': df_basic.fr.sem(),\n 'cr_log': df_basic.cr_log.mean(),\n 'cr_log_sem': df_basic.cr_log.sem(),\n 'bias': df_basic.choice_prop.mean(),\n 'bias_sem': df_basic.choice_prop.sem()\n }\n \n return df, condition, ncondition\n\ndef plot_basicBhv_lesion(df, condition, ncondition):\n # plot the reaction time\n # plot the correct rate, consistency with evidence and choice bias\n x = np.arange(ncondition)\n # reaction time changes\n fig = plt.figure()\n plt.boxplot(np.vstack(df.rt.values).T)\n plt.xticks(x+1, df.label.values)\n plt.ylabel('reaction time')\n fig.savefig('../figs/lesion_effect_rt.eps', format='eps', dpi=1000)\n \n # choices\n fig2 = plt.figure()\n plt.bar(x-0.2, df.cr, yerr = df.cr_sem, width=0.15,label = 'cr')\n plt.bar(x, df.cr_log, yerr = df.cr_log_sem, width=0.15,label = 'cr_log')\n plt.bar(x+0.2, df.bias, yerr = df.bias_sem, width=0.15,label = 'choice_prop')\n \n plt.legend()\n plt.ylabel('proportion(%)')\n # plt.xticks(x,('control','when neg','when pos','which neg','which pos'))\n plt.xticks(x, condition)\n fig2.savefig('../figs/lesion_effect_choice.eps', format='eps', dpi=1000)\n plt.show()\n \n# In[] speed-accuracy trade off\n\ndef speed_accuracy_extract():\n \n cr, cr_sem = np.zeros(11,), np.zeros(11,)\n rt, rt_sem = np.zeros(11,), np.zeros(11,)\n cr_log, cr_log_sem = np.zeros(11,), np.zeros(11,)\n \n for i, prop in enumerate(['0.5','0.4','0.3','0.2','0.1']):\n print('label: ', prop)\n # bhv files in which when units are inactivated\n files = glob.glob('../log/RT/lesion/when*'+prop+'*.hdf5')\n \n df, _, _ = lesion_extract(file_paths = files)\n \n cr[i] = df.loc[df.label == 'when_neg_output_les'].cr\n rt[i] = df.loc[df.label == 'when_neg_output_les'].rt\n cr_sem[i] = df.loc[df.label == 'when_neg_output_les'].cr_sem\n rt_sem[i] = df.loc[df.label == 'when_neg_output_les'].rt_sem\n 
cr_log[i] = df.loc[df.label == 'when_neg_output_les'].cr_log\n cr_log_sem[i] = df.loc[df.label == 'when_neg_output_les'].cr_log_sem\n \n cr[10-i] = df.loc[df.label == 'when_pos_output_les'].cr\n rt[10-i] = df.loc[df.label == 'when_pos_output_les'].rt.mean() \n cr_sem[10-i] = df.loc[df.label == 'when_pos_output_les'].cr_sem\n rt_sem[10-i] = df.loc[df.label == 'when_pos_output_les'].rt_sem\n cr_log[10-i] = df.loc[df.label == 'when_pos_output_les'].cr_log\n cr_log_sem[10-i] = df.loc[df.label == 'when_pos_output_les'].cr_log_sem\n \n # no lesion\n files = glob.glob('../log/RT/*.hdf5') \n df,_,_ = bhv_extract(file_paths = files)\n cr[5] = df.cr.mean()\n rt[5] = df.rt_mean.mean()\n cr_sem[5] = df.cr.sem()\n rt_sem[5] = df.rt_mean.sem()\n cr_log[5] = df.cr_log.mean()\n cr_log_sem[5] = df.cr_log.sem()\n\n return cr, cr_sem, rt, rt_sem, cr_log, cr_log_sem\n\ndef plot_speed_accuracy(cr, cr_sem, rt, rt_sem, cr_log, cr_log_sem):\n \n \n fig0, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n # plot the lesion effect on choice\n ax1.errorbar(range(11), cr, cr_sem, label = 'cr')\n ax1.errorbar(range(11), cr_log, cr_log_sem, label = 'cr_log')\n plt.ylim([0.7,1.25])\n ax1.plot(3, 1.02)\n ax1.legend()\n # plot lesion effect on reaction time\n ax2.errorbar(range(11), rt, rt_sem,color = 'k', label = 'rt')\n plt.ylim([2,10.5])\n ax2.legend()\n \n fig0.savefig('../figs/speed_accuracy_all.eps', format='eps', dpi=1000)\n plt.show()\n\n\n","sub_path":"holmes/toolkits_whenwhich.py","file_name":"toolkits_whenwhich.py","file_ext":"py","file_size_in_byte":19687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"183678268","text":"from PIL import Image, ImageTk\nfrom PIL import ImageDraw,ImageFont\n\nimport tkinter \n\nroot = tkinter.Tk()\nroot.title('应用程序窗口') # window title\nroot.resizable(False, False) # fix the window size\nwindowWidth = 800 # current window width\nwindowHeight = 500 # current window height\n\nimg = Image.open(r'C:\\Users\\Administrator\\Desktop\\百度人脸识别api\\3.jpg') # open the image\n\n\n\n# create a drawing object\ndraw = ImageDraw.Draw(img)\n\n# detection info (face bounding box)\nlocation= {'left': 217.5687561, 'top': 206.6197815, 'width': 231, 'height': 223, 'rotation': -3}\nx1= location['left']\ny1= location['top']\nx2= location['left'] + location['width']\ny2= location['top'] + location['height']\n\n# draw straight lines:\n# from point a to point b\ndraw.line((x1, y1, x1, y2), 'cyan')\ndraw.line((x1, y2, x2, y2), 'cyan')\ndraw.line((x2, y2, x2, y1), 'cyan')\ndraw.line((x2, y1, x1, y1), 'cyan')\n
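\n# Hedged aside (added): the four line() calls trace the box edge by edge;\n# PIL can draw the same outline in one call, e.g.\n# draw.rectangle([x1, y1, x2, y2], outline='cyan')\n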
\n\n# draw the text\nfont = ImageFont.truetype(\"consola.ttf\", 40, encoding=\"unic\")# set the font\nnumber = 1\ntext = 'hello:%d'%(number) # Chinese characters are not supported by this font\ndraw.text((x1, y1-30), text, 'fuchsia', font)\n # top-left corner of the text, content, colour, font \n\nimg = ImageTk.PhotoImage(img) # let tk display the image\n\n# show it in tk\nlabel_img = tkinter.Label(root, image = img) # after the modification it is still the original file\nlabel_img.pack()\nroot.mainloop()\n\n\n\n","sub_path":"GUI/图片加框进tk.py","file_name":"图片加框进tk.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"295310321","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass LoadDimensionOperator(BaseOperator):\n \"\"\"Operator class for loading data into the dimension table.\n \n Args:\n redshift_conn_id (str): The Redshift Airflow Conn Id.\n table (str): The Redshift dimension table name. \n sql (str): The SQL code to execute. \n insert_mode (str): The insert mode which can be \"with truncate\" or \"append\".\n \"\"\"\n \n ui_color = '#80BD9E'\n\n @apply_defaults\n def __init__(self,\n redshift_conn_id=\"\",\n table=\"\",\n sql=\"\",\n insert_mode=\"\",\n *args, **kwargs):\n\n super(LoadDimensionOperator, self).__init__(*args, **kwargs) \n self.redshift_conn_id = redshift_conn_id\n self.table = table\n self.sql = sql\n self.insert_mode = insert_mode\n\n def execute(self, context):\n redshift = PostgresHook(self.redshift_conn_id)\n if self.insert_mode == 'with truncate':\n self.log.info(f'Truncate and insert data into dimension table: {self.table}') \n redshift.run(f'TRUNCATE TABLE {self.table}') \n else: \n self.log.info(f'Insert data into dimension table: {self.table}') \n # run the insert in both modes (after the truncate when requested)\n redshift.run(self.sql)\n","sub_path":"plugins/operators/load_dimension.py","file_name":"load_dimension.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"564458790","text":"import resources.lib.nflcs\n\n\nclass Team(resources.lib.nflcs.NFLCS):\n _short = \"bears\"\n _cdaweb_url = \"http://www.chicagobears.com/cda-web/\"\n _categories = [\n \"Bear Down of the Week\",\n \"Bear Trax\",\n \"Bears Buzz\",\n \"Draft Central 2014\",\n \"Features\",\n \"Game Preview\",\n \"Highlights\",\n \"Inside The Bears\",\n \"Keys to the Game\",\n \"NFL Network\",\n \"Offseason\",\n \"Press Conferences\",\n \"Sounds of the Game\",\n \"Thayer's Playbook\",\n \"Training Camp\",\n ]\n\n def __init__(self, parameters):\n self._parameters = parameters\n self.go()\n","sub_path":"plugin.video.nfl-teams/resources/lib/teams/bears.py","file_name":"bears.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"209043447","text":"#\n# @lc app=leetcode id=1143 lang=python3\n#\n# [1143] Longest Common Subsequence\n#\n
# https://leetcode.com/problems/longest-common-subsequence/description/\n#\n# algorithms\n# Medium (58.77%)\n# Likes: 3347\n# Dislikes: 40\n# Total Accepted: 218.5K\n# Total Submissions: 371.7K\n# Testcase Example: '\"abcde\"\\n\"ace\"'\n#\n# Given two strings text1 and text2, return the length of their longest common\n# subsequence. If there is no common subsequence, return 0.\n# \n# A subsequence of a string is a new string generated from the original string\n# with some characters (can be none) deleted without changing the relative\n# order of the remaining characters.\n# \n# \n# For example, \"ace\" is a subsequence of \"abcde\".\n# \n# \n# A common subsequence of two strings is a subsequence that is common to both\n# strings.\n# \n# \n# Example 1:\n# \n# \n# Input: text1 = \"abcde\", text2 = \"ace\" \n# Output: 3 \n# Explanation: The longest common subsequence is \"ace\" and its length is 3.\n# \n# \n# Example 2:\n# \n# \n# Input: text1 = \"abc\", text2 = \"abc\"\n# Output: 3\n# Explanation: The longest common subsequence is \"abc\" and its length is 3.\n# \n# \n# Example 3:\n# \n# \n# Input: text1 = \"abc\", text2 = \"def\"\n# Output: 0\n# Explanation: There is no such common subsequence, so the result is 0.\n# \n# \n# \n# Constraints:\n# \n# \n# 1 <= text1.length, text2.length <= 1000\n# text1 and text2 consist of only lowercase English characters.\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n \n # Dynamic Programming\n # Time complexity : O(M⋅N).\n # We're solving M⋅N subproblems. Solving each subproblem is an O(1) operation.\n # Space complexity : O(M⋅N).\n # We're allocating a 2D array of size M⋅N to save the answers to subproblems.\n # def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n # # Make a grid of 0's with len(text2) + 1 columns \n # # and len(text1) + 1 rows.\n # dp_grid = [[0] * (len(text2) + 1) for _ in range(len(text1) + 1)]\n \n # # Iterate up each column, starting from the last one.\n # for col in reversed(range(len(text2))):\n # for row in reversed(range(len(text1))):\n # # If the corresponding characters for this cell are the same...\n # if text2[col] == text1[row]:\n # dp_grid[row][col] = 1 + dp_grid[row + 1][col + 1]\n # # Otherwise they must be different...\n # else:\n # dp_grid[row][col] = max(dp_grid[row + 1][col], dp_grid[row][col + 1])\n \n # # The original problem's answer is in dp_grid[0][0]. Return it.\n # return dp_grid[0][0]\n \n # Forward (bottom-up) fill of the same DP\n def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n # Make a grid of 0's with len(text2) + 1 columns \n # and len(text1) + 1 rows.\n dp_grid = [[0] * (len(text2) + 1) for _ in range(len(text1) + 1)]\n \n # Iterate over each column, starting from the first one.\n for col in range(len(text2)):\n for row in range(len(text1)):\n # If the corresponding characters for this cell are the same...\n if text2[col] == text1[row]:\n dp_grid[row+1][col+1] = 1 + dp_grid[row][col]\n # Otherwise they must be different...\n else:\n dp_grid[row+1][col+1] = max(dp_grid[row][col+1], dp_grid[row+1][col])\n \n # With this forward fill the answer ends up in dp_grid[-1][-1]. 
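#\n # Worked example (hedged sketch, added for illustration) with\n # text1 = \"abcde\", text2 = \"ace\"; rows follow text1, columns follow text2:\n #\n # \"\" a c e\n # \"\" 0 0 0 0\n # a 0 1 1 1\n # b 0 1 1 1\n # c 0 1 2 2\n # d 0 1 2 2\n # e 0 1 2 3\n #\n # The bottom-right cell holds the LCS length, 3.\n # 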
Return it.\n return dp_grid[-1][-1]\n \n# @lc code=end\n\n","sub_path":"problems/1143.longest-common-subsequence/1143.longest-common-subsequence.py","file_name":"1143.longest-common-subsequence.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"138098667","text":"import pygame\n\nclass SingleAlien:\n\n\tdef __init__(self, ai_game):\n\t\tself.screen = ai_game.screen\n\t\tself.screen_rect = ai_game.screen.get_rect()\n\n\t\tself.image = pygame.image.load('images/alien.bmp')\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = self.screen_rect.center\n\n\tdef blitme(self):\n\t\tself.screen.blit(self.image, self.rect)","sub_path":"single_alien.py","file_name":"single_alien.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"510511430","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n#pylint: disable-msg=\n\"\"\"\nFile : keras_dn.py\nAuthor : Valentin Kuznetsov \nDescription: \n the code to train HEP images using DenseNet in TF keras implementation.\n This code is based on TF TPU example:\n https://github.com/tensorflow/tpu/blob/master/tools/colab/keras_mnist_tpu.ipynb\n and, on Keras DenseNet package:\n https://github.com/titu1994/DenseNet\n\nAdditional resouces for TPU and TF datasets:\n https://www.tensorflow.org/guide/using_tpu\n https://www.tensorflow.org/guide/performance/datasets\n https://www.tensorflow.org/api_docs/python/tf/data/Dataset\n https://github.com/tensorflow/models/blob/master/official/mnist/mnist_tpu.py\n https://www.tensorflow.org/guide/datasets\n https://www.tensorflow.org/guide/performance/datasets\n https://www.tensorflow.org/tutorials/load_data/tf_records\n\nTensorBoard and TF profile references:\n https://www.tensorflow.org/guide/graph_viz\n https://www.tensorflow.org/tensorboard/r2/scalars_and_keras\n https://cloud.google.com/tpu/docs/tensorboard-setup\n https://cloud.google.com/tpu/docs/cloud-tpu-tools#trace_viewer\n\nKeras and modern convnets, on TPUs\n https://codelabs.developers.google.com/codelabs/keras-flowers-tpu/index.html?index=..%2F..index#0\n\"\"\"\n\n# system modules\nimport os\nimport sys\nimport math\nimport time\nimport argparse\nfrom datetime import datetime\n\n# The GPU id to use, usually either \"0\" or \"1\";\n#os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\n# import numpy\nimport numpy as np\nnp.random.seed(123)\n\n# import matplotlib\nimport matplotlib\nmatplotlib.use('Agg') # avoid using XWindows when no DISPLAY is set\nimport matplotlib.pyplot as plt\n\n# import keras, sklearn\nfrom sklearn import metrics\n\n# import densenet, we use local module tf_densenet which is modification of\n# https://github.com/titu1994/DenseNet where keras layers are replaced with\n# tf.keras ones.\nimport tf_densenet as densenet\n\n# import tensorflow\nimport tensorflow as tf\n\n# global variables\nIMG_SHAPE = None\nNCLASSES = None\n\nclass OptionParser():\n def __init__(self):\n \"User based option parser\"\n self.parser = argparse.ArgumentParser(prog='PROG')\n self.parser.add_argument(\"--fdir\", action=\"store\",\n dest=\"fdir\", default=\"\", help=\"Input directory of data images\")\n self.parser.add_argument(\"--fout\", action=\"store\",\n dest=\"fout\", default=\"\", help=\"Output file\")\n self.parser.add_argument(\"--batch_size\", action=\"store\",\n dest=\"batch_size\", default=10, help=\"Batch 
size\")\n self.parser.add_argument(\"--classes\", action=\"store\",\n dest=\"classes\", default=0, help=\"Number of classification classes\")\n self.parser.add_argument(\"--image-shape\", action=\"store\",\n dest=\"image_shape\", default=\"300,300,3\", help=\"Image shape, default 300,300,3 (color PNG)\")\n self.parser.add_argument(\"--dropout\", action=\"store\",\n dest=\"dropout\", default=\"0.1\", help=\"dropout rate, default 0.1\")\n self.parser.add_argument(\"--epochs\", action=\"store\",\n dest=\"epochs\", default=10, help=\"Number of epochs, default 10\")\n self.parser.add_argument(\"--steps\", action=\"store\",\n dest=\"steps\", default=0, help=\"Number of steps per epoch, default 0\")\n self.parser.add_argument(\"--test\", action=\"store_true\",\n dest=\"test\", default=False, help=\"use test DenseNet model\")\n self.parser.add_argument(\"--tpu\", action=\"store\",\n dest=\"tpu\", default=\"\", help=\"tpu to connect\")\n self.parser.add_argument(\"--tboard\", action=\"store\",\n dest=\"tboard\", default=\"\", help=\"Path to TensorBoard location\")\n self.parser.add_argument(\"--verbose\", action=\"store_true\",\n dest=\"verbose\", default=False, help=\"verbose output\")\n\ndef plot_roc_curve(fpr, tpr, fout, title='Keras'):\n \"Helper function to plot roc curve\"\n auc = metrics.auc(fpr, tpr)\n fout = fout.split('.')[0]\n plt.figure()\n plt.plot([0, 1], [0, 1], 'k--')\n plt.plot(fpr, tpr, label='{} (area = {:.3f})'.format(title, auc))\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve')\n plt.legend(loc='best')\n plt.savefig('{}-roc.pdf'.format(fout))\n plt.close()\n\ndef plot_acc(epochs, history, fname):\n \"Helper function to plot trainining accuracies\"\n acc_values = history['acc']\n plt.figure()\n plt.plot(epochs, history['acc'], label='Training accuracy')\n plt.plot(epochs, history['val_acc'], label='Validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.savefig('{}-acc.pdf'.format(fname))\n plt.close()\n\ndef plot_loss(epochs, history, fname):\n \"Helper function to plot training losses\"\n plt.figure()\n plt.plot(epochs, history['loss'], label='Training loss')\n plt.plot(epochs, history['val_loss'], label='Validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig('{}-loss.pdf'.format(fname))\n plt.close()\n\ndef dataset_to_numpy(dataset, nentries=10):\n \"\"\"\n Helper function to return numpy representation of a TF dataset (images, labels)\n \"\"\"\n unbatched_ds = dataset.apply(tf.data.experimental.unbatch())\n images, labels = unbatched_ds.batch(nentries).make_one_shot_iterator().get_next()\n # get one batch \n# images, labels = dataset.make_one_shot_iterator().get_next()\n \n # Run once, get one batch. 
Session.run returns numpy results\n with tf.Session() as sess:\n data, classes = sess.run([images, labels])\n return data, classes\n\ndef parse_fn(filename, label):\n \"Helper function to return image and label from given file/label pair\"\n image_string = tf.read_file(filename, \"file_reader\")\n image_decoded = tf.image.decode_image(image_string)\n image = tf.cast(image_decoded, tf.float32)\n # we still need to explicitly set shape of the image\n # https://github.com/tensorflow/tensorflow/issues/16052\n global IMG_SHAPE\n image.set_shape(list(IMG_SHAPE))\n label.set_shape([None, ])\n return image, label\n\ndef get_files_labels(fdir):\n \"\"\"\n Helper function to return files and labels from given input directory.\n It can read either fdir/{train,test,valid}/{Sample1,Sample2}\n structure or fdir/{train,test,valid}/*.tfrecords\n\n It also takes care to replace fdir with proper STORAGE_BUCKET\n environment value (if it is set) which is useful when working on\n Google Cloud platform which requires to read data from Google Storage.\n \"\"\"\n samples = [d for d in os.listdir(fdir) if not d.startswith('.')]\n files = []\n labels = []\n for idx, sdir in enumerate(os.listdir(fdir)):\n if sdir.startswith('.'):\n continue\n idir = os.path.join(fdir, sdir)\n if os.path.isdir(idir):\n # if TPU or google cloud is present\n if 'STORAGE_BUCKET' in os.environ:\n tdir = idir.replace(fdir, os.environ['STORAGE_BUCKET'])\n for sd in ['train', 'valid', 'test']:\n if sd in fdir:\n tdir = os.path.join(os.environ['STORAGE_BUCKET'], sd)\n tdir = os.path.join(tdir, sdir)\n filenames = [os.path.join(tdir, f) for f in os.listdir(idir)]\n else:\n filenames = [os.path.join(idir, f) for f in os.listdir(idir)]\n files += filenames\n labels += [idx]*len(filenames)\n else:\n # labels here is irrelevant since we assume files\n # contains all the information, e.g. 
tfrecords\n\n # if TPU or google cloud is present\n if 'STORAGE_BUCKET' in os.environ:\n tdir = idir.replace(fdir, os.environ['STORAGE_BUCKET'])\n for sd in ['train', 'valid', 'test']:\n if sd in fdir:\n tdir = os.path.join(os.environ['STORAGE_BUCKET'], sd)\n tdir = os.path.join(tdir, sdir)\n files.append(tdir)\n else:\n files.append(idir)\n labels.append(0)\n return files, labels\n\ndef ds_dim(fdir):\n \"Helper function to return dimension of dataset (total number of files)\"\n files, _ = get_files_labels(fdir)\n return len(files)\n\ndef get_labels(fdir):\n \"Helper function to get labels from given input directory of images\"\n _, labels = get_files_labels(fdir)\n return labels\n\ndef get_dataset(fdir, batch_size, shuffle=False, cache=False, tpu=False):\n \"\"\"\n Top level function to create TF dataset from given input directory.\n This directory can either contain images or tfrecords files\n \"\"\"\n files, labels = get_files_labels(fdir)\n print(\"input directory: {}\".format(fdir))\n print(\"total files {}, total labels {}\".format(len(files), len(labels)))\n print(\"one file {} and label {}\".format(files[0], labels[0]))\n if files[0].endswith('tfrecords'):\n return get_tfrecords(files, batch_size, shuffle, cache, tpu)\n return get_dataset_img(files, labels, batch_size, shuffle, cache, tpu)\n\ndef get_dataset_img(files, labels, batch_size, shuffle=False, cache=False, tpu=False):\n \"Helper function to create TF dataset from given set of files and labels\"\n global NCLASSES\n labels = tf.keras.utils.to_categorical(labels, num_classes=NCLASSES)\n images = tf.convert_to_tensor(files)\n dataset = tf.data.Dataset.from_tensor_slices((images, labels))\n if cache:\n # this small dataset can be entirely cached in RAM, for TPU this is important\n dataset = dataset.cache()\n if shuffle:\n dataset = dataset.shuffle(len(files), reshuffle_each_iteration=True)\n if tpu:\n # for TPU it is important to deliver data fast, we need to choose\n # number of parallel calls appropriately, see\n # https://www.tensorflow.org/guide/performance/datasets\n dataset = dataset.map(parse_fn, num_parallel_calls=10)\n else:\n dataset = dataset.map(parse_fn)\n # drop_remainder is important on TPU, batch size must be fixed\n dataset = dataset.batch(batch_size, drop_remainder=True)\n dataset = dataset.repeat() # Mandatory for Keras for now\n dataset = dataset.prefetch(batch_size) # fetch next batches while training on the current one\n return dataset\n
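\n# Pipeline-order note (added for clarity): the builders above apply\n# cache -> shuffle -> map -> batch(drop_remainder=True) -> repeat -> prefetch;\n# drop_remainder matters on TPU because XLA requires static batch shapes.\n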
\ndef parse_tfrec(example_proto):\n \"\"\"\n Helper function to parse tfrecords (see img2tfrecs.py which creates them\n from images). We assume that tfrecords provides the image parameters\n and label.\n \"\"\"\n features={\n 'height': tf.FixedLenFeature([], tf.int64, default_value=IMG_SHAPE[0]),\n 'width': tf.FixedLenFeature([], tf.int64, default_value=IMG_SHAPE[1]),\n 'depth': tf.FixedLenFeature([], tf.int64, default_value=IMG_SHAPE[2]),\n 'label': tf.FixedLenFeature([], tf.int64, default_value=0),\n 'image': tf.FixedLenFeature([], tf.string, default_value=''),\n }\n parsed_features = tf.parse_single_example(example_proto, features)\n height = tf.cast(parsed_features['height'], tf.int32)\n width = tf.cast(parsed_features['width'], tf.int32)\n depth = tf.cast(parsed_features['depth'], tf.int32)\n label = tf.cast(parsed_features['label'], tf.int32)\n # convert label numerical value into categorical vector based on NCLASSES\n global NCLASSES\n label = tf.one_hot(label, NCLASSES)\n image = tf.decode_raw(parsed_features['image'], tf.uint8)\n image = tf.cast(image, tf.float32) # cast to float32 on TPU\n image_shape = tf.stack([height, width, depth])\n image = tf.reshape(image, image_shape)\n # we still need to explicitly set shape of the image\n # https://github.com/tensorflow/tensorflow/issues/16052\n image.set_shape(list(IMG_SHAPE))\n return image, label\n\ndef get_tfrecords(files, batch_size, shuffle=False, cache=False, tpu=False):\n \"\"\"\n Helper function which reads tfrecord input files and returns a TF\n dataset of images and labels. The labels are converted from numerical\n values to categorical vectors.\n \"\"\"\n dataset = tf.data.TFRecordDataset(files, num_parallel_reads=32)\n if cache:\n # this small dataset can be entirely cached in RAM, for TPU this is important\n dataset = dataset.cache()\n if shuffle:\n dataset = dataset.shuffle(len(files), reshuffle_each_iteration=True)\n if tpu:\n # for TPU it is important to deliver data fast, we need to choose\n # number of parallel calls appropriately, see\n # https://www.tensorflow.org/guide/performance/datasets\n dataset = dataset.map(parse_tfrec, num_parallel_calls=10)\n else:\n dataset = dataset.map(parse_tfrec)\n # drop_remainder is important on TPU, batch size must be fixed\n dataset = dataset.batch(batch_size, drop_remainder=True)\n dataset = dataset.repeat() # Mandatory for Keras for now\n dataset = dataset.prefetch(batch_size) # fetch next batches while training on the current one\n return dataset\n
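\n# Hedged companion sketch (added; not in the original file): writing one\n# record with the schema parse_tfrec expects; the img2tfrecs.py helper in\n# the repo presumably does something similar.\ndef _write_example(writer, image_np, label):\n \"Serialize one uint8 HxWxC numpy image into a tf.train.Example record.\"\n feature = {\n 'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[image_np.shape[0]])),\n 'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[image_np.shape[1]])),\n 'depth': tf.train.Feature(int64_list=tf.train.Int64List(value=[image_np.shape[2]])),\n 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),\n 'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_np.tostring()])),\n }\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(example.SerializeToString()) # writer: tf.python_io.TFRecordWriter\n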
\ndef train(fdir, batch_size, image_shape, classes, fout, epochs=10, dropout=0.1,\n steps_per_epoch=None, is_test=False, tpu_name='', tboard=None):\n \"\"\"\n Main function which does the training of our ML model either from\n images or tfrecords from provided input directory fdir.\n \"\"\"\n if tboard:\n# logdir=os.path.join(tboard, datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n logdir = tboard\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)\n with tf.Session() as sess:\n writer = tf.summary.FileWriter(tboard, sess.graph)\n\n # input parameters\n train_dir = os.path.join(fdir, 'train')\n valid_dir = os.path.join(fdir, 'valid')\n test_dir = os.path.join(fdir, 'test')\n if not steps_per_epoch:\n steps_per_epoch = 6000//batch_size # 60,000 items in this dataset\n tpu = None\n\n # build and train model\n if is_test:\n # for testing use very small DenseNet which is defined by depth parameter\n model = densenet.DenseNet(input_shape=image_shape, depth=10,\n classes=classes, dropout_rate=dropout, weights=None)\n else:\n model = densenet.DenseNetImageNet161(input_shape=image_shape,\n classes=classes, dropout_rate=dropout, weights=None)\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n trained_model = model\n\n # print model layers\n model.summary()\n\n print(\"### model\", type(model), type(model.fit), model.fit)\n\n # set up learning rate decay\n lr_decay = tf.keras.callbacks.LearningRateScheduler(lambda epoch: 0.0001 + 0.02 * math.pow(0.5, 1+epoch), verbose=True)\n\n # NOTE: get_dataset takes the image shape and class count from the\n # module-level globals (IMG_SHAPE/NCLASSES); passing image_shape/classes\n # positionally would be swallowed by the shuffle/cache parameters.\n training_dataset = get_dataset(train_dir, batch_size, shuffle=True)\n validation_dataset = get_dataset(valid_dir, batch_size)\n test_dataset = get_dataset(test_dir, batch_size)\n\n try: # TPU detection\n # Picks up a connected TPU on Google's Colab, ML Engine, Kubernetes\n # and Deep Learning VMs accessed through the 'ctpu up' utility\n if tpu_name:\n tpu = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_name)\n else:\n tpu = tf.contrib.cluster_resolver.TPUClusterResolver()\n print('### Training on TPU ###')\n except ValueError:\n print('### Training on GPU/CPU ###')\n # printout how our session is configured\n# sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n\n print(\"training dataset\", type(training_dataset), training_dataset)\n print(\"validation dataset\", type(validation_dataset), validation_dataset)\n print(\"test dataset\", type(test_dataset), test_dataset)\n\n callbacks = [lr_decay]\n if tboard:\n callbacks = [lr_decay, tensorboard_callback]\n if tpu: # TPU training\n # For TPU, we will need a function that returns the dataset\n training_input_fn = lambda: get_dataset(train_dir, batch_size,\n shuffle=True, tpu=True)\n validation_input_fn = lambda: get_dataset(valid_dir, batch_size,\n tpu=True)\n\n strategy = tf.contrib.tpu.TPUDistributionStrategy(tpu)\n trained_model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)\n # Work in progress: reading directly from dataset object not yet implemented\n
 # for Keras/TPU. Keras/TPU needs a function that returns a dataset.\n fit = trained_model.fit(training_input_fn,\n steps_per_epoch=steps_per_epoch, epochs=epochs,\n validation_data=validation_input_fn, validation_steps=1,\n verbose=1, callbacks=callbacks)\n else: # GPU/CPU training\n fit = trained_model.fit(training_dataset,\n steps_per_epoch=steps_per_epoch, epochs=epochs,\n validation_data=validation_dataset, validation_steps=1,\n callbacks=callbacks)\n \n print(\"history keys {}\".format(fit.history.keys()))\n# print(\"accuracy: train={} valid={}\".format(fit.history['acc'], fit.history['val_acc']))\n# print(\"loss: train={} valid={}\".format(fit.history['loss'], fit.history['val_loss']))\n\n if fout:\n fname = fout.split(\".\")[0]\n loss_values = fit.history['loss']\n epochs = range(1, len(loss_values)+1)\n\n # make plots\n plot_loss(epochs, fit.history, fname)\n plot_acc(epochs, fit.history, fname)\n\n # choose which dataset/labels we'll use to test our predictions\n # we can use either valid or test datasets\n tdir = test_dir\n tdataset = test_dataset\n y_true = get_labels(tdir)\n steps = len(y_true)//batch_size # predict() expects an integer step count\n if not steps:\n steps = 1\n if tpu:\n # so far prediction does not work on TPU; we still need to figure\n # out what to pass to the predict method. The goal here is to train\n # the model, which can later be used for inference.\n return\n input_fn = lambda: get_dataset(tdir, batch_size, tpu=True)\n probs = trained_model.predict(input_fn, steps=steps)\n else:\n probs = trained_model.predict(tdataset, steps=steps)\n y_pred = np.argmax(probs, axis=1)\n print(\"probs\", type(y_true), np.shape(y_true), type(y_pred), np.shape(y_pred))\n print(\"y_true\", y_true[:10])\n print(\"y_pred\", y_pred[:10])\n print(\"probs \", probs[:10])\n tsize = min(len(y_true), len(y_pred)) - 1\n y_true = y_true[:tsize]\n y_pred = y_pred[:tsize]\n fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred)\n print(metrics.confusion_matrix(y_true, y_pred))\n plot_roc_curve(fpr, tpr, fname, title='Keras')\n\ndef main():\n \"Main function\"\n optmgr = OptionParser()\n opts = optmgr.parser.parse_args()\n fdir = opts.fdir\n if not fdir:\n print(\"please setup fdir\")\n sys.exit(1)\n batch_size = int(opts.batch_size)\n image_shape = tuple([int(s) for s in opts.image_shape.split(',')])\n global IMG_SHAPE\n IMG_SHAPE = image_shape\n classes = int(opts.classes)\n global NCLASSES\n NCLASSES = classes\n epochs = int(opts.epochs)\n dropout = float(opts.dropout)\n fout = opts.fout\n steps = int(opts.steps)\n is_test = opts.test\n tpu_name = opts.tpu\n tboard = opts.tboard\n print(\"{}\\n\".format(' '.join(sys.argv)))\n print(\"Input parameters\")\n print(\"fdir {}\".format(fdir))\n print(\"batch_size {}\".format(batch_size))\n print(\"image_shape {}\".format(image_shape))\n print(\"classes {}\".format(classes))\n print(\"epochs {}\".format(epochs))\n print(\"dropout {}\".format(dropout))\n print(\"fout {}\".format(fout))\n print(\"steps {}\".format(steps))\n print(\"TPU name {}\".format(tpu_name))\n if not classes:\n print(\"please setup number of trained classes\")\n sys.exit(1)\n time0 = time.time()\n train(fdir, batch_size, image_shape, classes, fout, epochs, dropout, steps, is_test, tpu_name, tboard)\n print(\"Elapsed time: {} sec\".format(time.time()-time0))\n\nif __name__ == '__main__':\n main()\n","sub_path":"keras_dn.py","file_name":"keras_dn.py","file_ext":"py","file_size_in_byte":20143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"299707121","text":"from 
pprint import pprint\n\nfrom knock40 import Morph\nfrom knock41 import Chunk, read_file\nfrom knock43 import pos_in_phrase\n\n# extract most left verb\n# extract all postpositional particles\ndef obtain_word(pos, chunk):\n for morph in chunk.morphs:\n if morph.pos == pos:\n if pos == '動詞':\n return morph.base\n if pos == '助詞':\n return morph.surface\n\nif __name__ == '__main__':\n doc = read_file()\n verb_pps_all = []\n for sentence in doc:\n verb_pps = {}\n for chunk_srcs in sentence:\n chunk_dst = sentence[chunk_srcs.dst]\n if pos_in_phrase('助詞', chunk_srcs) \\\n and pos_in_phrase('動詞', chunk_dst):\n verb_base = obtain_word('動詞', chunk_dst)\n pp_surface = obtain_word('助詞', chunk_srcs)\n dst_verb = f'{chunk_srcs.dst}_{verb_base}'\n if dst_verb in verb_pps:\n verb_pps[dst_verb].append(pp_surface)\n verb_pps[dst_verb].sort()\n else:\n verb_pps[dst_verb] = [pp_surface]\n verb_pps_all.append(verb_pps)\n\n for verb_pps in verb_pps_all:\n for dst_verb in verb_pps.keys():\n verb = dst_verb.split('_')[1]\n print('{}\\t{}'.format(verb, ' '.join(verb_pps[dst_verb])))\n\n# Unix command\n## cat output45.txt | sort | uniq -c | sort -n -r | head\n\"\"\"\n 49 する\tを\n 18 する\tが\n 17 する\tて\n 16 する\tに\n 15 する\tと\n 11 する\tは を\n 11 する\tに を\n 9 する\tで を\n 9 よる\tに\n 8 する\tが に\n\"\"\"\n## cat output45.txt | grep '^行う\\s'| sort | uniq -c | sort -n -r | head\n\"\"\"\n 8 行う\tを\n 1 行う\tまで を\n 1 行う\tから\n 1 行う\tに まで を\n 1 行う\tは を をめぐって\n 1 行う\tに に により を\n 1 行う\tて に は は\n 1 行う\tが て で に\n 1 行う\tに を を\n 1 行う\tで に を\n\"\"\"\n## cat output45.txt | grep '^なる\\s'| sort | uniq -c | sort -n -r | head\n\"\"\"\n 3 なる\tに は\n 3 なる\tが と\n 3 なる\tと\n 2 なる\tに\n 1 なる\tから が て で と\n 1 なる\tから で と\n 1 なる\tて として に は\n 1 なる\tが と にとって\n 1 なる\tで と など\n 1 なる\tが で と に は は\n\"\"\"\n## cat output45.txt | grep '^与える\\s'| sort | uniq -c | sort -n -r | head\n\"\"\"\n 1 与える\tが など\n 1 与える\tに は を\n 1 与える\tが に\n\"\"\"\n\n","sub_path":"aida/chapter05/knock45.py","file_name":"knock45.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"547667475","text":"import json\nimport matplotlib.pyplot as plt\nwith open('loss.json' , 'r') as reader:\n\tjf = json.loads(reader.read())\n\tv_loss=jf['v_loss']\n\tloss=jf['loss']\n\tepoch=jf['epoch']\n\tplt.clf()\n\tplt.plot(epoch,loss,lw=3,label=\"training data\")\n\tplt.plot(epoch,v_loss,\"r\",lw=3,label=\"validation data\")\n\tplt.xlabel(\"epoch\")\n\tplt.ylabel(\"loss\")\n\tplt.ylim((1,3))\n\tplt.yticks([1,1.5,2,2.5,3])\n\tplt.legend(loc=\"best\")\n\tplt.savefig('pic.jpg')\n\t\n","sub_path":"pic.py","file_name":"pic.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"150889723","text":"from django.shortcuts import HttpResponse\r\nfrom django.urls import path\r\nfrom shen_ring.views import login\r\n\r\nfrom . 
import views\r\n\r\napp_name = 'index'\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('login/', login, name='login'), # internal call to djangosaml2 login\r\n path('logout/', views.logout, name='logout'), # Saml logout doesn't work, so only local logout.\r\n path('profile/', views.profile, name='profile'),\r\n path('feedback/form/', views.feedback_form, name='feedback_form'),\r\n path('feedback/submit/', views.feedback_submit, name='feedback_submit'),\r\n path('feedback/list/', views.list_feedback, name='list_feedback'),\r\n path('feedback/confirm//', views.feedback_confirm, name='confirm_feedback'),\r\n path('feedback/close//', views.feedback_close, name='close_feedback'),\r\n path('about/', views.about, name='about'),\r\n path('profile/settings/', views.user_settings, name='changesettings'),\r\n path('terms/', views.terms_form, name='termsaccept'),\r\n path('robots.txt', lambda r: HttpResponse(\"User-agent: *\\nAllow: /\", content_type=\"text/plain\"), name='robots'), # allow all robots.\r\n path('markdown/upload/', views.markdown_upload, name='markdown_upload'),\r\n]\r\n","sub_path":"index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"411463297","text":"import datetime\nfrom enum import Enum\n\n\nclass DocType(Enum):\n Act = \"АР\"\n Invoice = \"PH\"\n\n\nclass Role(Enum):\n Accountant = \"accountant\"\n Shop = \"store\"\n\n\nclass SendNotifyType(Enum):\n ChangeStatusDocument = 1\n OverdueDocument = 2\n\n\n# subscribe_type_list: 1 - status change, 2 - not signed on time\n\n\nclass Subscriber:\n def __init__(self, email, role, store_number, subscribe_type_list):\n self.email = email\n self.role = role\n self.store_number = store_number\n self.subscribe_type_list = subscribe_type_list\n\n def __repr__(self):\n return f\"{self.email}, {self.role}, {self.store_number}, {self.subscribe_type_list}\"\n\n\nclass Document:\n def __init__(self, store_number, document_number, document_date, document_type, parent_invoice_number,\n parent_invoice_date, send_notify_type):\n self.store_number = store_number\n self.document_number = document_number\n self.document_date = document_date\n self.document_type = document_type\n self.parent_invoice_number = parent_invoice_number\n self.parent_invoice_date = parent_invoice_date\n self.send_notify_type = send_notify_type\n\n def __repr__(self):\n return f\"{self.store_number}, {self.document_number}, {self.document_date}, {self.document_type}, {self.parent_invoice_number}, {self.parent_invoice_date}\"\n\n\nclass DataProvider:\n\n @staticmethod\n def get_documents():\n return DataProvider._list_documents\n\n @staticmethod\n def get_subscribers():\n return DataProvider._list_subscribers\n\n _list_documents = [\n Document(\n 2,\n 2,\n datetime.date(2019, 11, 1),\n DocType.Act,\n 1,\n datetime.date(2019, 10, 20),\n SendNotifyType.ChangeStatusDocument\n ),\n Document(\n 1,\n 1,\n datetime.date(2019, 12, 28),\n DocType.Invoice,\n 1,\n datetime.date(2019, 12, 10),\n SendNotifyType.OverdueDocument\n ),\n Document(\n 3,\n 3,\n datetime.date(2019, 10, 12),\n DocType.Act,\n 3,\n datetime.date(2019, 10, 11),\n SendNotifyType.OverdueDocument\n ),\n\n ]\n _list_subscribers = [\n Subscriber(\n \"anatoliynn@gamil.com\",\n Role.Accountant,\n 1,\n [SendNotifyType.OverdueDocument]\n ),\n Subscriber(\n \"store2@gmail.com\",\n Role.Shop,\n 1,\n [SendNotifyType.ChangeStatusDocument, SendNotifyType.OverdueDocument]\n ),\n 
Subscriber(\n \"ivan@gamil.com\",\n Role.Accountant,\n 2,\n [SendNotifyType.ChangeStatusDocument]\n ),\n Subscriber(\n \"store3@gmail.com\",\n Role.Shop,\n 3,\n [SendNotifyType.ChangeStatusDocument, SendNotifyType.OverdueDocument]\n ),\n ]\n\n\n","sub_path":"provider.py","file_name":"provider.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"190618783","text":"import numpy as np\n\nfrom gym.envs.registration import register\n\n\npid_pathcolav_config = {\n \"step_size\": 0.10,\n \"max_t_steps\": 4000,\n \"min_reward\": -1000,\n \"n_obs_states\": 12,\n \"cruise_speed\": 1.5,\n \"lambda_reward\": 0.6,\n \"reward_roll\": -1,\n \"reward_rollrate\": -1,\n \"reward_control_derivative\": [-0.005, -0.005],\n \"reward_heading_error\": -1,\n \"reward_crosstrack_error\": -0.0001,\n \"reward_pitch_error\": -1,\n \"reward_verticaltrack_error\": -0.0001,\n \"reward_use_rudder\": -0.1,\n \"reward_use_elevator\": -0.1,\n \"reward_collision\": 0,\n \"sensor_span\": (140,140), # the horizontal and vertical span of the sensors\n \"sensor_suite\": (15, 15), # the number of sensors covering the horizontal and vertical span\n \"sensor_input_size\": (8,8), # the shape of FLS data passed to the neural network. Max pooling from raw data is used\n \"sensor_frequency\": 1,\n \"sonar_range\": 25,\n \"n_obs_errors\": 2,\n \"n_obs_inputs\": 0,\n \"n_actuators\": 2,\n \"la_dist\": 3,\n \"accept_rad\": 1,\n \"n_waypoints\": 7,\n \"n_int_obstacles\": 1,\n \"n_pro_obstacles\": 3,\n \"n_adv_obstacles\": 8\n}\n\nregister(\n id='PathColav3d-v0',\n entry_point='gym_auv.envs:PathColav3d',\n kwargs={'env_config': pid_pathcolav_config}\n)\n","sub_path":"gym_auv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"314841148","text":"s = input()\nletters = int(input())\nmin_size = int(input())\nmax_size = int(input())\nmeet = []\ncnt = []\nfor x in range(len(s)):\n for y in range(min_size, max_size + 1):\n if x + y >len(s):\n continue\n tem = s[x:x + y]\n tem2 = set(list(tem))\n if len(tem2) <= letters:\n if tem in meet:\n cnt[meet.index(tem)] = cnt[meet.index(tem)] + 1\n else:\n meet.append(tem)\n cnt.append(1)\nif len(cnt) == 0:\n print(0)\nelse:\n print(max(cnt))","sub_path":"Code/CodeRecords/2909/60835/298515.py","file_name":"298515.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"346158427","text":"#\n# This file is part of spiderdonuts,\n# https://github.com/TheoryInPractice/spiderdonuts/,\n# and is Copyright (C) North Carolina State University, 2017. 
It is licensed\n# under the three-clause BSD license; see LICENSE.\n#\n\"\"\"A collection of functions used to generate pre-existing graphs.\"\"\"\n\n# Imports\nimport networkx as nx\nimport numpy as np\nimport os\nimport logging\nfrom code import SPIDERDONUTS\n\n\n# Determine the base path to the current directory\nBASE_PATH = os.path.dirname(os.path.abspath(__file__))\n\n\n# Spiderdonuts logger\nlogger = logging.getLogger(SPIDERDONUTS)\n\n\ndef abs_path(relative):\n \"\"\"Resolve a relative path to an absolute path based on current directory.\n\n Parameters\n ----------\n relative : string\n A file path relative to the current working file.\n\n Returns\n -------\n string\n The absolute path to the relative file.\n \"\"\"\n return os.path.join(BASE_PATH, relative)\n\n\ndef chamfered_dodecahedron():\n \"\"\"Return a networkx graph of a Chamfered Dodecahedron.\n\n Returns\n -------\n Networkx Graph\n A Networkx Graph object of a Chamfered Dodecahedron.\n \"\"\"\n return nx.read_gml(abs_path('gml/chamfered_dodecahedron.gml'))\n\n\ndef pyramid_prism_3():\n \"\"\"Return a networkx graph of a Pyramid Prism 3.\n\n Returns\n -------\n Networkx Graph\n A Networkx Graph object of a Pyramid Prism 3.\n \"\"\"\n return nx.read_gml(abs_path('gml/pyramid_prism_3.gml'))\n\n\ndef pyramid_prism_4():\n \"\"\"Return a networkx graph of a Pyramid Prism 4.\n\n Returns\n -------\n Networkx Graph\n A Networkx Graph object of a Pyramid Prism 4.\n \"\"\"\n return nx.read_gml(abs_path('gml/pyramid_prism_4.gml'))\n\n\ndef pyramid_prism(faces=3, layers=0):\n \"\"\"Generate a Pyramid Prism with `faces` sides and `layers` extra layers.\n\n The graph will be generated programatically,\n rather than loading from a saved .gml file.\n\n Parameters\n ----------\n faces : int, optional\n Number of sides which the generated prism will have\n (The default is 3).\n layers : int, optional\n Number of additional layers added to the middle of\n the prism (The default is 0).\n\n Returns\n -------\n Networkx Graph\n A Pyramid Prism graph with `faces` sides and\n `layers` extra layers.\n\n Raises\n ------\n Exception\n Raised if faces is less than or equal to zero or\n if layers is less than zero.\n \"\"\"\n # Raise if faces <= 0 or layers < 0\n if faces <= 0 or layers < 0:\n raise Exception('faces must be > 0, layers >= 0')\n\n # Create an empty graph.\n g = nx.Graph()\n\n # Number of rows needed is 2 + number of extra layers\n # len_row is equal to number of faces\n num_rows = 2 + layers\n len_row = faces\n\n # Set graph top and bottom node labels\n top = 0\n bottom = (num_rows * len_row) + 1\n\n # Create rows\n rows = [\n [\n (i * len_row) + j\n for j in range(1, len_row + 1)\n ]\n for i in range(0, num_rows)\n ]\n\n # Add nodes to g\n g.add_nodes_from([\n top,\n *[node for row in rows for node in row],\n bottom\n ])\n\n # Add edges to g\n g.add_edges_from([\n # All edges from the top node to the first row\n *[(top, node) for node in rows[0]],\n\n # All edges from a row to the row below it\n *[\n edge\n for i in range(0, len(rows) - 1)\n for edge in zip(rows[i], rows[i + 1])\n ],\n\n # All edges connecting edges on a row\n *[\n (row[i], row[(i + 1) % len(row)])\n for row in rows\n for i in range(0, len(row))\n ],\n\n # All edges from the bottom row to the bottom node\n *[(bottom, node) for node in rows[-1]]\n ])\n\n # Return g\n return g\n\n\ndef fan_graph():\n \"\"\"Create a fan graph.\n\n Returns\n -------\n Networkx Graph\n A fan graph.\n \"\"\"\n return nx.read_gml(abs_path('gml/fan.gml'))\n\n\ndef snowflake():\n \"\"\"Create 
a snowflake graph.\n\n Returns\n -------\n Networkx Graph\n A snowflake graph.\n \"\"\"\n return nx.read_gml(abs_path('gml/snowflake.gml'))\n\n\ndef tiered_pyramid_prism(k=3):\n \"\"\"Generate a Tiered Pyramid Prism with K sides.\n\n The graph will be generated programatically,\n rather than loading from a saved .gml file.\n\n Parameters\n ----------\n k : int, optional\n Number of sides which the generated prism will have\n (The default is 3).\n\n Returns\n -------\n Networkx Graph\n A Tiered Pyramid Prism graph with k sides.\n\n Raises\n ------\n Exception\n Raised if k argument is less than or equal to zero\n \"\"\"\n return nx.read_gml(abs_path('gml/tiered_pyramid_prism.gml'))\n\n\ndef hexagonal_pyramid_prism():\n \"\"\"Generate a hexagonal pyramid prism.\n\n Returns\n -------\n Networkx Graph\n A networkx graph of a hexagonal pyramid prism\n \"\"\"\n return nx.read_gml(abs_path('gml/hexagonal_pyramid_prism.gml'))\n\n\ndef triangular_prism():\n \"\"\"Generate a 3-layer triangular_prism graph.\"\"\"\n return nx.read_gml(abs_path('gml/triangular_prism.gml'))\n\n\ndef triangular_orthobicupola():\n \"\"\"Generate a triangular orthobicupola.\"\"\"\n return nx.read_gml(abs_path('gml/triangular_orthobicupola.gml'))\n\n\ndef square_orthobicupola():\n \"\"\"Generate a square orthobicupola.\"\"\"\n return nx.read_gml(abs_path('gml/square_orthobicupola.gml'))\n\n\ndef orthobicupola(sides=3):\n \"\"\"Generate an orthobicupola.\n\n The number of sides is taken as the number of\n sides on the top polygon of the bicupola. I.E.\n sides=3 will produce a triangular orthobicupola.\n\n Parameters\n ----------\n sides : Number\n Number of sides on the bicupola (default is 3).\n\n Returns\n -------\n Networkx Graph\n Generated graph of an orthobicupola.\n \"\"\"\n # Generate a new graph\n g = nx.Graph()\n\n # Generate layers\n top = [i for i in range(0, sides)]\n middle = [i for i in range(sides, 3 * sides)]\n bottom = [i for i in range(3 * sides, 4 * sides)]\n\n # Add nodes\n g.add_nodes_from([*top, *middle, *bottom])\n\n # Add edges\n g.add_edges_from([\n # Add all edges between nodes of the same row\n *[\n (row[i], row[(i + 1) % len(row)])\n for row in [top, middle, bottom]\n for i in range(0, len(row))\n ],\n\n # Add all edges between the middle row\n # and the top and bottom rows\n *[\n (middle[i], row[i // 2])\n for row in [top, bottom]\n for i in range(0, len(middle))\n ]\n ])\n\n # Return\n return g\n\n\ndef rhombicuboctahedron():\n \"\"\"Generate a rhombicuboctahedron graph.\"\"\"\n return nx.read_gml(abs_path('gml/rhombicuboctahedron.gml'))\n\n\ndef snowflakecycle(flake_number=5, inner_cycle=5, outer_cycle=3):\n \"\"\"Generate a snowflake cycle.\n\n Cycle can be customized with flake_number different snowflake-sides,\n inner_cycle different snowflakes connected in a cycle by their inner nodes,\n and outer_cycle different copies connected in a cycle by their outer nodes.\n\n The total number of nodes is (1+2*flake_number)*inner_cycle*outer_cycle\n\n Parameters\n ----------\n flake_number : Number\n Number of sides of the snowflake (default is 5).\n inner_cycle : Number\n Length of cycle connecting inner nodes of snowflakes (default 5).\n outer_cycle : Number\n Length of cycle connecting outer nodes of snowflakes (default 3).\n Generated graph of a snowflake-cycle.\n\n Returns\n -------\n Networkx Graph\n Snowflake Cycle\n \"\"\"\n # Construct (k+1)-node snow-flake graph:\n # A k-cycle with a single node connected to every other vertex\n num_flake_nodes = (1 + 2 * flake_number)\n Cn = 
nx.cycle_graph(2 * flake_number)\n Adj = nx.adjacency_matrix(Cn).toarray()\n A_snowflake = np.zeros((num_flake_nodes, num_flake_nodes))\n A_snowflake[1:(num_flake_nodes), 1:(num_flake_nodes)] = Adj\n\n # Construct vector to identify \"every other node\" in the cycle\n temp_vec = np.zeros((2 * flake_number, 1))\n a_mod = 0\n for j in range(len(temp_vec)):\n if j % 2 == (a_mod % 2):\n temp_vec[j] = 1\n\n A_snowflake[1:(num_flake_nodes), 0] = np.squeeze(temp_vec)\n A_snowflake[0, 1:(num_flake_nodes)] = np.squeeze(temp_vec.T)\n\n # Next link together inner_cycle of these\n # snowflakes by their inner flake nodes\n D = np.zeros((num_flake_nodes))\n D[1:(num_flake_nodes)] = np.squeeze(temp_vec)\n D = np.diag(D)\n\n cycle_inner_g = nx.cycle_graph(inner_cycle)\n cycle_inner = nx.adjacency_matrix(cycle_inner_g).toarray()\n I_inner = np.identity(inner_cycle)\n A_inner = np.kron(I_inner, A_snowflake) + np.kron(cycle_inner, D)\n\n # Now link together outer_cycle copies of the inner_cycle graphs\n cycle_outer_g = nx.cycle_graph(outer_cycle)\n cycle_outer = nx.adjacency_matrix(cycle_outer_g).toarray()\n\n temp_vec = np.zeros((2 * flake_number, 1))\n for j in range(len(temp_vec)):\n if j % 2 == (a_mod + 1 % 2):\n temp_vec[j] = 1\n temp_vec = np.squeeze(temp_vec)\n D = np.zeros(num_flake_nodes)\n D[1:(num_flake_nodes)] = temp_vec\n\n I_outer = np.identity(outer_cycle)\n AG = np.kron(I_outer, A_inner) + np.kron(\n cycle_outer,\n np.kron(I_inner, np.diag(D))\n )\n AG = np.asmatrix(AG)\n\n return nx.from_numpy_matrix(AG)\n\n\ndef kks_graph(clique_size=4, num_cliques=5, silent=True):\n \"\"\"Create a networkx version of the KKS graph G(clique_size, num_cliques)\n\n Parameters\n ----------\n clique_size: positive integer\n The number of nodes in each clique of\n the graph G(clique_size, num_cliques)\n\n num_cliques: positive integer\n The number of cliques in the G(clique_size, num_cliques) graph\n\n Returns\n -------\n Networkx graph\n The KKS graph G(clique_size, num_cliques)\n \"\"\"\n\n node_list = []\n for j in range(0, (num_cliques+1)*clique_size):\n node_list.append(j)\n\n inner_nodes = node_list[-clique_size:]\n\n if not silent:\n print(\"Clique nodes: \", node_list[:-clique_size])\n print(\"Independent set: \", inner_nodes)\n\n edge_list = []\n\n # Add edges fow the cliques\n for which_clique in range(0, num_cliques):\n first_node = which_clique*clique_size\n last_node = first_node + clique_size\n for endpoint_a in range(first_node, last_node):\n for endpoint_b in range(endpoint_a+1, last_node):\n edge_list.append((endpoint_a, endpoint_b))\n\n # Now add perfect matching from inner nodes to outer nodes\n for idx, inner_node in enumerate(inner_nodes):\n for which_clique in range(0, num_cliques):\n edge_list.append((inner_node, clique_size*which_clique+idx))\n\n G = nx.Graph()\n G.add_nodes_from(node_list)\n G.add_edges_from(edge_list)\n return G\n\n\ndef spider(degree, length):\n \"\"\"Create a spider graph.\n\n Parameters\n ----------\n degree : Integer\n Degree of the center node (number of pendants)\n length : Integer\n Length of each pendant (not including the center node)\n\n Returns\n -------\n Networkx Graph\n Spider graph\n \"\"\"\n # Create a single spoke and get its adjacency matrix W\n spoke = nx.path_graph(length)\n spoke_adj = nx.adjacency_matrix(spoke).todense()\n\n # Create the identity matrix of size degree\n identity = np.identity(degree)\n\n # Create the adjacency matrix for the graph containing\n # `degree` number unconnected spokes\n spokes = np.matrix(np.kron(spoke_adj, 
identity))\n\n    # Calculate the total number of desired nodes in the graph\n    num_nodes = (degree * length) + 1\n\n    # Create a num_nodes x num_nodes matrix, filled with zeroes.\n    # This will eventually be the matrix of the spider graph\n    spider = np.zeros((num_nodes, num_nodes))\n\n    # Add the spokes to the spider\n    spider[1:num_nodes, 1:num_nodes] = spokes\n\n    # Connect each spoke to the center\n    spider[0, 1:degree + 1] = 1\n    spider[1:degree + 1, 0] = 1\n\n    # Construct and return a networkx graph from the matrix\n    return nx.from_numpy_matrix(spider)\n\n\ndef spider_torus(degree, length, copies):\n    \"\"\"Create a torus of spider graphs.\n\n    A torus is formed with the following process:\n\n    Take `copies[0]` copies of a spider graph and link them\n    together in a ring by connecting each node in the first\n    level of each spider to the same node in the adjacent\n    spider graphs in the ring.\n\n    Take `copies[1]` copies of the original ring, and link them\n    together in a new ring by connecting each node in the second\n    level of each spider to the same node in the adjacent spider\n    graphs in the ring.\n\n    ...\n\n    Repeat until reaching the outer level of the spider.\n\n    Parameters\n    ----------\n    degree : Integer\n        The degree of the base spider graph\n    length : Integer\n        The length of each arm of the base spider graph\n    copies : List\n        A list containing the number of copies of the\n        previous level to make at each level of the\n        hyperchain.\n\n    Returns\n    -------\n    Dict\n        Dictionary containing\n        graph - The full hyperchain graph\n        representatives - A list of representative nodes from each class,\n        in ascending order by level.\n        degree - Degree of the base spider graph\n        length - Length of each pendant\n        copies - Number of copies in each ring\n    \"\"\"\n\n    # Raise an exception if a number of copies is not\n    # specified for each level\n    if len(copies) != length:\n        raise Exception('Invalid number of copies.')\n\n    # Create the base spider graph\n    spider_graph = spider(degree, length)\n\n    # Get the adjacency matrix and shape of the base spider\n    spider_adj = nx.adjacency_matrix(spider_graph).todense()\n    spider_rows, spider_cols = spider_adj.shape\n\n    # Copy the spider adjacency matrix. This will be the base\n    # matrix that gets built up into the hyperchain.\n    adj = spider_adj.copy()\n\n    # Number of copies of the original spider that exist\n    # within the hyperchain\n    num_duplcates = 1\n\n    # Iterate over each level, building up the resulting hyperchain\n    for level in range(length):\n\n        # Log\n        logger.info('Generating level {} of the spider torus'.format(level))\n\n        # Get the number of copies being made of the\n        # previous level\n        num_copies = copies[level]\n\n        # Construct a diagonal matrix which has a 1 for every\n        # pair of nodes that will be connected in this level.\n        # This is determined by taking the nodes from the\n        # original spider graph that exist at `level` and\n        # tiling it `num_duplcates` times.\n        min_node = 1 + (level * degree)\n        max_node = 1 + ((level + 1) * degree)\n        row = np.zeros(spider_rows)\n        row[min_node:max_node] = 1\n        diagonal = np.diag(np.tile(row, num_duplcates))\n\n        # Create a cycle graph equal in size to the number of copies\n        # being made for this level. 
Save its adjacency matrix.\n cycle = nx.adjacency_matrix(nx.cycle_graph(num_copies)).todense()\n\n # Calculate the edges being added to the set of duplicates\n # as the kronecker product of the cycle graph with the\n # diagonal matrix which links nodes together\n edges = np.kron(cycle, diagonal)\n\n # Use the kronecker product to create a single graph\n # containing n unconnected copies of the base adjacency\n # matrix\n duplicates = np.kron(np.identity(num_copies), adj)\n\n # Update the number of duplicates\n num_duplcates *= num_copies\n\n # Update the adjacency matrix to be the set of duplicates\n # plus the set of edges being added.\n adj = duplicates + edges\n\n # Log completion\n logger.info('Spider torus complete')\n\n # Return the graph formed from the final adjacency matrix\n return {\n 'graph': nx.from_numpy_matrix(adj),\n 'representatives': [level * degree for level in range(length + 1)],\n 'degree': degree,\n 'length': length,\n 'copies': copies\n }\n","sub_path":"code/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":16196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"32105002","text":"from collections import Counter\nfrom re import split\n\nBANNER = \"-\" * 35\n\n\ndef format_print(counter, is_reverse=False):\n lst = counter.items()\n print(\"[Unique Words: %d]\" % len(lst))\n print(\"%-16s | %16s\" % (\"Word\", \"Count\"))\n print(BANNER)\n for word, count in lst:\n print(\"%-16s | %16d\" % (word, count))\n\n\ndef count_words(filename):\n counter = Counter()\n with open(filename, \"r\") as f:\n for line in f:\n line = line.strip().lower()\n if not line:\n continue\n counter.update(x for x in split(\"[^a-zA-Z']+\", line) if x)\n return counter\n\n\nformat_print(count_words(\"sarcozy.txt\"), is_reverse=False)","sub_path":"ATIVIDADES/contador_de_palavras.py","file_name":"contador_de_palavras.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"455188615","text":"# -*- coding: UTF-8 -*-\n\nimport couchdb\nimport couchdb.design as diseno\n\nclass TweetStore(object):\n def __init__(self, dbname, url='http://localhost:5984'):\n try:\n self.server = couchdb.Server(url=url)\n self.db = self.server.create(dbname)\n self._create_views()\n except couchdb.http.PreconditionFailed:\n self.db = self.server[dbname]\n \n def _create_views(self):\n count_map = 'function(doc) { emit(doc.id, 1); }'\n count_reduce = 'function(keys, values) { return sum(values); }'\n view = couchdb.design.ViewDefinition('twitter','count_tweets', count_map, reduce_fun=count_reduce)\n view.sync(self.db)\n get_tweets = 'function(doc) { emit(doc, doc); }'\n #get_tweets = 'function(doc) { emit((\"0000000000000000000\"+doc.id).slice(-19), doc); }'\n view = couchdb.design.ViewDefinition('twitter', 'get_tweets', get_tweets)\n view.sync(self.db)\n\n def save_tweet(self, tw):\n tw['_id'] = tw['id_str']\n self.db.save(tw)\n \n def count_tweets(self):\n for doc in self.db.view('twitter/count_tweets'):\n return doc.value\n\n def get_tweets(self):\n return self.db.view('twitter/get_tweets')\n \n \n \n \n ","sub_path":"tweetStore.py","file_name":"tweetStore.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"295567972","text":"#Finds the maximum subarray such that i,j, i not= j, max(arr[j] - arr[i])\n\n#Brute force solution\ndef max_subarray(arr):\n 
first_index = 0\n    max_tuple = (0,0,0)\n    while first_index < (len(arr) - 1):\n        second_index = first_index + 1\n        while second_index < len(arr):\n            difference = arr[second_index] - arr[first_index]\n            if difference > max_tuple[0]:\n                max_tuple = (difference, first_index, second_index)\n            second_index += 1\n        first_index += 1\n    return arr[max_tuple[1]:max_tuple[2] + 1]\n\n\n# Single-pass O(n) solution: track the minimum seen so far and the best difference.\n# (The previous version took the global min and global max regardless of order,\n# which returned an empty slice whenever the max appeared before the min.)\ndef max_subarray_faster_solution(arr):\n    minimum_so_far = (float(\"inf\"), 0)\n    best = (0, 0, 0)\n    for i in range(len(arr)):\n        if arr[i] < minimum_so_far[0]:\n            minimum_so_far = (arr[i], i)\n        elif arr[i] - minimum_so_far[0] > best[0]:\n            best = (arr[i] - minimum_so_far[0], minimum_so_far[1], i)\n    return arr[best[1]:best[2] + 1]\n\n\n\n","sub_path":"challenges/find_max_subarray.py","file_name":"find_max_subarray.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
{"seq_id":"365785699","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File    :   maxFortowElement.py\n@Time    :   2020/07/07 19:37:53\n@Author  :   Michael \n@Version :   1.0\n@Contact :   Search username of MichaelForwork at github\n@Doc     :   find two max elements in the array\n'''\n# -*-*-*-*- here is the beginning of this script -*-*-*-*-\n\ndef maxfortwoElement(array:list)->tuple:\n    \"\"\"find two max elements in an array \n    \"\"\"\n    LENGTH_ARRAY = len(array)\n    if array[0] < array[1]:\n        maxIndex1 = 1\n        maxIndex2 = 0\n    else:\n        maxIndex1 = 0\n        maxIndex2 = 1\n    for i in range(2,LENGTH_ARRAY):\n        if array[i] > array[maxIndex2]:\n            maxIndex2 = i\n        if array[maxIndex2] > array[maxIndex1]:\n            maxIndex1,maxIndex2 = maxIndex2,maxIndex1\n    \n    return array[maxIndex1],array[maxIndex2]\n\n\nif __name__ == \"__main__\":\n    li = [22334243324,232,213,23,335,2341,12,1214,324234]\n    x1, x2 = maxfortwoElement(li,)\n    print(x1,x2)\n","sub_path":"maxFortowElement.py","file_name":"maxFortowElement.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
{"seq_id":"300373003","text":"\r\nimport random\r\ndef fib_piasno(m): #to find the Pisano period\r\n    fiblist=[0,1]\r\n    for i in range(2,m*m+1):\r\n        fiblist.append(fiblist[i-1]+fiblist[i-2])\r\n        if(fiblist[i]%m==1 and fiblist[i-1]%m==0 ):\r\n            return (len(fiblist)-2)\r\n\r\ndef fib_sum_last_digit_for_complete_period(p): #to find the sum of last digits of a complete period\r\n    fiblist=[0,1]\r\n    sum=1\r\n    for i in range (2,p):\r\n        fiblist.append((fiblist[-1]+fiblist[-2])%10)\r\n        sum=fiblist[-1]+sum\r\n    return sum%10\r\n\r\ndef fib_sum(n): #to find the sum of last digits of a fibonacci no.\r\n    equ = (n) % fib_piasno(10)\r\n    if equ <= 1:\r\n        return equ \r\n    the_complete_divisible_no = n-equ\r\n    times = int(the_complete_divisible_no/fib_piasno(10)) #number of complete Pisano periods\r\n    fiblist=[0,1]\r\n    sum =1\r\n    for i in range(equ-1):\r\n        fiblist.append((fiblist[-1]+fiblist[-2])%10)\r\n        sum=sum+fiblist[-1]\r\n    return (sum%10 + (times*fib_sum_last_digit_for_complete_period(fib_piasno(10))%10))%10 \r\n'''\r\ndef fibonacci_sum_naive(n):\r\n    if n <= 1:\r\n        return n\r\n\r\n    previous = 0\r\n    current = 1\r\n    _sum = 1\r\n\r\n    for _ in range(n - 1):\r\n        previous, current = current, previous + current\r\n        _sum += current\r\n\r\n    return _sum % 10\r\n'''\r\n\r\n\r\ndef main():\r\n    n=int(input())\r\n    print(fib_sum(n))\r\n\r\n'''\r\ndef test():\r\n    while(1):\r\n        n= random.randint(1,10000)\r\n        x= fib_sum(n)\r\n        y=fibonacci_sum_naive(n)\r\n        print(n)\r\n        if x!=y:\r\n            
print(\"----FAST:\",x,\"----SLOW:\",y,\"----\")\r\n print(\"-------worng-----\")\r\n break\r\n print(\"ok\")\r\ntest()\r\n'''\r\nmain()","sub_path":"algorithmic toolbox/week2/last_digit_sum_part2.py","file_name":"last_digit_sum_part2.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"295964786","text":"\"\"\" 4 farklı nesnenin 3 kutuya kaç farklı şekilde yerleştirilmesine dair 'Küme Elemanlarının Düzenlenmesi'\nkonulu çözüm altında yazılmış kodu.\"\"\"\ndef Düzenleme(n,p):\n if p > 0 :\n return (n-p+1)*Düzenleme(n,p-1)\n else:\n return 1\n\nprint(\"4 farklı nesne 3 kutuya %s şekilde yerleştirilir...\\n\"%(Düzenleme(4,3)))\nprint(\"Söz konusu durumlar aşağıda bulunmaktadır;\")\ndizi = [1,2,3,4]\n\nfor i in range(4):\n print(\"%s ile Başlayan Durumlar ; \" % (i + 1))\n for j in range(4):\n for k in range(4):\n if (i!=j and j!=k and k!= i):\n print(dizi[i], dizi[j], dizi[k])","sub_path":"Gruplastırma_Örnek2.py","file_name":"Gruplastırma_Örnek2.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"132181807","text":"def should_show(e):\n return not e.verticals or 'STREET' in e.verticals\n\n\nclass EventType(object):\n def __init__(self, index_name, public_name, public_name_plural):\n self.index_name = index_name\n self.public_name = public_name\n self.public_name_plural = public_name_plural\n self.categories_name = index_name\n\n def __repr__(self):\n return 'EventType(%s, %s, %s)' % (self.index_name, self.public_name, self.public_name_plural)\n\n @property\n def url_name(self):\n return self.public_name.lower()\n\n\nBATTLE = EventType('BATTLE', 'Competition', 'Competitions')\nPERFORMANCE = EventType('PERFORMANCE', 'Performance', 'Performances')\nWORKSHOP = EventType('WORKSHOP', 'Workshop', 'Workshops')\nPARTY = EventType('PARTY', 'Party', 'Parties')\nSESSION = EventType('SESSION', 'Session', 'Sessions')\nAUDITION = EventType('AUDITION', 'Audition', 'Auditions')\nREGULAR_CLASS = EventType('REGULAR_CLASS', 'Regular Class', 'Regular Classes')\n\nEVENT_TYPES = [\n BATTLE,\n PERFORMANCE,\n PARTY,\n WORKSHOP,\n SESSION,\n AUDITION,\n REGULAR_CLASS,\n]\n\n\nclass Style(object):\n def __init__(self, index_name, public_name, categories_name=None):\n self.index_name = index_name\n self.public_name = public_name\n self.categories_name = categories_name or self.index_name\n\n def __repr__(self):\n return 'Style(%s, %s, %s)' % (self.index_name, self.public_name, self.categories_name)\n\n @property\n def url_name(self):\n return self.public_name.lower()\n\n\nBREAK = Style('BREAKING', 'Breaking', categories_name='BREAK')\nHIPHOP = Style('HIPHOP', 'Hip-Hop')\nHOUSE = Style('HOUSE', 'House')\nPOP = Style('POPPING', 'Popping', categories_name='POP')\nLOCK = Style('LOCKING', 'Locking', categories_name='LOCK')\nWAACK = Style('WAACKING', 'Waacking', categories_name='WAACK')\nDANCEHALL = Style('DANCEHALL', 'Dancehall')\nVOGUE = Style('VOGUE', 'Vogue')\nKRUMP = Style('KRUMPING', 'Krumping', categories_name='KRUMP')\nTURF = Style('TURFING', 'Turfing', categories_name='TURF')\nLITEFEET = Style('LITEFEET', 'Litefeet')\nFLEX = Style('FLEXING', 'Flexing', categories_name='FLEX')\nBEBOP = Style('BEBOP', 'Bebop')\nALLSTYLE = Style('ALLSTYLE', 'All-Styles')\nKIDS = Style('KIDS', 'Kids')\nSTREET = Style('STREET', 'Street Dance')\n\nSTYLES = [\n STREET,\n BREAK,\n HIPHOP,\n HOUSE,\n POP,\n LOCK,\n WAACK,\n DANCEHALL,\n VOGUE,\n 
KRUMP,\n TURF,\n LITEFEET,\n FLEX,\n BEBOP,\n ALLSTYLE,\n KIDS,\n]\n\nCATEGORY_LOOKUP = dict([(x.categories_name, x.public_name) for x in STYLES + EVENT_TYPES])\n\n\ndef humanize_categories(categories):\n return [CATEGORY_LOOKUP[x] for x in categories]\n","sub_path":"server/dancedeets/event_types.py","file_name":"event_types.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"139755067","text":"\"\"\"Частотный анализ слов\"\"\"\n\ndef analize(words):\n \"\"\"\n Частотный анализ слов\n :param words: входной список слов\n :return: самое частое слово кортеж (слово, кол-во)\n \"\"\"\n\n analyze = {}\n for word in words:\n # если слово уже есть в словаре\n if word in analyze:\n analyze[word] += 1\n else:\n analyze[word] = 1\n # сортируем словарь и получаем кортеж слово и самое большое кол-во\n result = sorted(analyze.items(), key=lambda item: item[1], reverse=True)\n # берем 1-ый элемент и слово - это будет тема\n theme = result[0]\n # возвращаем результат\n return theme[0]\n\n\n\n","sub_path":"puzzle/step4_freq_analyze.py","file_name":"step4_freq_analyze.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"298802864","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport seaborn as sns\nimport pandas as pd\n\nplt.style.use(\"bmh\")\nsns.color_palette(\"hls\", 1)\n\nimport matplotlib\nmatplotlib.rc('xtick', labelsize=14)\nmatplotlib.rc('ytick', labelsize=14)\nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams['font.family'] = 'STIXGeneral'\n\"\"\"\nr = 0.5\nNy = 160\nNx = int(round(Ny/(0.5+1/r)))\n\nx_axis = np.linspace(0, 2, Nx)\ny_axis = np.linspace(-1, 1, Ny)\n\nb = int(Nx/2)\na = int(Ny/2)\nb_2 = int(b/2)\nb2 = b_2\n\nforces = np.arange(5, 23, 2)\nfor i in range(len(forces)):\n velocity = np.load('../data/rogh_Re' + str(forces[i]) + '.npy')\n\n plt.streamplot(x_axis, y_axis, velocity[:,:,1], velocity[:,:,0])\n plt.xlabel(r\"$x$ position [b]\", fontsize=14)\n plt.ylabel(r\"$y$ position [a]\", fontsize=14)\n #plt.savefig(\"../figures/flow_field_rough.pdf\", bbox_inches=\"tight\")\n #os.system('pdfcrop %s %s &> /dev/null &'%(\"../figures/flow_field_rough.pdf\", \"../figures/flow_field_rough.pdf\"))\n plt.show()\n\n plt.quiver(x_axis[:b_2], y_axis[:b], velocity[:b,:b_2,1], velocity[:b,:b_2,0])\n plt.quiver(-np.flip(x_axis[:b_2], axis=0), y_axis[:b], velocity[:b,-b_2:,1], velocity[:b,-b_2:,0])\n plt.xlabel(r\"$x$ position [b]\", fontsize=14)\n plt.ylabel(r\"$y$ position [a]\", fontsize=14)\n #plt.savefig(\"../figures/flow_field_corner.pdf\", bbox_inches=\"tight\")\n #os.system('pdfcrop %s %s &> /dev/null &'%(\"../figures/flow_field_corner.pdf\", \"../figures/flow_field_corner.pdf\"))\n plt.show()\n\"\"\"\n\"\"\"\nr = 0.6\n\nNY = np.linspace(100, 600, 16, dtype='int')\nNY = NY[:9] #have run up to NY = 400\nNX = np.array(NY/(0.5+1/r), dtype='int')\n\nprint(NY)\ntau = 2\nepsilon = 1e-9\nf = np.array([0, 5*1e-8])\nNX = np.array([46, 61, 77, 92, 108, 123, 138, 154, 169])\nfor i in range(len(NY)):\n velocity = np.load('../data/test_high_re.npy')\n #NX[i] += NX[i]%2\n NX[0] = len(velocity[0, :, 0])\n NY[0] = len(velocity[:, 0, 0])\n print(NX[0], NY[0])\n x_axis = np.linspace(0, 2, NX[i])\n y_axis = np.linspace(-1, 1, NY[i])\n\n b = int(NX[i]/2)\n a = int(NY[i]/2)\n b_2 = int(b/2)\n b2 = b_2\n\n #velocity = np.load('../data/rogh_Re' + str(NY[i]) + \"_\" + str(NX[i]) + 
'.npy')\n U = (np.mean(np.mean(velocity[:, :b2, 1])) + np.mean(np.mean(velocity[:, -b2:, 1])) + np.mean(np.mean(velocity[b:-b, b2:-b2, 1])))/3\n print(U, U*a*2)\n plt.streamplot(x_axis, y_axis, velocity[:,:,1], velocity[:,:,0])\n plt.xlabel(r\"$x$ position [b]\", fontsize=14)\n plt.ylabel(r\"$y$ position [a]\", fontsize=14)\n #plt.savefig(\"../figures/flow_field_rough.pdf\", bbox_inches=\"tight\")\n #os.system('pdfcrop %s %s &> /dev/null &'%(\"../figures/flow_field_rough.pdf\", \"../figures/flow_field_rough.pdf\"))\n plt.show()\n\n plt.quiver(x_axis[:b_2], y_axis[:b], velocity[:b,:b_2,1], velocity[:b,:b_2,0])\n plt.quiver(-np.flip(x_axis[:b_2], axis=0), y_axis[:b], velocity[:b,-b_2:,1], velocity[:b,-b_2:,0])\n plt.xlabel(r\"$x$ position [b]\", fontsize=14)\n plt.ylabel(r\"$y$ position [a]\", fontsize=14)\n #plt.savefig(\"../figures/flow_field_corner.pdf\", bbox_inches=\"tight\")\n #os.system('pdfcrop %s %s &> /dev/null &'%(\"../figures/flow_field_corner.pdf\", \"../figures/flow_field_corner.pdf\"))\n plt.show()\n\"\"\"\nr = 0.8\n\nNY = 300\nNX = int(NY/(0.5+1/r))\n\ntau = 1.01\nf = np.array([0, 1.1e-6])\nnumber_of_datafiles = 11\n\nfilenames = ['high_re_1e6_0.8_13_1100.npy']\nfor i in range(len(filenames)):\n #i = 10\n velocity = np.load('../data_high_re/' + filenames[i])\n velocity = velocity[0::3, 0::3, :]\n NX = len(velocity[0, :, 0])\n NY = len(velocity[:, 0, 0])\n\n x_axis = np.linspace(0, 2, NX)\n y_axis = np.linspace(-1, 1, NY)\n\n b = int(NX/2)\n a = int(NY/2)\n b_2 = int(b/2)\n b2 = b_2\n print(a/b)\n total = np.zeros((NY, 2*NX, 2))\n total[:, :NX] = velocity\n total[::, NX:] = velocity\n\n tau = 1.05\n nu = (tau - 0.5)/3\n U = (np.mean(np.mean(velocity[:, :b2, 1])) + np.mean(np.mean(velocity[:, -b2:, 1])) + np.mean(np.mean(velocity[b:-b, b2:-b2, 1])))/3\n y_streampoints = np.arange(-1, 1, 0.05)\n stream_points = np.zeros((len(y_streampoints), 2))\n stream_points[:, 1] = y_streampoints\n stream_points[:, 0] = 0.1\n\n plt.streamplot(x_axis, y_axis, velocity[:,:,1], velocity[:,:,0], start_points=stream_points, density=50, color=sns.color_palette()[1])\n stream_points[:, 0] = -0.1\n plt.streamplot(-np.flip(x_axis, axis=0), y_axis, np.flip(velocity[:,:,1], axis=0), velocity[:,:,0], start_points=stream_points, density=50, color=sns.color_palette()[1])\n plt.xlabel(r\"$x$ position [b]\", fontsize=14)\n plt.ylabel(r\"$y$ position [a]\", fontsize=14)\n plt.fill_between([0.5, 1.5], [-1, -1], [-0.425, -0.425], color=\"k\")\n plt.fill_between([0.5, 1.5], [1, 1], [0.425, 0.425], color=\"k\")\n plt.fill_between([-0.5, -1.5], [-1, -1], [-0.425, -0.425], color=\"k\")\n plt.fill_between([-0.5, -1.5], [1, 1], [0.425, 0.425], color=\"k\")\n #plt.savefig(\"../figures/streamline_rough_big.pdf\", bbox_inches=\"tight\")\n #os.system('pdfcrop %s %s &> /dev/null &'%(\"../figures/streamline_rough_big.pdf\", \"../figures/streamline_rough_big.pdf\"))\n plt.show()\n\n print('Re: ', U*a/nu)\n print('Re: ', U*b/nu)\n print('Nu: ', nu)\n \"\"\"\n y_streampoints = np.arange(-0.99, -0.845, 0.005)\n stream_points = np.zeros((len(y_streampoints), 2))\n stream_points[:, 1] = y_streampoints\n stream_points[:, 0] = -0.220675\n plt.figure(100+i)\n upper = int(0.3*b)\n start = int(0.7*b_2)\n print(x_axis[start:b_2], y_axis[:upper])\n plt.streamplot(x_axis[start:b_2], y_axis[:upper], velocity[:upper,start:b_2,1], velocity[:upper,start:b_2,0], start_points=stream_points, density=100, color=sns.color_palette()[1])\n plt.fill_between([0.5, 0.51], [-1, -1], [-0.85, -0.85], color=\"k\")\n plt.fill_between([0.35, 0.51],[-1.01, 
-1.01], [-1, -1], color=\"k\")\n plt.axis([0.40, 0.51, -1.01, -0.925])\n plt.savefig(\"../figures/flow_field_rough_2.pdf\", bbox_inches=\"tight\")\n os.system('pdfcrop %s %s &> /dev/null &'%(\"../figures/flow_field_rough_2.pdf\", \"../figures/flow_field_rough_2.pdf\"))\n plt.show()\n \"\"\"\n velocity = np.load('../data_high_re/' + filenames[i])\n upper = b\n plt.figure(10+i)\n velocity = velocity[0::5, 0::5, :]\n NX = len(velocity[0, :, 0])\n NY = len(velocity[:, 0, 0])\n x_axis = np.linspace(0, 2, NX)\n y_axis = np.linspace(-1, 1, NY)\n b = int(NX/2)\n a = int(NY/3.5)\n b_2 = int(b/2)\n b2 = b_2\n plt.quiver(x_axis[:b_2], y_axis[:a], velocity[:a,:b_2,1], velocity[:a,:b_2,0])\n #plt.quiver(x_axis[:b_2]+2+x_axis[1]-x_axis[0], y_axis[:upper], velocity[:upper, :b_2, 1], velocity[:upper, :b_2,0])\n plt.xlabel(r\"$x$ position [b]\", fontsize=14)\n plt.ylabel(r\"$y$ position [a]\", fontsize=14)\n #plt.savefig(\"../figures/flow_field_corner.pdf\", bbox_inches=\"tight\")\n #os.system('pdfcrop %s %s &> /dev/null &'%(\"../figures/flow_field_corner.pdf\", \"../figures/flow_field_corner.pdf\"))\n plt.show()\n","sub_path":"final_exam/plot/problem_5_visualize_field.py","file_name":"problem_5_visualize_field.py","file_ext":"py","file_size_in_byte":6939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"635982022","text":"import argparse\nimport collections\nimport gensim\nimport os\n\nfrom gensim.models import word2vec\n\nparser = argparse.ArgumentParser(description=\"Create and query a gensim \" \\\n \"word2vec model.\")\n\n# Positional arguments (required).\nparser.add_argument(\"training_data_file_path\", type=str,\n help=\"Path to training data\")\n\n# Optional arguments.\nparser.add_argument(\"-s\", \"--save_model_path\", type=str,\n help=\"Path to save Word2Vec model to. 
Default is\" \\\n                         \" ./tmp/gensim_w2v.model\",\n                    default=\"tmp/gensim_w2v.model\")\n\nparser.add_argument(\"-q\", \"--query_words_file_path\", type=str,\n                    help=\"Path to query words separated by new line.\",\n                    default=None)\n\nparser.add_argument(\"-v\", \"--verbose_vocabulary\", type=str,\n                    help=\"Path to save vocabulary to so you can see what your \\\n                    model is being trained on.\",\n                    default=None)\n\nparser.add_argument(\"-mwc\", \"--minimum_word_count\", type=int,\n                    help=\"Minimum number of occurrences of word for it to be \\\n                    counted in model.\",\n                    default=4)\n\nparser.add_argument(\"-sw\", \"--skip_window\", type=int,\n                    help=\"Number of words either side of target to look.\",\n                    default=5)\n\nargs = parser.parse_args()\n\n\n# TODO: Maybe check ascii bounds instead.\nPARSE_OUT = {\n    '/', ':', '-', '.', '\\'', '–',\n    \"\\t\", \"\\\"\", \":\", \",\", \"\\\\\", \"]\", \";\",\n    \"[\", \"{\", \"}\", \".\", '(', ')', '&', \"!\", \"•\", \"―\", \"*\"\n}\n\ndef replace_with_dict_comprehension(string):\n    \"\"\"Replaces any character found in string matching a character in PARSE_OUT\n    with a space ' ' so the data set is not skewed by punctuation/formatting.\n    \"\"\"\n    translation = string.translate({ord(c): ' ' for c in PARSE_OUT})\n    return translation # Single string.\n\n\ndef read_text_data_as_sentences(filename):\n    \"\"\"Reads filename and returns the sentences in it after removing formatting.\n\n    Returns:\n        A list of sentences, each sentence is a list of words.\n    \"\"\"\n    with open(filename, 'r') as fh:\n        file_contents_as_string = fh.read()\n    file_contents_as_string = replace_with_dict_comprehension(\n        file_contents_as_string)\n    sentences = file_contents_as_string.split('\\n')\n    parsed_sentences = []\n    del file_contents_as_string\n    # Parse out whitespace and digits.\n    for sentence in sentences:\n        sentence = [word.lower() for word in sentence.split(' ')\n                    if word != \"\" and not word.isdigit()]\n        if sentence:\n            parsed_sentences.append(sentence)\n    del sentences\n    return parsed_sentences\n\n\ndef flatten_sentences_to_words(sentences):\n    \"\"\"Helper function to get all words in a single list, not actually\n    needed.\n    \"\"\"\n    words = []\n    for sentence in sentences:\n        for word in sentence:\n            words.append(word)\n    return words\n\nvocabulary_as_sentences = read_text_data_as_sentences(\n    args.training_data_file_path)\n\n# Can remove to save time and space.\nvocabulary = flatten_sentences_to_words(vocabulary_as_sentences)\nprint(\"Total number of words: \" + str(len(vocabulary)))\nunique_words = len(set(vocabulary))\nprint(\"Number of unique words: {0}\".format(unique_words))\n\nif args.verbose_vocabulary:\n    if not os.path.isdir(os.path.dirname(args.verbose_vocabulary)):\n        os.mkdir(os.path.dirname(args.verbose_vocabulary))\n    with open(args.verbose_vocabulary, \"w\") as fh:\n        fh.write(str(vocabulary))\n    with open(args.verbose_vocabulary + \"_count\", \"w\") as fh:\n        fh.write(str(collections.Counter(vocabulary).most_common(unique_words)))\n\ndel vocabulary\n\nprint(\"Training model\")\n# Train word2vec on sentences.\n# Note: sentences can be an iterator (use yield).\nmodel = gensim.models.Word2Vec(\n    sentences=vocabulary_as_sentences,\n    min_count=args.minimum_word_count, # Min frequency needed to count word.\n    workers=6, # Threads.\n    window=args.skip_window, # Window of words to left and right to consider.\n    sg=1 # Use Skip-Gram not Continuous Bag of Words.\n)\n\nprint(\"Saving model to {0}\".format(args.save_model_path))\n\nif not os.path.isdir(os.path.dirname(args.save_model_path)):\n    
os.mkdir(os.path.dirname(args.save_model_path))\n\nmodel.save(args.save_model_path)\n\n# Query.\nif args.query_words_file_path:\n print(\"Querying model\")\n with open(args.query_words_file_path, \"r\") as fh:\n query_words = [word.strip().lower() for word in fh.readlines()]\n for query in query_words:\n try:\n print(\"Nearest words to: {0}\\n{1}\".format(\n query, model.wv.most_similar(positive=[query])))\n except KeyError:\n print(\"Word {0} not in embeddngs.\".format(query))\n","sub_path":"word_2_vec.py","file_name":"word_2_vec.py","file_ext":"py","file_size_in_byte":4947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"224007963","text":"\"\"\"\n# @Time : 2020/9/20\n# @Author : Jimou Chen\n\"\"\"\nnum, weight = map(int, input().split())\n\n# 第一个下标的值设为0,方便\nw = [0]\nv = [0]\nfor i in range(num):\n each_w, each_v = map(int, input().split())\n w.append(each_w)\n v.append(each_v)\n\n# 用一个二维列表存储d[i][j]\ndp = []\nfor i in range(0, num + 1):\n temp = []\n for j in range(0, weight + 1):\n temp.append(0)\n dp.append(temp)\n\nfor i in range(1, num + 1):\n for j in range(1, weight + 1):\n if j < w[i]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - w[i]] + v[i])\n\nprint(dp[num][weight])\nprint(dp)\n'''\n3 5\n2 3\n3 5\n4 7\n\n8\n'''","sub_path":"PythonLearning/OJ/背包01.py","file_name":"背包01.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"398102521","text":"# coding:utf-8\n\n# On utilise une classe pour définir le type pointeur sur noeud.\nclass Noeud(object):\n def __init__(self, val, suiv=None):\n \"\"\"initialisateur de classe\n permet l'allocation de la mémoire requise pour stocker le noeud\n et l'initialisation de ses attributs val et suiv\"\"\"\n self.val = val\n self.suiv = suiv\n\n\n# Quelques exemples de listes avec représentation simplement chaînée.\n# a) la liste vide :\nlisteVide = None\n\n# b) une liste réduite à l'élément 5 :\nlisteSingleton = Noeud(5)\n# listeSingleton=Noeud(5) => appel de la fonction __init__ de Noeud\n# avec comme paramètres : self=listeSingleton, val=5 et suiv=None\n#\n# Cette simple instruction correspond en pseudo-code à la séquence :\n# listeSingleton : pointeur sur noeud # déclaration de type\n# Nouveau(listeSingleton) # allocation de la mémoire\n# listeSingleton->val=5 # initialisation du champ val\n# listeSingleton->suiv=None # initialisation du champ suiv\n\n\n# c) la liste 2,5,8,10:\n\n\n# Exo 1. A partir de la liste maliste=Noeud(2,Noeud(5,Noeud(8,Noeud(10))))\n# écrivez l'instruction nécessaire pour afficher le 1er élément de maliste (l'élément 2)\n# puis l'instruction nécessaire pour afficher le 3ème élément (ici 8).\n\n\ndef at_index(list, n):\n while n >= 0 and list is not None:\n if n == 0:\n return list.val\n list = list.suiv\n n -= 1\n return -1\n\n\n\n\n# Exo 2. 
En utilisant les procédures données ci-dessous (essayez d'abord de bien les comprendre),\n# écrivez les instructions nécessaires pour :\n# a) afficher tous les éléments de maliste;\n# b) ajouter l'élément 7 en début de maliste et afficher à nouveau maliste;\n# c) ajouter l'élément 3 en fin de maliste et afficher à nouveau maliste;\n# d) déterminer si l'élément 8 est ou non dans maliste.\n\ndef affiche(debut):\n while debut != None:\n print(debut.val, end=\" \"),\n debut = debut.suiv\n print(\"\")\n\n\ndef insere_debut(debut, x):\n return Noeud(x, debut)\n\n\ndef insere_fin_it(debut, x):\n if debut == None:\n return Noeud(x)\n cour = debut\n while cour.suiv != None:\n cour = cour.suiv\n cour.suiv = Noeud(x)\n return debut\n\n\ndef recherche_rec(debut, x):\n if debut == None:\n return False\n if debut.val == x:\n return True\n return recherche_rec(debut.suiv, x)\n\n\n\n# Exo 3. Donnez une version récursive de la procédure insere_fin(debut,x)\n# qui prend en entrée deux arguments (debut qui est une référence\n# sur le premier noeud d'une liste et x un élément)\n# et qui insère x à la fin de la liste.\n\ndef ex3(l, n):\n if l is not None:\n l.suiv = ex3(l.suiv, n)\n else:\n return Noeud(n)\n return l\n\n\n# Exo 4. Donnez une version itérative de la procédure recherche(debut,x)\n# qui prend en entrée deux arguments (debut une référence\n# sur le premier noeud d'une liste et x un élément) et\n# qui détermine si x est ou non dans la liste.\n\ndef ex4(l, n):\n while l is not None:\n if l.val == n:\n return True\n l = l.suiv\n return False\n\n\n\n# Exo 5. Écrivez une procédure inverse(debut)\n# qui prend en entrée debut une référence sur le premier noeud d'une liste\n# et qui retourne une référence sur le premier noeud de la liste inversée.\n\ndef ex5(l):\n first = None\n while l is not None:\n temp = Noeud(l.val)\n temp.suiv = first\n first = temp\n l = l.suiv\n return first\n\n\n\n# Exo 6. Une liste L1 est une sous-liste d'une liste L2 si L1 est obtenue à partir de L2 en supprimant zéro, un ou plusieurs éléments de la liste L2.\n# Exemple: la liste 3,5,10 est une sous-liste de la liste 2,3,5,5,7,10.\n# Écrivez une procédure sousListe(L1,L2)\n# qui prend en entrée deux listes L1 et L2 et qui retourne True si L1 est une sous-liste de L2.\n# On supposera que L1 et L2 sont des listes d'entiers triés dans l'ordre croissant (au sens large).\ndef exo6(l_1, l_2):\n while l_1 is not None and l_2 is not None:\n if l_1.val == l_2.val:\n l_1 = l_1.suiv\n l_2 = l_2.suiv\n return l_1 is None\n\n\n\n# Exos un peu plus difficiles (avec *):\n\n# Exo 7. Écrivez une procédure insere_apres(L,x,y)\n# qui insère un élément y après la première occurrence de l'élément x dans une liste L\n# (ne fait rien en l'absence de x) et retourne la liste ainsi modifiée.\n\ndef exo7(l, x, n):\n node = l\n while node is not None:\n if node.val == x:\n temp = Noeud(n, node.suiv)\n node.suiv = temp\n return l\n node = node.suiv\n return l\n\n\n\n# Exo 8. 
Écrivez une procédure insere_avant(L,x,y)\n# qui insère un élément y avant la première occurrence de l'élément x dans une liste L\n# (ne fait rien en l'absence de x) et retourne la liste ainsi modifiée.\n\n\ndef exo8(l, x, n):\n node = l\n previous = node\n while node is not None:\n if node.val == x:\n if previous == node:\n #Only happens when the first node is the occurence\n return Noeud(n, node)\n temp = Noeud(n, node)\n previous.suiv = temp\n return l\n previous = node\n node = node.suiv\n return l\n\n\n## Test\n\n#debut = None\n#for i in range(3):\n# debut = insere_fin_it(debut, i)\n#affiche(debut)\n\ndef main():\n maliste = Noeud(2, Noeud(5, Noeud(8, Noeud(10))))\n print(\"exo\", 1)\n print(at_index(maliste, 2))\n print(at_index(maliste, 3))\n print(\"exo\", 2)\n affiche(maliste)\n affiche(insere_debut(maliste, 7))\n affiche(insere_fin_it(maliste, 3))\n print(recherche_rec(maliste, 8))\n print(\"exo\", 3)\n affiche(ex3(maliste, 2))\n print(\"exo\", 4)\n print(ex4(maliste, 2))\n print(ex4(maliste, 4))\n print(\"exo\", 5)\n affiche(ex5(maliste))\n print(\"exo\", 6)\n print(exo6(maliste.suiv, maliste))\n print(exo6(maliste, maliste.suiv))\n print(\"exo\", 7)\n affiche(exo7(maliste, 2, 1))\n print(\"exo\", 8)\n affiche(exo8(maliste, 2, 1))\n\nmain()\n\n","sub_path":"TP_Algo/TP1.py","file_name":"TP1.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"388045647","text":"'''----------------------------------------------------------------------------------------------------------\nMODULE : AMWIMappings\nPROJECT : Markit Wire OTC Project\nPURPOSE : This module will be used to map entries in AMWI_Mappings integration config\nDEPARTMENT AND DESK : Markit Wire Front-End Users\nREQUESTER : AMWI OTC Project\nDEVELOPER : Arthur Grace\nCR NUMBER : CHNG0002337584\n\nIMPORTANT -- This module should only be called if important information needs to be sent from the Markit Wire Interface\n----------------------------------------------------------------------------------------------------------'''\nimport acm\nimport xml.dom.minidom as xml\n\nconfiguration = acm.GetDefaultValueFromName(acm.GetDefaultContext(), acm.FObject, \"AMWI_Mappings\")\nconfig = xml.parseString(configuration)\nmappings = config.getElementsByTagName(\"Mappings\")\n\n\ndef return_mapping(entry, typeofentry):\n for mapping in mappings:\n if mapping.getAttribute(\"MappingName\") == typeofentry:\n map = mapping.getElementsByTagName(\"Mapping\")\n for mappedentries in map:\n if mappedentries.getAttribute(\"key\") == entry:\n return mappedentries.childNodes[0].nodeValue\n\n\ndef return_mapping_tenor(entry, period, typeofentry):\n for mapping in mappings:\n if mapping.getAttribute(\"MappingName\") == typeofentry:\n map = mapping.getElementsByTagName(\"Mapping\")\n for mappedentries in map:\n if mappedentries.getAttribute(\"key\") == entry:\n if mappedentries.getAttribute(\"Period\") == period:\n return mappedentries.childNodes[0].nodeValue\n","sub_path":"Extensions/AMWIConfig/FPythonCode/AMWIMappings.py","file_name":"AMWIMappings.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"145695989","text":"from functools import reduce\nimport excepciones\nimport main\n\n\n# LEN\ndef LEN(datos=None, *args):\n if datos is None:\n raise excepciones.ArgumentoInvalido('LEN({})'.format(datos))\n if len(args) > 0:\n raise 
excepciones.ArgumentoInvalido('LEN({},{})'.format(datos, args))\n if not isinstance(datos, list) and not isinstance(datos, str):\n raise excepciones.ErrorTipo('LEN({})'.format(datos))\n\n if isinstance(datos, str) and datos in main.variables_definidas:\n datos = main.variables_definidas[datos]\n if isinstance(datos, str) and not datos in main.variables_definidas:\n raise excepciones.ReferenciaInvalida(\"LEN('{}')\".format(datos))\n\n return len(datos)\n\n\n# PROM\ndef PROM(datos=None, *args):\n if datos is None:\n raise excepciones.ArgumentoInvalido('PROM({})'.format(datos))\n if len(args) > 0:\n raise excepciones.ArgumentoInvalido('PROM({},{})'.format(datos, args))\n if not isinstance(datos, list) and not isinstance(datos, str):\n raise excepciones.ErrorTipo('PROM({})'.format(datos))\n if len(datos) == 0:\n raise excepciones.ErrorMatematico('PROM({})'.format(datos))\n\n if not isinstance(datos, list) and isinstance(datos, str) and datos in main.variables_definidas:\n datos = main.variables_definidas[datos]\n if isinstance(datos, str) and not datos in main.variables_definidas:\n raise excepciones.ReferenciaInvalida(\"PROM('{}')\".format(datos))\n\n suma = reduce(lambda x, y: float(x) + float(y), datos)\n cantidad = LEN(datos)\n promedio = suma / cantidad\n return promedio\n\n\n# MEDIAN\ndef MEDIAN(datos=None, *args):\n if datos is None:\n raise excepciones.ArgumentoInvalido('MEDIAN({})'.format(datos))\n if len(args) > 0:\n raise excepciones.ArgumentoInvalido('MEDIAN({},{})'.format(datos, args))\n if not isinstance(datos, list) and not isinstance(datos, str):\n raise excepciones.ErrorTipo('MEDIAN({})'.format(datos))\n if len(datos) == 0:\n raise excepciones.ErrorMatematico('MEDIAN({})'.format(datos))\n\n if not isinstance(datos, list) and isinstance(datos, str) and datos in main.variables_definidas:\n datos = main.variables_definidas[datos]\n if isinstance(datos, str) and not datos in main.variables_definidas:\n raise excepciones.ReferenciaInvalida(\"MEDIAN('{}')\".format(datos))\n\n medio = int(LEN(datos) / 2)\n if LEN(datos) % 2 != 0:\n return datos[medio]\n else:\n datos_medio = [datos[medio - 1], datos[medio]]\n return PROM(datos_medio)\n\n\n# DESV\ndef DESV(datos=None, *args):\n if datos is None:\n raise excepciones.ArgumentoInvalido('DESV({})'.format(datos))\n if len(args) > 0:\n raise excepciones.ArgumentoInvalido('DESV({},{})'.format(datos, args))\n if not isinstance(datos, list) and not isinstance(datos, str):\n raise excepciones.ErrorTipo('DESV({})'.format(datos))\n if len(datos) == 0:\n raise excepciones.ErrorMatematico('DESV({})'.format(datos))\n\n if not isinstance(datos, list) and isinstance(datos, str) and datos in main.variables_definidas:\n datos = main.variables_definidas[datos]\n if isinstance(datos, str) and not datos in main.variables_definidas:\n raise excepciones.ReferenciaInvalida(\"DESV('{}')\".format(datos))\n\n promedio = PROM(datos)\n restar = lambda x: float(x) - promedio\n restas = list(map(restar, datos))\n cuadrado = lambda x: float(x) ** 2\n restas_cuadrado = list(map(cuadrado, restas))\n suma_restas = reduce(lambda x, y: float(x) + float(y), restas_cuadrado)\n cantidad = LEN(datos)\n n = cantidad - 1\n desv = (suma_restas / n) ** (1 / 2)\n return desv\n\n\n# VAR\ndef VAR(datos=None, *args):\n if datos is None:\n raise excepciones.ArgumentoInvalido('VAR({})'.format(datos))\n if len(args) > 0:\n raise excepciones.ArgumentoInvalido('VAR({},{})'.format(datos, args))\n if not isinstance(datos, list) and not isinstance(datos, str):\n raise 
excepciones.ErrorTipo('VAR({})'.format(datos))\n if len(datos) == 0:\n raise excepciones.ErrorMatematico('VAR({})'.format(datos))\n\n if not isinstance(datos, list) and isinstance(datos, str) and datos in main.variables_definidas:\n datos = main.variables_definidas[datos]\n if isinstance(datos, str) and not datos in main.variables_definidas:\n raise excepciones.ReferenciaInvalida(\"VAR('{}')\".format(datos))\n\n desv = DESV(datos)\n var = DESV(datos) ** 2\n return var\n\n\nif __name__ == '__main__':\n try:\n LEN()\n except excepciones.ArgumentoInvalido as err:\n print('{}'.format(err))\n try:\n LEN(1, 2, 3, 4, 5, 6)\n except excepciones.ArgumentoInvalido as err:\n print('{}'.format(err))\n try:\n LEN('datos')\n except excepciones.ReferenciaInvalida as err:\n print('{}'.format(err))\n try:\n LEN(1)\n except excepciones.ErrorTipo as err:\n print('{}'.format(err))\n try:\n PROM([])\n except excepciones.ErrorMatematico as err:\n print('{}'.format(err))\n\n from ComandosBasicos import asignar\n from Interprete import leer_simple\n\n columna = leer_simple([\"extraer_columna\", \"registros\", \"tiempo_sano\"])\n asignar('columna', columna)\n print(main.variables_definidas)\n print(LEN('columna'))\n\n datos = [1, 2, 3, 4, 5, 6]\n suma = 1 + 2 + 3 + 4 + 5 + 6\n print(suma)\n print(len(datos))\n promedio = suma / len(datos)\n print(promedio)\n\n from ComandosDatos import extraer_columna\n\n columna = extraer_columna('registros', 'tiempo_sano')\n\n print(LEN(datos))\n print(LEN(columna))\n\n print(PROM(datos))\n print(PROM(columna))\n\n print(MEDIAN(datos))\n print(MEDIAN(columna))\n\n print(DESV(datos))\n print(DESV(columna))\n\n print(VAR(datos))\n print(VAR(columna))\n","sub_path":"Tareas/T03/ComandosNumeros.py","file_name":"ComandosNumeros.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"533576145","text":"import torch\r\nfrom os.path import join\r\nimport os\r\n\r\n\r\nclass Saver():\r\n def __init__(self, model, module_dict={}, folder=None, save_min=False, save_freq=10,\r\n start_epoch=0):\r\n \"\"\"Saves a torch module and a dict of modules\r\n\r\n :param model: model\r\n :type model: torch.nn.Module\r\n :param module_dict: dict of modules, defaults to {}\r\n :type module_dict: dict, optional\r\n :param folder: forlder name, defaults to None\r\n :type folder: str, optional\r\n :param save_min: if true the best value of the target metric is the minimum value, defaults to False\r\n :type save_min: bool, optional\r\n :param save_freq: frequency of saving, defaults to 10\r\n :type save_freq: int, optional\r\n :param start_epoch: first epoch, defaults to 0\r\n :type start_epoch: int, optional\r\n \"\"\" \r\n self.model = model\r\n self.module_dict = module_dict\r\n self.folder = folder\r\n self.save_min = save_min\r\n self.save_freq = save_freq\r\n self.best_crit = 10000 if save_min else -10000\r\n self.epoch = start_epoch\r\n self.file_saved = None\r\n\r\n def save(self, crit_val):\r\n \"\"\"Saves a new model if crit_val is better than the best stored best criterion.\r\n\r\n :param crit_val: target metric used for storing or not the model \r\n :type crit_val: float\r\n \"\"\" \r\n if (self.save_min and crit_val < self.best_crit) or ((not self.save_min) and crit_val > self.best_crit):\r\n file_saved = join(\r\n self.folder, f'best_epoch_{self.epoch}_crit_{crit_val:.3f}.pth')\r\n torch.save({'model': self.model.state_dict(),\r\n 'epoch': self.epoch,\r\n **{k: mod.state_dict() for k, mod in 
self.module_dict.items()}\r\n                        }, file_saved)\r\n            if self.file_saved is not None:\r\n                os.remove(self.file_saved)\r\n            self.file_saved = file_saved\r\n            self.best_crit = crit_val\r\n        if ((self.epoch+1) % self.save_freq) == 0:\r\n            torch.save({'model': self.model.state_dict(),\r\n                        'epoch': self.epoch,\r\n                        **{k: mod.state_dict() for k, mod in self.module_dict.items()}\r\n                        }, join(self.folder, f'epoch_{self.epoch}_crit_{crit_val:.3f}.pth'))\r\n        self.epoch += 1\r\n","sub_path":"utilstorch/saver.py","file_name":"saver.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
{"seq_id":"104340871","text":"from django.shortcuts import HttpResponse\r\nfrom urllib.parse import urlencode\r\nfrom urllib import request\r\nfrom urllib import error\r\nimport json\r\nimport gitlab\r\nfrom appmanager.models import AppInfo\r\nimport logging\r\nlogger = logging.getLogger('django')\r\n\r\n\r\nprivate_token = {\"PRIVATE-TOKEN\": \"PRIVATE-TOKEN\"}\r\ntoken = token\r\ngitlab_Url = \"http://gitlab.testdomain.com\"\r\nquery_Uri = \"/api/v4/projects?search=\"\r\n\r\n'''\r\nGet the git project's corresponding id.\r\nAdding hooks, pulling branches, etc. all need the id for precise lookup.\r\n'''\r\n\r\n\r\ndef get_project_id(app_name, git_url):\r\n    final_url = request.Request(gitlab_Url + query_Uri + app_name, headers=private_token)\r\n    html = request.urlopen(final_url)\r\n    data = json.loads(html.read())\r\n    for dict in data:\r\n        if dict['http_url_to_repo'] == git_url:\r\n            logger.info(\"Got this project's git id: {}\".format(dict['id']))\r\n            return dict['id']\r\n\r\n\r\n'''\r\nAdd a webhook\r\n'''\r\n\r\n\r\ndef create_project_hook(env, app_name, git_url):\r\n    project_id = get_project_id(app_name, git_url)\r\n    if app_name.find('.com') == -1:\r\n        add_webhook_data = {\"id\": project_id, \"url\": \"http://yourjenkins.com/project/\" + env + \"-\" + app_name,\r\n                            \"enable_ssl_verification\": \"true\", \"token\": \"jenkins_token\"}\r\n    elif env != 'uat':\r\n        job_name = app_name.split('.')[0]+'.'+ env + '.testdomain.com'\r\n        add_webhook_data = {\"id\": project_id, \"url\": \"http://yourjenkins.com/project/\" + job_name,\r\n                            \"enable_ssl_verification\": \"true\", \"token\": \"jenkins_token\"}\r\n    else:\r\n        add_webhook_data = {\"id\": project_id, \"url\": \"http://yourjenkins.com/project/\" + app_name,\r\n                            \"enable_ssl_verification\": \"true\", \"token\": \"jenkins_token\"}\r\n    req = request.Request(gitlab_Url + \"/api/v4/projects/\" + str(project_id) + \"/hooks\",\r\n                          urlencode(add_webhook_data).encode(), headers=private_token)\r\n    print(app_name, project_id)\r\n    try:\r\n        html = request.urlopen(req)\r\n        final_code = html.read()\r\n        print(final_code.decode('utf-8'))\r\n        if html.getcode() == 201:  # getcode() returns an int, not a string\r\n            logger.info(\"Successfully added the hook for -- {} --\".format(app_name))\r\n            return True\r\n\r\n    except error.HTTPError as e:\r\n        logger.error(e)\r\n        return False\r\n\r\n\r\n'''\r\nPull branch information\r\n'''\r\n\r\n\r\ndef get_branch(request):\r\n    app_name = request.GET['app_name']\r\n    env = app_name.split('-')[0]\r\n    try:\r\n        final_name = app_name.split(env + '-')[1]\r\n    except:\r\n        return HttpResponse(\"false\")\r\n    gl = gitlab.Gitlab(gitlab_Url, token)\r\n    try:\r\n        app_info = AppInfo.objects.get(App_name=final_name)\r\n    except AppInfo.DoesNotExist:\r\n        branches = ''\r\n        return branches\r\n    project_id = get_project_id(final_name, app_info.Git_Url)\r\n    project_info = gl.projects.get(project_id)\r\n    branches = project_info.branches.list()\r\n    if branches == '':\r\n        return HttpResponse(\"false\")\r\n    else:\r\n        a = []\r\n        for branche in branches:\r\n            a.append(branche.name)\r\n        
a.append(env)\r\n return HttpResponse(json.dumps(a), content_type='application/json')\r\n\r\n\r\n'''\r\n添加ansible文件到gitlab\r\n本项目目前只应用到ops/ansible\r\n553是其git对应的id\r\n 之后如果要应用到其他gitlab项目\r\n 添加git地址变量\r\n 使用上面的get_project_id获取id\r\n'''\r\n\r\n\r\ndef add_ansible_file(filename, content, message, operate='create'):\r\n gl = gitlab.Gitlab(gitlab_Url, token)\r\n project = gl.projects.get(553)\r\n data = {\r\n 'branch': 'master',\r\n 'commit_message': message,\r\n 'actions': [\r\n {\r\n 'action': operate,\r\n 'file_path': filename,\r\n 'content': content\r\n }\r\n ]\r\n }\r\n project.commits.create(data)\r\n","sub_path":"appmanager/gitlab_operate.py","file_name":"gitlab_operate.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"409852690","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import print_function\r\nfrom .misc import TreeProperty\r\n\r\nclass Pos():\r\n\r\n def __init__(self, min_rowx, min_colx):\r\n self.set_mins(min_rowx, min_colx)\r\n\r\n def next_cell(self):\r\n self.colx += 1\r\n if self.colx > self.max_colx:\r\n self.max_colx = self.colx\r\n return self.rowx, self.colx\r\n\r\n def next_row(self):\r\n self.rowx += 1\r\n self.colx = self.colx_start\r\n if self.rowx > self.max_rowx:\r\n self.max_rowx = self.rowx\r\n return self.rowx\r\n\r\n def coord(self):\r\n return self.rowx, self.colx\r\n\r\n def set_mins(self, min_rowx, min_colx):\r\n self.min_rowx = min_rowx\r\n self.min_colx = min_colx\r\n self.rowx_start = min_rowx - 1\r\n self.colx_start = min_colx - 1\r\n self.rowx = self.rowx_start\r\n self.colx = self.colx_start\r\n self.max_rowx = self.rowx_start\r\n self.max_colx = self.colx_start\r\n\r\nclass RangePos(Pos):\r\n\r\n pos_map = TreeProperty('pos_map')\r\n nocache = TreeProperty('nocache')\r\n\r\n def __init__(self, wtsheet, xlrange, parent=None):\r\n Pos.__init__(self, xlrange.min_rowx, xlrange.min_colx)\r\n self.wtsheet = wtsheet\r\n self.xlrange = xlrange\r\n self.cells = {}\r\n self._children = []\r\n if parent:\r\n parent.add_child(self)\r\n else:\r\n self._pos_map = {}\r\n self.pos_map[xlrange.rkey] = self\r\n\r\n def __str__(self):\r\n fmt = '%s -> %s -> %s'\r\n return fmt % (self.__class__.__name__, self.xlrange.rkey, self.depth)\r\n\r\n @property\r\n def depth(self):\r\n if not hasattr(self, '_depth'):\r\n if not hasattr(self, '_parent') or self._parent is self:\r\n self._depth = 0\r\n else:\r\n self._depth = self._parent.depth + 1\r\n return self._depth\r\n\r\n def add_child(self, child):\r\n child._parent = self\r\n self._children.append(child)\r\n\r\n def write_cell(self, rdrowx, rdcolx, value, cty):\r\n wtrowx, wtcolx = self.next_cell()\r\n #print('write cell', rdrowx, rdcolx, wtrowx, wtcolx, value, cty)\r\n self.wtsheet.cell(rdrowx, rdcolx, wtrowx, wtcolx, value, cty)\r\n\r\n def print(self):\r\n print('\\t' * self.depth, self)\r\n for child in self._children:\r\n child.print()\r\n\r\n def set_image_ref(self, image_ref, image_key):\r\n if hasattr(self.wtsheet, 'merger'):\r\n self.wtsheet.merger.set_image_ref(image_ref, (self.rowx,self.colx+1,image_key))\r\n\r\nclass SheetPos(RangePos):\r\n\r\n def __init__(self, wtsheet, xlrange, nocache=False):\r\n RangePos.__init__(self, wtsheet, xlrange, None)\r\n self.set_mins(xlrange.index_base, xlrange.index_base)\r\n self.current_node = self\r\n self.current_key = ''\r\n self.last_node = None\r\n self.last_key = None\r\n self._parent = self\r\n self._nocache = nocache\r\n self.node_map = xlrange.node_map\r\n\r\n 
def enter(self):\r\n pass\r\n\r\n def exit(self):\r\n pass\r\n\r\n def find_lca(self, pre, next):\r\n # find lowest common ancestor\r\n next_branch = []\r\n if pre.depth > next.depth:\r\n for i in range(next.depth, pre.depth):\r\n pre.exit()\r\n #print(pre, 'pre up', pre._parent)\r\n pre = pre._parent\r\n\r\n elif pre.depth < next.depth:\r\n for i in range(pre.depth, next.depth):\r\n next_branch.insert(0, next)\r\n #print(next, 'next up', next._parent)\r\n next = next._parent\r\n if pre is next:\r\n pass\r\n else:\r\n pre_parent = pre._parent\r\n next_parent = next._parent\r\n while pre_parent != next_parent:\r\n #print(pre, next, 'up together')\r\n pre.exit()\r\n pre = pre_parent\r\n pre_parent = pre._parent\r\n next_branch.insert(0, next)\r\n next = next_parent\r\n next_parent = next._parent\r\n pre.exit()\r\n if pre_parent._children.index(pre) > pre_parent._children.index(next):\r\n pre_parent.child_reenter()\r\n next.reenter()\r\n else:\r\n next.enter()\r\n\r\n for next in next_branch:\r\n next.enter()\r\n\t\t\t\r\n def get_pos(self, key):\r\n self.current_pos = self.pos_map.get(key)\r\n return self.current_pos\r\n\r\n def get_crange(self, key):\r\n self.current_range = self.node_map.get(key)\r\n return self.current_range\r\n\r\n def get_node(self, key):\r\n if key == self.current_key:\r\n return self.current_node\r\n else:\r\n self.last_key = self.current_key\r\n self.last_node = self.current_node\r\n self.current_node = self.node_map.get(key)\r\n self.current_key = key\r\n self.find_lca(self.last_node, self.current_node)\r\n return self.current_node\r\n\r\nclass HRangePos(RangePos):\r\n\r\n def enter(self):\r\n #print(self, self._parent, 'hrange enter')\r\n #print(self._parent.max_rowx + 1, self._parent.min_colx, )\r\n self.set_mins(self._parent.max_rowx + 1, self._parent.min_colx)\r\n self.cells.clear()\r\n\r\n def reenter(self):\r\n #print('reenter', self._parent.max_rowx + 1, self._parent.min_colx)\r\n self._parent.min_rowx = self._parent.max_rowx + 1\r\n self.set_mins(self._parent.max_rowx + 1, self._parent.min_colx)\r\n self.cells.clear()\r\n\r\n def exit(self):\r\n self._parent.colx = self.colx\r\n #self._parent.max_colx = self.max_colx\r\n self._parent.rowx = self.max_rowx\r\n self._parent.max_rowx = max(self.max_rowx, self._parent.max_rowx)\r\n\r\n self.align_children()\r\n if self.depth == 1:\r\n self.write_cells()\r\n\r\n def child_reenter(self):\r\n self.align_children()\r\n\r\n def align_children(self):\r\n if self.nocache:\r\n return\r\n for child in self._children:\r\n if not child.cells:\r\n continue\r\n try:\r\n aligned = align(child.min_rowx, child.max_rowx, self.min_rowx, self.max_rowx)\r\n child.align(aligned)\r\n except Exception as e:\r\n print(e)\r\n print(child.cells)\r\n self.cells.update(child.cells)\r\n child.cells.clear()\r\n\r\n def write_cells(self):\r\n if not self.cells:\r\n return\r\n min_rowx, min_colx = min(self.cells)\r\n max_rowx, max_colx = max(self.cells)\r\n\r\n for wtrowx in range(min_rowx, max_rowx + 1):\r\n for wtcolx in range(min_colx, max_colx + 1):\r\n cell = self.cells.get((wtrowx, wtcolx))\r\n if cell:\r\n cell.write_cell(self.wtsheet)\r\n else:\r\n pass\r\n #print(wtrowx, wtcolx, 'no cell')\r\n\r\n\r\nclass VRangePos(RangePos):\r\n\r\n def enter(self):\r\n #print(self, self._parent, 'vrange enter')\r\n self.set_mins(self._parent.min_rowx, self._parent.colx + 1)\r\n self.cells.clear()\r\n\r\n def reenter(self):\r\n #print(self, self._parent, 'vrange reenter')\r\n #print(self._parent.min_rowx, self._parent.max_rowx, 'vrange reenter')\r\n 
self._parent.min_rowx = self._parent.max_rowx + 1\r\n self.set_mins(self._parent.max_rowx + 1, self._parent.min_colx)\r\n self.cells.clear()\r\n\r\n\r\n def exit(self):\r\n #print(self, self._parent, 'vrange exit')\r\n #print(self.max_rowx, self.max_colx)\r\n #print(self._parent.max_rowx, self._parent.max_colx)\r\n self._parent.colx = self.colx\r\n # self._parent.max_colx = self.max_colx\r\n self._parent.rowx = self.max_rowx\r\n self._parent.max_rowx = max(self.max_rowx, self._parent.max_rowx)\r\n self.merge_children()\r\n self.min_rowx = self._parent.min_rowx\r\n\r\n def child_reenter(self):\r\n self.merge_children()\r\n\r\n def merge_children(self):\r\n for child in self._children:\r\n self.cells.update(child.cells)\r\n child.cells.clear()\r\n\r\n def write_cell(self, rdrowx, rdcolx, value, cty):\r\n wtrowx, wtcolx = self.next_cell()\r\n #print('write cell', rdrowx, rdcolx, wtrowx, wtcolx, value, cty)\r\n if self.nocache:\r\n self.wtsheet.cell(rdrowx, rdcolx, wtrowx, wtcolx, value, cty)\r\n else:\r\n self.cells[(wtrowx, wtcolx)] = CachedCell(rdrowx, rdcolx, wtrowx, wtcolx, value, cty)\r\n\r\n def align(self, aligned):\r\n if not aligned:\r\n return\r\n for rdrowx, wtsetting in aligned:\r\n for colx in range(self.min_colx, self.colx + 1):\r\n cell = self.cells.get((rdrowx, colx))\r\n if not cell:\r\n raise Exception('no cell to align')\r\n continue\r\n for wtrowx,merged in wtsetting:\r\n if merged:\r\n self.cells[(wtrowx, colx)] = cell.create_mcell(wtrowx)\r\n else:\r\n cell.move_row(wtrowx)\r\n self.cells[(wtrowx, colx)] = cell\r\n\r\nclass CachedCell():\r\n\r\n def __init__(self, rdrowx, rdcolx, wtrowx, wtcolx, value, cty):\r\n self.rdrowx = rdrowx\r\n self.rdcolx = rdcolx\r\n self.wtrowx = wtrowx\r\n self.wtcolx = wtcolx\r\n self.value = value\r\n self.cty = cty\r\n\r\n def move_row(self, target_rowx):\r\n self.wtrowx = target_rowx\r\n\r\n def write_cell(self, wtsheet):\r\n wtsheet.cell(self.rdrowx, self.rdcolx, self.wtrowx, self.wtcolx, self.value, self.cty)\r\n\r\n def create_mcell(self, target_rowx):\r\n return MergedCell(self.rdrowx, self.rdcolx, target_rowx, self.wtcolx, self)\r\n\r\nclass MergedCell():\r\n\r\n def __init__(self, rdrowx, rdcolx, wtrowx, wtcolx, cached_cell):\r\n self.rdrowx = rdrowx\r\n self.rdcolx = rdcolx\r\n self.wtrowx = wtrowx\r\n self.wtcolx = wtcolx\r\n self.cached_cell = cached_cell\r\n\r\n def move_row(self, target_rowx):\r\n self.wtrowx = target_rowx\r\n\r\n def create_mcell(self, target_rowx):\r\n return MergedCell(self.rdrowx, self.rdcolx, target_rowx, self.wtcolx, self.cached_cell)\r\n\r\n def write_cell(self, wtsheet):\r\n #return\r\n wtsheet.mcell(self.rdrowx, self.rdcolx, self.wtrowx, self.wtcolx, self.cached_cell.wtrowx)\r\n\r\ndef align(mina, maxa, minb, maxb):\r\n if mina != minb:\r\n print(mina, minb, maxa, maxb)\r\n raise Exception( 'mins not equal')\r\n if maxb < maxa:\r\n print(maxa, maxb)\r\n raise Exception('maxb smaller')\r\n if maxa == maxb:\r\n return\r\n a = maxa - mina + 1\r\n b = maxb - minb + 1 - a\r\n d, r = divmod(b, a)\r\n aligned = []\r\n rowb = minb\r\n for index,rowa in enumerate(range(mina, maxa + 1)):\r\n rowbs = []\r\n rowbs.append((rowb, 0))\r\n rowb += 1\r\n for _ in range(d):\r\n rowbs.append((rowb, 1))\r\n rowb += 1\r\n if index < r:\r\n rowbs.append((rowb, 1))\r\n rowb += 1\r\n rowbs.reverse()\r\n aligned.append((rowa, rowbs))\r\n aligned.reverse()\r\n return 
aligned\r\n\r\n","sub_path":"xltpl/pos.py","file_name":"pos.py","file_ext":"py","file_size_in_byte":11216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"519674939","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\n# In[2]:\n\n\ndriver = webdriver.Chrome(\"D:\\Data Science - ExcelR\\Data Science Project\\chromedriver\")\n\n\n# In[7]:\n\n\nproducts=[] #List to store name of the product\nprices=[] #List to store price of the product\nratings=[] #List to store rating of the product\n\n\n# In[8]:\n\n\ndriver.get(\"https://www.flipkart.com/computers/laptops/~acer-gaming-laptops/pr?sid=6bo,b5g&wid=3.productCard.PMU_V2_2\")\n\n\n# In[56]:\n\n\ncontent = driver.page_source\nsoup = BeautifulSoup(content)\n\nfor a in soup.findAll('a',href=True, attrs={'class':'_13oc-S'}):\n    name=a.find('div', attrs={'class':'_4rR01T'})\n    price=a.find('div', attrs={'class':'_30jeq3 _1_WHN1'})\n    rating=a.find('div', attrs={'class':'_3LWZlK'})\n    products.append(name.text)\n    prices.append(price.text)\n    ratings.append(rating.text)\n\n\n# In[57]:\n\n\ndf = pd.DataFrame({'Product Name':products,'Price':prices,'Rating':ratings}) \n\n\n# In[58]:\n\n\ndf.to_csv('products.csv', index=False, encoding='utf-8')\n\n\n# In[59]:\n\n\ndf\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Data Scraping.py","file_name":"Data Scraping.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"361141253","text":"import pandas as pd\n\ndf_indicadores = pd.read_parquet(f'../data/processed/df_indicadores.parquet')\ndf_final_pob = pd.read_parquet('../data/processed/df_final_pob.parquet')\ndf_final_pob_melt = pd.read_parquet('../data/processed/df_final_pob_melt.parquet')\ndf_final_pob_melt_PC = pd.read_parquet('../data/processed/df_final_pob_melt_PC.parquet')\n\nCCAA=sorted(df_final_pob['CCAA'].unique().tolist())\nCCAA.insert(0, 'TODAS')\nCCAA_dict = dict(zip(CCAA, CCAA))\n\nprov=sorted(df_final_pob['Provincia'].unique().tolist())\nprov.insert(0, 'TODAS')\nPROV = dict(zip(prov, prov))\n\nmun=sorted(df_final_pob['Nombre Ente Principal'].unique().tolist())\nmun.insert(0, 'TODOS')\nMUNICIPIOS = dict(zip(mun, mun))\n\npdc=sorted(list(df_final_pob_melt['Descripción'].unique()))\npdc.insert(0, 'TODOS')\nPDC = dict(zip(pdc, pdc))\n\n\n\n\n\nWELL_COLORS = dict(\n    GD=\"#FFEDA0\",\n    GE=\"#FA9FB5\",\n    GW=\"#A1D99B\",\n    IG=\"#67BD65\",\n    OD=\"#BFD3E6\",\n    OE=\"#B3DE69\",\n    OW=\"#FDBF6F\",\n    ST=\"#FC9272\",\n    BR=\"#D0D1E6\",\n    MB=\"#ABD9E9\",\n    IW=\"#3690C0\",\n    LP=\"#F87A72\",\n    MS=\"#CA6BCC\",\n    Confidential=\"#DD3497\",\n    DH=\"#4EB3D3\",\n    DS=\"#FFFF33\",\n    DW=\"#FB9A99\",\n    MM=\"#A6D853\",\n    NL=\"#D4B9DA\",\n    OB=\"#AEB0B8\",\n    SG=\"#CCCCCC\",\n    TH=\"#EAE5D9\",\n    UN=\"#C29A84\",\n)","sub_path":"Dash/controls.py","file_name":"controls.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"518056963","text":"from torchtext.utils import download_from_url, extract_archive, unicode_csv_reader\nfrom torchtext.data.datasets_utils import _RawTextIterableDataset\nfrom torchtext.data.datasets_utils import _wrap_split_argument\nfrom torchtext.data.datasets_utils import _add_docstring_header\nfrom torchtext.data.datasets_utils import _find_match\nimport os\nimport io\n\nURL = 
'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbZlU4dXhHTFhZQU0'\n\nMD5 = 'f7ddfafed1033f68ec72b9267863af6c'\n\nNUM_LINES = {\n 'train': 650000,\n 'test': 50000,\n}\n\n_PATH = 'yelp_review_full_csv.tar.gz'\n\n\n@_add_docstring_header(num_lines=NUM_LINES, num_classes=5)\n@_wrap_split_argument(('train', 'test'))\ndef YelpReviewFull(root, split):\n def _create_data_from_csv(data_path):\n with io.open(data_path, encoding=\"utf8\") as f:\n reader = unicode_csv_reader(f)\n for row in reader:\n yield int(row[0]), ' '.join(row[1:])\n dataset_tar = download_from_url(URL, root=root,\n path=os.path.join(root, _PATH),\n hash_value=MD5, hash_type='md5')\n extracted_files = extract_archive(dataset_tar)\n\n path = _find_match(split + '.csv', extracted_files)\n return _RawTextIterableDataset(\"YelpReviewFull\", NUM_LINES[split],\n _create_data_from_csv(path))\n","sub_path":"torchtext/datasets/yelpreviewfull.py","file_name":"yelpreviewfull.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"425221004","text":"\"\"\"\n\nKavitha is a student and she used to spend time with her neighbours.Since she is very friendly all of her neighbours like to spend time with her.she is awarded as\n\nBest neighbour by their neighbours.Now the task is to find the best neighbour in the undirected graph.Given a graph with V vertices and E edges.Find the Vertex \n\nthat has more number of neighbours.If many best neighbours exist print the first Vertex in the order.\n\nInput:\n\nFirstline contains number of vertices V\nSecond line consists of number of edges E\nNext E lines contains the edges connecting two vertices\n\nOutput:\nprint the Best Neighbour\n\nExample 1:\n\nInput:\n4\n3\n1 2\n2 3\n3 4\n\nOutput:\n2\n\nExample 2:\n\nInput:\n5\n4\n1 2\n1 3\n2 3\n2 4\n\nOutput:\n2\n\n\"\"\"\ndef addEdge(s,d,graph):\n graph[s].append(d)\n graph[d].append(s)\ndef bestNeighbour(graph):\n l=0\n for i in range(1,len(graph)):\n if len(graph[i])>l:\n l=len(graph[i])\n best=i\n return best\nv=int(input())\ne=int(input())\ng=[[]for i in range(v+1)]\nfor i in range(e):\n s,d=map(int,input().split())\n addEdge(s,d,g)\nprint(bestNeighbour(g))\n\n","sub_path":"vertex with max neighbours.py","file_name":"vertex with max neighbours.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"538142990","text":"import gi\ngi.require_version('Notify', '0.7')\ngi.require_version('Gtk', '3.0')\ngi.require_version('AppIndicator3', '0.1')\n\nfrom gi.repository import Gtk as gtk\nfrom gi.repository import AppIndicator3 as appindicator\nfrom gi.repository import Notify as notify\nimport signal\nimport os\n\n# Client path\nclient_path = \"/home/miroslav/Projects/MM-backer/client/client\"\n\nhostname = \"localhost\"\nport = 1308\n\n# Indicator name\nAPPINDICATOR_ID = 'Backer indicator'\n\n# Icons\nicon_black = os.path.abspath('icons/icon-black.png')\nicon_white = os.path.abspath('icons/icon-white.png')\nactual_icon = icon_black\n\n\ndef main():\n\n # Register cancel signal\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n indicator = appindicator.Indicator.new(APPINDICATOR_ID, icon_black, appindicator.IndicatorCategory.SYSTEM_SERVICES)\n indicator.set_status(appindicator.IndicatorStatus.ACTIVE)\n indicator.set_menu(build_menu(indicator))\n\n # Init notifications\n notify.init(APPINDICATOR_ID)\n\n # Loop gtk\n gtk.main()\n\n\ndef build_menu(indicator):\n\n menu = 
gtk.Menu()\n\n # Sync folders\n item_upload = gtk.MenuItem(\"Sync\")\n item_upload.connect('activate', sync)\n menu.append(item_upload)\n\n # Item switching themes between light and dark\n item_switch_theme = gtk.MenuItem('Switch theme color')\n item_switch_theme.connect('activate', switch_theme, indicator)\n menu.append(item_switch_theme)\n\n # Quit button\n item_quit = gtk.MenuItem('Quit')\n item_quit.connect('activate', quit)\n menu.append(item_quit)\n\n menu.show_all()\n\n return menu\n\n\ndef sync(source):\n\n # Notification template\n # notify.Notification.new(\"Nadpis\", 'text', None).show()\n\n os.system(client_path + ' ' + hostname + ' ' + str(port))\n\n\ndef switch_theme(source, indicator):\n\n global actual_icon\n\n if actual_icon == icon_black:\n\n actual_icon = icon_white\n indicator.set_icon(icon_white)\n\n elif actual_icon == icon_white:\n\n actual_icon = icon_black\n indicator.set_icon(icon_black)\n\n\ndef quit(source):\n\n notify.uninit()\n gtk.main_quit()\n\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"client/indicator.py","file_name":"indicator.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"375961837","text":"# import the necessary packages\nfrom keras.models import Sequential\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers.core import Dropout\nfrom keras.layers.core import Dense\nfrom keras import backend as K\n\nclass SmallerVGGNet:\n @staticmethod\n def build(width, height, depth, classes):\n # initialize the model along with the input shape to be\n # \"channels last\" and the channels dimension itself\n model = Sequential()\n inputShape = (height, width, depth)\n chanDim = -1\n\n # if we are using \"channels first\", update the input shape\n # and channels dimension\n if K.image_data_format() == \"channels_first\":\n inputShape = (depth, height, width)\n chanDim = 1\n\n # CONV => RELU => POOL\n model.add(Conv2D(3, (11, 11), activation='relu', input_shape=inputShape))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n\n model.add(Conv2D(96, (5, 5), activation='relu'))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n model.add(Dropout(0.5))\n model.add(Conv2D(192, (3, 3)))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n model.add(Dropout(0.5))\n model.add(Conv2D(192, (3, 3)))\n\n model.add(Flatten())\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n # return the constructed network architecture\n return model","sub_path":"pyimagesearch/smallervggnet.py","file_name":"smallervggnet.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"246727688","text":"from sarasvati.plugins import CommandsPlugin\nfrom .handlers import create_thought, delete_thought, search_thoughts, link_thoughts, activate, info, rename\n\n\nclass GenericCommandsPlugin(CommandsPlugin):\n \"\"\"\n Generic commands plugin\n \"\"\"\n\n def activate(self):\n super().activate()\n\n self.register(\"/activate\", activate) \\\n .add_argument(\"title\", as_default=True) \\\n .add_argument(\"id\")\n\n self.register(\"/create\", create_thought) \\\n .add_argument(\"title\", required=True, as_default=True) \\\n .add_argument(\"desc\") \\\n 
.add_argument(\"as\")\n self.register(\"/delete\", delete_thought) \\\n .add_argument(\"title\", as_default=True)\n self.register(\"/search\", search_thoughts) \\\n .add_argument(\"title\", as_default=True)\n self.register(\"/rename\", rename) \\\n .add_argument(\"title\", as_default=True, required=True)\n self.register(\"/link\", link_thoughts) \\\n .add_argument(\"from\", as_default=True, as_active=True) \\\n .add_argument(\"to\", required=True) \\\n .add_argument(\"as\")\n\n self.register(\"/info\", info) \\\n .add_argument(\"title\", as_default=True, as_active=True)\n\n def deactivate(self):\n super().deactivate()\n\n self.unregister(\"/activate\")\n self.unregister(\"/create\")\n self.unregister(\"/delete\")\n\n self.unregister(\"/ls\")\n self.unregister(\"/l\")\n self.unregister(\"/rn\")\n self.unregister(\"/info\")\n","sub_path":"core/commands/brain/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"133699439","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 6 16:48:47 2016\n\n@author: jessime\n\"\"\"\n\ndef get_corners(pic):\n \"\"\"Return the corner of an image\n \n Parameters\n ----------\n pic : graphics.Image\n The image for which the corners will be found\n \n Returns\n -------\n tuple\n Four values representing the boarders of the image\n \"\"\"\n left = pic.anchor.x - (pic.getWidth()/2)\n top = pic.anchor.y - (pic.getWidth()/2)\n right = pic.anchor.x + (pic.getWidth()/2)\n bottom = pic.anchor.y + (pic.getWidth()/2)\n return (left, top, right, bottom)\n \ndef point_in_rect(x, y, pic):\n \"\"\"Decide if a point is found within the corners of an image\n \n Parameters\n ----------\n x : int, float\n x coordinate for the point being checked\n y : int, float\n y coordinate for the point being checked\n pic : graphics.Image\n The image in which to search for the given point\n \n Returns\n -------\n inside : bool\n Whether or not the point lies within an image \n \"\"\"\n inside = False\n left, top, right, bottom = get_corners(pic)\n if left <= x <= right and top <= y <= bottom:\n inside = True\n return inside\n \ndef collision_detection(pic1, pic2):\n \"\"\"Decide if two images are touching on screen\n\n Parameters\n ----------\n pic1 : graphics.Image\n The first image for checking collision\n pic2 : graphics.Image\n The second image for checking collison\n\n Returns\n -------\n collision : bool\n Whether or not the two images have collided\n \"\"\"\n collision = False\n for a, b in [(pic1, pic2), (pic2, pic1)]:\n left, top, right, bottom = get_corners(a)\n if ((point_in_rect(left, top, b)) or\n (point_in_rect(left, bottom, b)) or\n (point_in_rect(right, top, b)) or\n (point_in_rect(right, bottom, b))):\n collision = True\n return collision","sub_path":"move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"207957015","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nTOTAL = 50\nSTEP = 0.25\nNOIZ = 10\n\ndef func(x):\n \"\"\"\n Чистая функция, возвращающая не зашумленные данные\n :param x:\n :return:\n \"\"\"\n return 4*x + 15\n\ndef generate_sample(total=TOTAL):\n \"\"\"\n Добавили в функцию шум\n :param total:\n :return:\n \"\"\"\n x = 0\n while x < total * STEP:\n yield func(x) + np.random.uniform(-1, 1) * np.random.uniform(1, NOIZ)\n x += STEP\n#------------------------------------------\n\ndef 
prediction(X,w):\n \"\"\"\n Функция предсказания, вынес для удобства чтения кода далее.\n функция реализует решение уравнения f(X) = X*w в матричном виде\n :param X: входные параметры\n :param w: веса\n :return: f(X) = X*w\n \"\"\"\n return np.dot(X, w)\n\ndef mes (y, y_pred):\n \"\"\"\n Функция среднеквадратичной ошибки\n :param y: известные параметры\n :param y_pred: предсказанные параметры\n :return: среднеквадратичную ошибку\n \"\"\"\n N = y_pred.shape[0]\n m = np.sum(((y - y_pred) ** 2)) / N\n return m\n\ndef gradient_step_v(X, y, w, eta=0.01):\n \"\"\"\n Функция расчета смещения весов в зависимости от градиента.\n :param X: входные параметры\n :param y: известные параметры\n :param w: веса\n :param eta: коэффициент регулирующий скорость изменения весов\n :return: обновленные веса\n \"\"\"\n N = X.shape[0]\n y_pred = prediction(X, w)\n rs = (y_pred - y)\n st = (w.T - np.sum((2.0* eta/N*X*rs),axis=0)).T\n return st\n\ndef gradient_descent(X, y, max_iter=1e5, eta = 1e-3, error_visualization=False):\n \"\"\"\n Функция реализующая метод градиентного спуска.\n :param X: входные параметры\n :param y: известные параметры\n :param max_iter: максимальное число интераций\n :param eta: коэффициент регулирующий скорость изменения весов\n :param error_visualization: флаг, отрисовывать визуальзацию изменения ошибки или нет\n :return: итоговую ошибку и веса\n \"\"\"\n\n w = np.zeros([X.shape[1],1])\n ERR_list = []\n inter = []\n\n for i in range(int(max_iter)):\n w = gradient_step_v(X, y, w, eta = eta)\n err_i = mes(y, prediction(X, w))\n ERR_list.append(err_i)\n inter.append(i)\n if error_visualization is True:\n plt.plot(inter, ERR_list)\n plt.show()\n return w, ERR_list[-1]\n\n#----------------------------------------------------------------\n\nx = np.array([[i * STEP] for i in range(TOTAL)], dtype = float)\nY = np.array([[i] for i in generate_sample()], dtype = float)\nX = np.column_stack((np.ones((TOTAL,1)),x))\n\n#----------------------------------------------------------------\n\nw,E = gradient_descent(X, Y, max_iter=1e5, eta=(1 / (TOTAL ** 2)), error_visualization = True)\nprint(f'коэффициенты: \\n{w}')\nprint(f'Среднеквадратичная ошибка - {E}')\n\nfig = plt.figure()\nplt.plot(x, Y, 'bo')\nY_real = [func(i) for i in x]\nplt.plot(x, Y_real, 'g')\nY_pred = X @ w\nplt.plot(x, Y_pred, 'r')\nplt.show()\n","sub_path":"gradient descent/single variable gradient descent.py","file_name":"single variable gradient descent.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"443108800","text":"import os\nimport csv\nimport commands\n\nPATH_STUDENT =\"/home/master/gcking/test/archiTA/Student_project1\"\nPATH_RPT =\"/home/master/gcking/test/archiTA/Student_rpt\"\nPATH_TA\t\t =\"/home/master/gcking/test/archiTA/TA_project1/single_cycle\"\n\nstudentID = commands.getoutput('ls '+PATH_STUDENT).split('\\n')\nos.chdir('Student_rpt')\n\nfor i in range(0,len(studentID)):\n\tos.system('mkdir '+studentID[i])\n\tos.chdir(PATH_RPT+'/'+studentID[i])\n\tos.system('cp '+PATH_STUDENT+'/'+studentID[i]+'/testcase/'+'iimage.bin .')\n\tos.system('cp '+PATH_STUDENT+'/'+studentID[i]+'/testcase/'+'dimage.bin .')\n\tflag = os.system(PATH_TA+' > error_msg.txt 2>&1')\n\tos.system('rm iimage.bin')\n\tos.system('rm 
dimage.bin')\n\tos.chdir('..')\n","sub_path":"archiTA_dev/1st_student_rpt.py","file_name":"1st_student_rpt.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"644367216","text":"# import kivy\n# kivy.require('1.10.0') # replace with your current kivy version !\n\n''' Sender random commands for ESP8266 0.7.9.8h01\n(exhibition version + Holl sensor v01)\nshould match main.py (ESP8266 autoloaded v. 0.7.9.1)/\n\n'''\n\n# from kivy.app import App\n#\n# from kivy.uix.widget import Widget\n# from kivy.garden.joystick import Joystick\n#\n# from kivy.uix.gridlayout import GridLayout\n# from kivy.uix.floatlayout import FloatLayout\n# from kivy.uix.label import Label\n# from kivy.uix.textinput import TextInput\n# from kivy.uix.button import Button\n\nimport socket\nimport time\nimport random\n\n\n\n# class RoboPad(FloatLayout):\n# def __init__(self, **kwargs):\n# super(RoboPad, self).__init__(**kwargs)\n#\n# # # print('running super(Gamepad, self).__init__()')\n#\n# # joystickhand and joystickrun\n# self.joystickhand = Joystick(size_hint=(.4, .4),\n# pos_hint={'x': 0.0, 'y': .2},\n# sticky=True)\n# self.add_widget(self.joystickhand)\n# self.joystickrun = Joystick(size_hint=(.4, .4),\n# pos_hint={'x': 0.6, 'y': .2})\n# self.add_widget(self.joystickrun)\n#\n# # add some buttons\n# self.catchbutton = Button(size_hint=(.15, .15),\n# pos_hint={'x': .8, 'y': .65},\n# text='Catch me!')\n# self.add_widget(self.catchbutton)\n#\n# # add debug Labels\n# # self.debug_label = Label(size_hint=(.2, .2),\n# # pos_hint={'x': .8, 'y': .8},\n# # text='message ... ...',) # multiline=True,)\n# # self.add_widget(self.debug_label)\n# # self.debug_label_hand = Label(size_hint=(.2, .2),\n# # pos_hint={'x': .1, 'y': .8},\n# # text='message ... ...',)\n# # self.add_widget(self.debug_label_hand)\n# # self.debug_label_run = Label(size_hint=(.2, .2),\n# # pos_hint={'x': .5, 'y': .8},\n# # text='message ... 
...',) # multiline=True,)\n# # self.add_widget(self.debug_label_run)\n#\n# # bind joystick\n# self.joystickrun.bind(pad=self.update_coordinates_run)\n# self.joystickhand.bind(pad=self.update_coordinates_hand)\n#\n# # bind button\n# self.catchbutton.bind(on_press=self.update_catch_release)\n#\n# def update_coordinates_run(self, joystick, pad):\n# # test for joystickrun binding test\n# # # print('update_coordinates_run ...')\n# # # print(self, joystick, pad)\n# x = str(pad[0])[0:5]\n# y = str(pad[1])[0:5]\n# radians = str(joystick.radians)[0:5]\n# magnitude = str(joystick.magnitude)[0:5]\n# angle = str(joystick.angle)[0:5]\n# # text = \"x: {}\\ny: {}\\nradians: {}\\nmagnitude: {}\\nangle: {}\\nsend data status: {}\"\n# # self.debug_label_run.text = text.format(x, y, radians, magnitude, angle, send_status)\n#\n# # without send_status # print just to debug label\n# text = \"x: {}\\ny: {}\\nradians: {}\\nmagnitude: {}\\nangle: {}\"\n# # self.debug_label_run.text = text.format(x, y, radians, magnitude, angle)\n# # self.debug_label.text = text.format(x, y, radians, magnitude, angle)\n# self.send_command_data(turnx=x, runy=y)\n#\n# def update_coordinates_hand(self, joystick, pad):\n# # test for update_coordinates_hand binding test\n# # # print('update_coordinates_hand running...')\n# # # print(self, joystick, pad)\n# x = str(pad[0])[0:5]\n# y = str(pad[1])[0:5]\n# radians = str(joystick.radians)[0:5]\n# magnitude = str(joystick.magnitude)[0:5]\n# angle = str(joystick.angle)[0:5]\n# # text = \"x: {}\\ny: {}\\nradians: {}\\nmagnitude: {}\\nangle: {}\\nsend data status: {}\"\n# # self.debug_label_run.text = text.format(x, y, radians, magnitude, angle, send_status)\n#\n# # without send_status # print just to debug label\n# text = \"x: {}\\ny: {}\\nradians: {}\\nmagnitude: {}\\nangle: {}\"\n# # self.debug_label_hand.text = text.format(x, y, radians, magnitude, angle)\n# # self.debug_label.text = text.format(x, y, radians, magnitude, angle)\n#\n# # <<<\n# self.send_command_data(headx=x, handy=y)\n#\n# def update_catch_release(self, instance):\n# # # print('DBG: button pressed!')\n# # catch = catch\n# self.send_command_data(catch='catch')\n#\n# def send_command_data(self, headx='z', handy='z', turnx='z', runy='z', catch='z'):\n# robot_host = '192.168.4.1' # hardcoded robot ip t4m net\n# robot_port = 80\n# # # print('send_command_data running')\n# # self.debug_label.text = 'headx {}\\nhandy {}\\nturnx {}\\nruny {}\\ncatch {}'.format(headx,\n# # handy,\n# # turnx,\n# # runy,\n# # catch)\n#\n# dict_commands = {'headx': headx, 'handy': handy, 'turnx': turnx, 'runy': runy, 'catch': catch}\n# # # print(dict_commands)\n#\n# str_commands = 'http://' + str(robot_host) + '/?'\n#\n# for item in dict_commands:\n# # # print(item,\n# # dict_commands[item],\n# # type(dict_commands[item])\n# # )\n# # if dict_commands[item] !='z':\n# # str_commands += item +\\\n# # '=' + \\\n# # dict_commands[item] + \\\n# # '&'\n#\n# # add normalization\n# if dict_commands[item] != 'z':\n# if dict_commands[item] != 'catch':\n# str_commands += item + \\\n# '=' + \\\n# str('{0:.2f}'.format((float(dict_commands[item]) + 1) / 2)) + \\\n# '&'\n# else:\n# str_commands += item + \\\n# '=' + \\\n# 'catch' + \\\n# '&'\n# # # print('str_commands: {}'.format(str_commands))\n#\n# try:\n# client_socket = socket.socket() # instantiate\n# client_socket.connect((robot_host, robot_port)) # connect to the server\n# # message = 'http://192.168.4.1/?turnx=' + str(turnx) # take input\n# client_socket.send(str_commands.encode()) # encode than send message\n# 
#\n# client_socket.close() # close the connection\n# # # sleep(3)\n# # # time.sleep(0.02)\n# # #\n# time.sleep(0.2)\n# # # print('sent OK {} sent'.format(str_commands))\n# # send_status = 'sent ok' + str(turnx)\n# except:\n# print('ERR: command not sent {}'.format(turnx))\n# # send_status += 'error sending turnx' + str(turnx)\n#\n#\n# class RoboJoystickApp(App):\n# def build(self):\n# # print('BasicApp.running build()')\n# self.icon = 'robot256.png'\n# return RoboPad() # goes how ?\n\n\ndef random_sender():\n\n def generate_commands(timeout=0, cycle=0):\n send_command_data(timeout=0,\n cycle=0,\n headx=random.choice(['z', random.random()]),\n handy=random.choice(['z', random.random()]),\n turnx=random.choice(['z', random.random()]),\n runy=random.choice(['z', random.random()]),\n catch=random.choice(['z', 'catch']))\n\n def send_command_data(timeout=0, cycle=0, headx='z', handy='z', turnx='z', runy='z', catch='z'):\n robot_host = '192.168.4.1' # hardcoded robot ip t4m net\n robot_port = 80\n # robot_port = 8080\n # # print('send_command_data running')\n # self.debug_label.text = 'headx {}\\nhandy {}\\nturnx {}\\nruny {}\\ncatch {}'.format(headx,\n # handy,\n # turnx,\n # runy,\n # catch)\n\n dict_commands = {'headx': headx, 'handy': handy, 'turnx': turnx, 'runy': runy, 'catch': catch}\n # # print(dict_commands)\n\n str_commands = 'http://' + str(robot_host) + '/?'\n\n for item in dict_commands:\n # # print(item,\n # dict_commands[item],\n # type(dict_commands[item])\n # )\n # if dict_commands[item] !='z':\n # str_commands += item +\\\n # '=' + \\\n # dict_commands[item] + \\\n # '&'\n\n # add normalization\n if dict_commands[item] != 'z':\n if dict_commands[item] != 'catch':\n str_commands += item + \\\n '=' + \\\n str('{0:.2f}'.format((float(dict_commands[item]) + 1) / 2)) + \\\n '&'\n else:\n str_commands += item + \\\n '=' + \\\n 'catch' + \\\n '&'\n # # print('str_commands: {}'.format(str_commands))\n\n try:\n client_socket = socket.socket() # instantiate\n client_socket.connect((robot_host, robot_port)) # connect to the server\n # message = 'http://192.168.4.1/?turnx=' + str(turnx) # take input\n client_socket.send(str_commands.encode()) # encode than send message\n #\n client_socket.close() # close the connection\n # # sleep(3)\n # # time.sleep(0.02)\n # #\n # time.sleep(0.2)\n # # print('sent OK {} sent'.format(str_commands))\n # send_status = 'sent ok' + str(turnx)\n except:\n pass\n print('ERR: cycle: {},'\n ' timeout:{},'\n ' command len: {},'\n ' not sent: {}'.format(timeout,\n cycle,\n len(str_commands.encode()),\n str_commands.encode()))\n # send_status += 'error sending turnx' + str(turnx)\n\n def cycle_timeouted():\n # initial timeout 0.5 s: 0.2 0.2 0.2\n # ini_timeout = 50 20 50 100\n # step_timeout = 100 100 250 500\n\n ini_timeout = 100\n step_timeout = 500\n\n count = 1\n\n for timeout in range(ini_timeout, 1, -1):\n print('DBG: current timeout: {}, count: {}'.format(timeout / step_timeout, count))\n for cycle in range(0, 10):\n # print('DBG: current timeout: {}, cycle: {}'.format(timeout / step_timeout, cycle))\n generate_commands(timeout, cycle)\n time.sleep(timeout / step_timeout)\n count += 1\n\n cycle_timeouted()\n\n\nif __name__ == '__main__':\n print('running __main__()')\n random_sender()\n print('quiting __main__()')\n\n","sub_path":"async-examples-from-docs/uasyncio/uasyncio-01/tester-sender.py","file_name":"tester-sender.py","file_ext":"py","file_size_in_byte":11893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} 
+{"seq_id":"44137095","text":"#!/usr/bin/env python3\n\"\"\"\nTest development server.\n\"\"\"\n\nimport socket\nimport ssl\nimport testlib\nimport traceback\nimport sys\n\nbindsocket = socket.socket()\nbindsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nbindsocket.bind((\"\", 8675))\nbindsocket.listen(5)\n\nwhile True:\n\n print(\"Waiting for a client...\")\n new_socket, from_addr = bindsocket.accept()\n print(\"Accepted a connection from %s\" % str(from_addr))\n\n connection = ssl.wrap_socket(\n new_socket,\n server_side=True,\n certfile=\"server_cert.pem\",\n keyfile=\"server_key.pem\",\n ca_certs=\"client_cert.pem\",\n cert_reqs=ssl.CERT_REQUIRED,\n )\n\n in_line = \"start\"\n\n t = testlib.Transport(connection)\n\n try:\n while in_line:\n in_line = input(\"control> \")\n if in_line:\n args = in_line.split()\n\n if len(args) > 1:\n t.write_msg(testlib.Request(args[0], args[1:]))\n else:\n t.write_msg(testlib.Request(args[0]))\n\n resp = t.read_msg()\n print(resp)\n except KeyboardInterrupt:\n bindsocket.shutdown(socket.SHUT_RDWR)\n bindsocket.close()\n sys.exit(1)\n except EOFError:\n pass\n except Exception:\n traceback.print_exc(file=sys.stdout)\n finally:\n connection.close()\n","sub_path":"testing/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"478162271","text":"a = int(input())\nprost = True\nfor i in range(a//2, 1, -1) :\n print(\"{} % {} = {}\".format(a, i, a % i))\n if a % i == 0 :\n prost = False \n break\nif prost == False :\n print(\"Broj nije prost\")\nelse :\n print(\"Broj je prost\")","sub_path":"Zadatci/zadatak15.py","file_name":"zadatak15.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"488282539","text":"# USAGE\n# python train.py --checkpoints output/checkpoints\n# python train.py --checkpoints output/checkpoints --model output/checkpoints/epoch_25.hdf5 --start-epoch 25\n\n# set the matplotlib backend so figures can be saved in the background\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\n# import the necessary packages\n#from config import tiny_imagenet_config as config\n#from pyimagesearch.preprocessing import ImageToArrayPreprocessor\n#from pyimagesearch.preprocessing import SimplePreprocessor\n#from pyimagesearch.preprocessing import MeanPreprocessor\nfrom pyimagesearch.callbacks import EpochCheckpoint\nfrom pyimagesearch.callbacks import TrainingMonitor\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.image import img_to_array\nfrom keras.utils import to_categorical\nfrom keras.callbacks import ModelCheckpoint\nimport matplotlib.pyplot as plt\nfrom imutils import paths\nimport cv2\nimport os\nimport random\n#from pyimagesearch.io import HDF5DatasetGenerator\nfrom pyimagesearch.resnet import ResNet\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD\nfrom keras.models import load_model\nimport keras.backend as K\nimport argparse\nimport json\nimport numpy as np\nimport sys\n\n# set a high recursion limit so Theano doesn't complain\nsys.setrecursionlimit(5000)\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", required=True,\n help=\"path to input dataset\")\nap.add_argument(\"-p\", \"--plot\", required=True,\n help=\"path to the output loss/accuracy 
plot\")\nap.add_argument(\"-c\", \"--checkpoints\", required=True,\n\thelp=\"path to output checkpoint directory\")\nap.add_argument(\"-ml\", \"--model_load\", type=str,\n\thelp=\"path to *specific* model checkpoint to load\")\nap.add_argument(\"-m\", \"--model\", type=str, required=True,\n help=\"path to model\")\nap.add_argument(\"-o\", \"--output\", required=True,\n help=\"path to output directory (logs, plots, etc.)\")\nap.add_argument(\"-w\", \"--weights\", required=True,\n help=\"path to output directory (logs, plots, etc.)\")\nap.add_argument(\"-s\", \"--start-epoch\", type=int, default=0,\n\thelp=\"epoch to restart training at\")\nargs = vars(ap.parse_args())\n\n# initialize the data and labels\nprint(\"[INFO] loading images...\")\ndata = []\nlabels = []\n\n# grab the image paths and randomly shuffle them\nimagePaths = sorted(list(paths.list_images(args[\"dataset\"])))\nrandom.seed(42)\nrandom.shuffle(imagePaths)\n\n# loop over the input images\nfor imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, (64, 64))\n image = img_to_array(image)\n data.append(image)\n\n # extract the class label from the image path and update the\n # labels list\n label = imagePath.split(os.path.sep)[-2]\n label = 1 if label == \"Positive\" else 0\n labels.append(label)\n\n# scale the raw pixel intensities to the range [0, 1]\ndata = np.array(data, dtype=\"float\") / 255.0\nlabels = np.array(labels)\n\n# partition the data into training and testing splits using 75% of\n# the data for training and the remaining 25% for testing\n(trainX, testX, trainY, testY) = train_test_split(data,\n labels, test_size=0.2, random_state=42)\n\n# convert the labels from integers to vectors\ntrainY = to_categorical(trainY, num_classes=2)\ntestY = to_categorical(testY, num_classes=2)\n\n# apply mean subtraction to the data\nmean = np.mean(trainX, axis=0)\ntrainX -= mean\ntestX -= mean\n\n# construct the training image generator for data augmentation\naug = ImageDataGenerator(rotation_range=18, zoom_range=0.15,\n\twidth_shift_range=0.2, height_shift_range=0.2, shear_range=0.15,\n\thorizontal_flip=True, fill_mode=\"nearest\")\n\n# if there is no specific model checkpoint supplied, then initialize\n# the network and compile the model\nif args[\"model_load\"] is None:\n\tprint(\"[INFO] compiling model...\")\n\tmodel = ResNet.build(64, 64, 3, 2, (3, 4, 6),\n\t\t(64, 128, 256, 512), reg=0.0005, dataset=\"tiny_imagenet\")\n\topt = SGD(lr=1e-1, momentum=0.9)\n\tmodel.compile(loss=\"binary_crossentropy\", optimizer=opt,\n\t\tmetrics=[\"accuracy\"])\n\n# otherwise, load the checkpoint from disk\nelse:\n\tprint(\"[INFO] loading {}...\".format(args[\"model\"]))\n\tmodel = load_model(args[\"model\"])\n\n\t# update the learning rate\n\tprint(\"[INFO] old learning rate: {}\".format(\n\t\tK.get_value(model.optimizer.lr)))\n\tK.set_value(model.optimizer.lr, 1e-3)\n\tprint(\"[INFO] new learning rate: {}\".format(\n\t\tK.get_value(model.optimizer.lr)))\n\n# construct the set of callbacks\nfigPath = os.path.sep.join([args[\"output\"], \"{}.png\".format(\n os.getpid())])\njsonPath = os.path.sep.join([args[\"output\"], \"{}.json\".format(\n os.getpid())])\nfname = os.path.sep.join([args[\"weights\"],\n \"weights-{epoch:03d}-{val_acc:.4f}.hdf5\"])\ncheckpoint = ModelCheckpoint(fname, monitor=\"val_acc\", mode=\"max\",\n save_best_only=True, verbose=1)\ncallbacks = [\n\tEpochCheckpoint(args[\"checkpoints\"], 
every=5,\n\t\tstartAt=args[\"start_epoch\"]),\n\tTrainingMonitor(figPath, jsonPath,\n\t\tstartAt=args[\"start_epoch\"]),checkpoint]\n\n# train the network\nH = model.fit_generator(\n\taug.flow(trainX,trainY,batch_size=128),#trainGen.generator(),\n steps_per_epoch= len(trainX) // 128, #trainGen.numImages // 64,\n validation_data=(testX,testY), #valGen.generator(),\n validation_steps= len(testX) // 128, #valGen.numImages // 64,\n\tepochs=25,\n\tmax_queue_size=10,\n\tcallbacks=callbacks, verbose=1)\n\n# save the network to disk\nprint(\"[INFO] serializing network...\")\nmodel.save(args[\"model\"])\n\n# plot the training loss and accuracy\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(np.arange(0, 25), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, 25), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, 25), H.history[\"acc\"], label=\"train_acc\")\nplt.plot(np.arange(0, 25), H.history[\"val_acc\"], label=\"val_acc\")\nplt.title(\"Training Loss and Accuracy on CIFAR-10\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend()\nplt.savefig(args[\"plot\"])\n \n","sub_path":"yelp_photos/train_resnet.py","file_name":"train_resnet.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"356524936","text":"import business_logic\nfrom flask import Flask, jsonify, abort, request\nfrom diffuculty_level import DiffucultyLevel\n\napp = Flask(__name__)\n#app.config[\"DEBUG\"] = True\n\n\n@app.route('/get_image', methods=['GET'])\ndef init_game():\n #difficulty_level = DiffucultyLevel.EASY\n difficulty = int(request.args.get('difficulty'))\n\n b_logic = business_logic.BusinessLogic()\n success = b_logic.init_database()\n if (not success): abort(500)\n \n image_to_guess = b_logic.init_game(difficulty)\n return image_to_guess\n\n@app.route('/check_answer', methods=['POST'])\ndef check_answer():\n answer = request.get_json()\n\n b_logic = business_logic.BusinessLogic()\n isCorrect = b_logic.check_answer(answer['answer'])\n return jsonify(result = isCorrect)\n \napp.run()","sub_path":"src/lab5/back_end/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"422876183","text":"# Copyright (c) 2019. yoshida-lab. 
All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sb\nfrom scipy.stats import boxcox\nfrom sklearn.base import BaseEstimator\nfrom sklearn.preprocessing import minmax_scale\n\n\nclass DescriptorHeatmap(BaseEstimator):\n \"\"\"\n Heatmap.\n \"\"\"\n\n def __init__(self,\n save=None,\n bc=False,\n pivot_kws=None,\n method='average',\n metric='euclidean',\n figsize=None,\n row_cluster=False,\n col_cluster=True,\n row_linkage=None,\n col_linkage=None,\n row_colors=None,\n col_colors=None,\n mask=None,\n **kwargs):\n \"\"\"\n\n Parameters\n ----------\n save\n bc\n pivot_kws\n method\n metric\n figsize\n row_cluster\n col_cluster\n row_linkage\n col_linkage\n row_colors\n col_colors\n mask\n kwargs\n \"\"\"\n self.save = save\n self.bc = bc\n self.pivot_kws = pivot_kws\n self.method = method\n self.metric = metric\n self.figsize = figsize\n self.col_cluster = col_cluster\n self.row_linkage = row_linkage\n self.row_cluster = row_cluster\n self.col_linkage = col_linkage\n self.row_colors = row_colors\n self.col_colors = col_colors\n self.mask = mask\n self.kwargs = kwargs\n self.desc = None\n\n def _transform(self, series):\n series_ = series\n if series.min() != series.max():\n if self.bc:\n with np.errstate(all='raise'):\n shift = 1e-10\n tmp = series - series.min() + shift\n try:\n series_, _ = boxcox(tmp)\n except FloatingPointError:\n series_ = series\n series_ = minmax_scale(series_)\n return series_\n\n def fit(self, desc):\n desc_ = desc.apply(self._transform)\n self.desc = pd.DataFrame(desc_, index=desc.index, columns=desc.columns)\n return self\n\n def draw(self, y=None):\n ax = sb.clustermap(\n self.desc,\n cmap=\"RdBu\",\n method=self.method,\n figsize=self.figsize,\n row_cluster=self.row_cluster,\n col_cluster=self.col_cluster,\n **self.kwargs)\n ax.cax.set_visible(False)\n ax.ax_heatmap.yaxis.set_ticks_position('left')\n ax.ax_heatmap.yaxis.set_label_position('left')\n\n if y is None:\n ax.ax_col_dendrogram.set_position((0.1, 0.8, 0.9, 0.1))\n ax.ax_heatmap.set_position((0.1, 0.2, 0.9, 0.6))\n else:\n ax.ax_col_dendrogram.set_position((0.1, 0.8, 0.83, 0.1))\n ax.ax_heatmap.set_position((0.1, 0.2, 0.84, 0.6))\n ax = plt.axes([0.95, 0.2, 0.05, 0.6])\n x_ = y.values\n y_ = np.arange(len(x_))[::-1]\n ax.plot(x_, y_, lw=4)\n ax.get_yaxis().set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.set_xlabel('{:s}'.format(y.name), fontsize='large')\n if self.save:\n plt.savefig(**self.save)\n","sub_path":"xenonpy/visualization/heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"457201343","text":"from __future__ import absolute_import, print_function\n\nimport inspect\nimport argparse\n\nfrom .config import Config\nfrom .get import GetCommand\nfrom .add import AddCommand\nfrom .authorized_keys import AuthorizedKeysCommand\n\n\ndef main():\n \"\"\"\n Load settings, parse console arguments, and run the requested command.\n \"\"\"\n\n # available commands\n commands = {\n 'get-key': GetCommand,\n 'add-key': AddCommand,\n 'authorized-keys': AuthorizedKeysCommand}\n\n # load the configuration\n config = Config.autodetect()\n\n # create the argument parser\n parser = argparse.ArgumentParser()\n\n # configure common parameters\n parser.add_argument(\n '-H', 
'--ldap-uri',\n help=\"LDAP directory URI\")\n\n parser.add_argument(\n '-b', '--search-base',\n help=\"LDAP search base\")\n\n # create argument parsers for commands\n subparsers = parser.add_subparsers()\n for name, command in commands.items():\n # create the parser\n command_parser = subparsers.add_parser(\n name,\n help=inspect.getdoc(command))\n\n # configure it to run the command\n command_parser.set_defaults(command=command)\n command.configure_parser(command_parser)\n\n # parse arguments\n args = parser.parse_args()\n\n # update common settings\n if args.ldap_uri:\n config.uri = args.ldap_uri\n\n if args.search_base:\n config.user_base = args.search_base\n config.group_base = args.search_base\n\n # run the command\n command = args.command(config, args)\n command.run()\n","sub_path":"ssh_ldap_utils/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"610240011","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 11 18:49:10 2016\r\n\r\n@author: Humber\r\n** Read first: \r\n Befor doing anything follow the steps - \r\n Step 1:\r\n Before running the code save all the Links from step 1 in a single xlsx file \r\n named - getProdData.xlsx, in the first sheet and remove the blank rows.\r\n Step 2:\r\n The output will be saved inside the success.txt file automatically.\r\n Step 3:\r\n The process will stop cause of server block or by excaption. Follow the steps - \r\n a. Make a copy of success.txt file.\r\n b. Copy all the data from the file to its copy.\r\n c. The right side Console shows the last process a success or fail so you will\r\n last supc number from thoes files where the img link will have that supc number. \r\n Copy it.\r\n d. Find the supc code in the getProdData.xlsx file and delete that row and the\r\n above rows.\r\n e. Before running close the right side Console and reopen it from the menu as \r\n \"Consoles -> Open an IPython console\". Then rerun thee process again. \r\n New links will be scrapped inside the success or fail text files.\r\n f. 
repeat from step b to e.\r\n\"\"\"\r\nimport xlrd\r\nimport os.path\r\nfrom bs4 import BeautifulSoup\r\nimport urllib.request as req\r\nimport contextlib\r\nimport re\r\n\r\n\r\ncrawlProdData = r'Input_3crawlProdData.xlsx'\r\nallData = r\"Output_3AllData.txt\"\r\nimgList = []\r\nbreadcrumData = ''\r\n\r\n\"\"\"Delete temp output file\"\"\"\r\ntry:\r\n with open(os.path.join(os.path.dirname(__file__), allData)) as existing_file:\r\n existing_file.close()\r\n os.remove(os.path.join(os.path.dirname(__file__), allData))\r\nexcept Exception as e:\r\n print(\"Delete Output_3AllData.txt file exception- \" + str(e))\r\n\r\ndef save_to_file(supcCode,productUrl,breadCrum,img,title,uli,matter):\r\n try: \r\n with open(os.path.join(os.path.dirname(__file__), allData), \"a\") as myfile:\r\n myfile.write(str(supcCode) +','+ str(productUrl) +','+ str(breadCrum) +','+ str(img) +','+ title +','+ uli +','+ matter +'\\n')\r\n print(\"step 3 - success\")\r\n except Exception as e:\r\n print(\"save_to_file method exception- \" + str(e))\r\n \r\ndef get_Prod_Data(data):\r\n try:\r\n# productUrl = r\"http://www.snapdeal.com/product/total-gym-home-gym-adjustable/683563847221#bcrumbSearch:SDL717687292\"\r\n# productUrl = r\"http://www.snapdeal.com/product/jaspo-kids-delite-pro-junior/662812944654#bcrumbSearch:SDL218198354\"\r\n \r\n for productUrl in data: \r\n productUrl = str(productUrl)\r\n\r\n productUrl = productUrl.replace(\"'\",\"\").replace(\"[\",\"\").replace(\"]\",\"\")\r\n \r\n \r\n \r\n \"\"\"Here we are knowcking the door of snapdeal server as a human agent and not python\"\"\"\r\n headers = {}\r\n headers['User-Agent'] = \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"\r\n reqest = req.Request(productUrl, headers = headers) \r\n \r\n with contextlib.closing(req.urlopen(reqest)) as url: \r\n content = url.read().decode('UTF-8') \r\n soup = BeautifulSoup(content,\"lxml\")\r\n \r\n \"\"\"Get Bread Crums / Category\"\"\"\r\n breadCrumsss = soup.find('div',attrs={\"id\":\"breadCrumbWrapper2\"})\r\n breadCrumsss.findAll('span')\r\n breadCrums = breadCrumsss.text\r\n breadCrum = ' '.join(breadCrums.split()).replace(',','--').replace('Sports & Fitness Fitness Fitness Equipment','Sports & Fitness > Fitness > Fitness Equipment >')\r\n \r\n \r\n \"\"\"Get image link\"\"\" \r\n \r\n imgsss = soup.find('div',attrs={\"id\":\"bx-pager-left-image-panel\"}) \r\n \r\n imgss = imgsss.findAll('img')\r\n \r\n for imgs in imgss: \r\n imgList.append(str(imgs))\r\n \r\n rr = '||'.join(imgList)\r\n \r\n # img = re.findall('http://n1.sdlcdn.com/imgs\\/[a-z]\\/[0-9]\\/[a-z]\\/[\\-a-zA-Z0-9]+.jpg', rr)\r\n \r\n \r\n \r\n \"\"\"Get product title\"\"\" \r\n titless = soup.find('h1',attrs={\"class\":\"pdp-e-i-head\"}) \r\n titles = titless.text\r\n title = titles.replace(',','--')\r\n \r\n \"\"\"Get product Highlights\"\"\" \r\n div = soup.find('div',attrs={\"class\":\"spec-body\"}) \r\n uls = div.find('ul',attrs={\"class\":\"dtls-list clear\"}) \r\n ul = uls.text\r\n uli = ' '.join(ul.split()).replace(',','--') \r\n \r\n \"\"\"Get SUPC Code\"\"\" \r\n supcCode = uli.split(\"SUPC: \",1)[1]\r\n \r\n \"\"\"Get product Details\"\"\" \r\n div1 = soup.find('div',attrs={\"class\":\"detailssubbox\"}) \r\n matters = div1.text\r\n matter = ' '.join(matters.split()).replace(',','--')\r\n \r\n save_to_file(supcCode,productUrl,breadCrum,rr,title,uli,matter)\r\n \r\n imgList.clear()\r\n \r\n except Exception as e:\r\n print(\"get_Prod_Data method exception- \" + str(e))\r\n\r\n\"\"\"Start 
of the Program\"\"\"\r\nbook = xlrd.open_workbook(os.path.join(os.path.dirname(__file__), crawlProdData))\r\nsheet = book.sheet_by_index(0) #or by the index it has in excel's sheet collection\r\n \r\ndata = [] #make a data store\r\nfor i in range(sheet.nrows):\r\n data.append(sheet.row_values(i))\r\n \r\nget_Prod_Data(data)\r\n\r\n","sub_path":"Python_Work/Python_Snapdeal/Step 3/Setp_3CrawlProdData - ver 2.0.py","file_name":"Setp_3CrawlProdData - ver 2.0.py","file_ext":"py","file_size_in_byte":5855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"508962110","text":"import torch\nimport torchvision as tv\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision.utils import make_grid\n\nfrom utils.storage import save_trained, load_trained\nfrom utils.device import setup_device\nimport utils.tensorboard as tensorboard\n\nfrom utils.dataset import DifferentTargetSceneDataset\nfrom torch.utils.data import DataLoader\n\nfrom models.anOtherSwapNetSmaller import SwapModel\nfrom models.patchGAN import NLayerDiscriminator\n\n\n# Get used device\nGPU_IDS = [2]\ndevice = setup_device(GPU_IDS)\n\n# Parameters\nNAME = 'Alice'\nBATCH_SIZE = 50\nNUM_WORKERS = 8\nEPOCHS = 30\nSIZE = 256\n\n# Configure training objects\nmodelA = SwapModel().to(device)\nmodelB = NLayerDiscriminator.to(device)\noptimizerA = torch.optim.Adam(modelA.parameters(), weight_decay=0)\noptimizerB = torch.optim.Adam(modelB.parameters(), weight_decay=0)\n\n# Losses\ndistance = nn.MSELoss().to(device)\n\n# Configure dataloader\ntrain_dataset = DifferentTargetSceneDataset(locations=['scene_abandonned_city_54'],\n transform=transforms.Resize(SIZE))\ntrain_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)\n\nDATASET_SIZE = len(train_dataset)\nprint(f'Dataset contains {DATASET_SIZE} samples.')\nprint(f'Running with batch size: {BATCH_SIZE} for {EPOCHS} epochs.')\n\n# Configure tensorboard\nwriter = tensorboard.setup_summary_writer(NAME)\ntensorboard_process = tensorboard.start_tensorboard_process()\nSHOWN_SAMPLES = 3\nVISUALIZATION_FREQ = DATASET_SIZE // BATCH_SIZE // 50 # every how many batches tensorboard is updated with new images\nprint(f'{SHOWN_SAMPLES} samples will be visualized every {VISUALIZATION_FREQ} batches.')\n\n# Train loop\nfor epoch in range(1, EPOCHS+1):\n train_loss, train_score = 0., 0.\n print(f'Epoch {epoch}:')\n \n step = 0\n sub_train_loss, sub_train_score = 0., 0.\n \n for batch_idx, batch in enumerate(train_dataloader):\n \n \n \n input_image = batch[0][0]['image'].to(device)\n target_image = batch[0][1]['image'].to(device)\n groundtruth_image = batch[1]['image'].to(device)\n input_color = batch[0][0]['color'].to(device)\n target_color = batch[0][1]['color'].to(device)\n input_direction = batch[0][0]['direction'].to(device)\n target_direction = batch[0][1]['direction'].to(device)\n \n # Forward\n modelA.train() \n modelB.train()\n \n output = model(input_image, target_image, groundtruth_image, encode_pred = True)\n \n groundtruth_scene_latent, input_scene_latent, target_scene_latent, relighted_scene_latent, \\\n groundtruth_light_latent, input_light_latent, target_light_latent, relighted_light_latent, \\\n groundtruth_light_predic, input_light_predic, target_light_predic, \\\n relighted_image, relighted_image2 = output \n \n loss = 1. 
* distance(relighted_image, groundtruth_image) \n \n\n # Backward (the optimizers are named optimizerA/optimizerB above; a bare `optimizer` was undefined)\n optimizerA.zero_grad()\n loss.backward()\n optimizerA.step()\n\n train_loss += loss.item()\n train_score += distance(input_image, groundtruth_image).item() / loss.item()\n sub_train_loss += loss.item()\n sub_train_score += distance(input_image, groundtruth_image).item() / loss.item()\n \n # Visualize current progress\n if (1+batch_idx) % VISUALIZATION_FREQ == 0:\n writer.add_image('Visualization/1-Input', make_grid(input_image[:SHOWN_SAMPLES]), step)\n writer.add_image('Visualization/2-Target', make_grid(target_image[:SHOWN_SAMPLES]), step)\n writer.add_image('Visualization/3-Ground-truth', make_grid(groundtruth_image[:SHOWN_SAMPLES]), step)\n writer.add_image('Visualization/4-Relighted', make_grid(relighted_image[:SHOWN_SAMPLES]), step)\n writer.add_image('Visualization/5-Relighted2', make_grid(relighted_image2[:SHOWN_SAMPLES]), step)\n\n writer.add_image('Light-latent/1-Input', make_grid(input_light_latent[:SHOWN_SAMPLES]), step)\n writer.add_image('Light-latent/2-Target', make_grid(target_light_latent[:SHOWN_SAMPLES]), step)\n writer.add_image('Light-latent/3-Ground-truth', make_grid(groundtruth_light_latent[:SHOWN_SAMPLES]), step)\n writer.add_image('Light-latent/4-Relighted', make_grid(relighted_light_latent[:SHOWN_SAMPLES]), step)\n \n step += 1\n writer.add_scalar('Sub-loss/1-Loss', sub_train_loss, step)\n writer.add_scalar('Sub-score/1-Score', sub_train_score, step)\n print('Sub-loss/1-Loss:', sub_train_loss, 'Sub-score/1-Score:', sub_train_score)\n sub_train_loss = 0.\n sub_train_score = 0.\n \n \n # Evaluate\n modelA.eval()\n modelB.eval()\n # TODO: Add test set evaluation here\n\n # Update tensorboard training losses\n writer.add_scalar('Loss/1-Loss', train_loss, epoch)\n writer.add_scalar('Score/1-Score', train_score, epoch)\n\n# Store trained model (save_trained is imported directly from utils.storage above)\nsave_trained(modelA, NAME)\n\n# Terminate tensorboard\ntensorboard.stop_tensorboard_process(tensorboard_process)\n","sub_path":"misc/oldExperiments/train_SmallSwapModelWithGAN.py","file_name":"train_SmallSwapModelWithGAN.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"357422311","text":"#!/usr/bin/env python\n#coding:utf-8\n#Author:liangpeili\n\nimport os\nimport MySQLdb\nimport time\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n# open the output file for the query results\ncur_time = time.strftime(\"%Y%m%d%H%M%S\")\nresult_file = 'data' + cur_time + '.txt'\nf = open(result_file, 'w')\n\n\n\n# connect to the database and fetch the query results\nquery_sql = 'select * from soufang limit 2'\nconn = MySQLdb.connect(host='localhost', user='root', passwd='123456', db='house_buyer', charset='utf8')\ncur = conn.cursor()\ncur.execute(query_sql)\nresults = cur.fetchall()\n\n# write the header row (column names) to the file\ndesc = cur.description\nlength = len(results[0])\ndesc_line = ''\nfor i in range(length):\n desc_line = desc_line + str(desc[i][0]) + '\\t'\nf.write(desc_line+'\\n')\n\n# write the query results to the file\nfor result in results:\n line = ''\n for i in range(length):\n line = line + str(result[i]) + '\\t'\n f.write(line+'\\n')\n\nf.close()\ncur.close()\nconn.close()\n\n# pack the result file into a compressed archive\nos.system('tar -zcvf %s.tgz %s' % ('data'+cur_time, result_file))\n","sub_path":"query_db.py","file_name":"query_db.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"616560453","text":"# Importing all necessary libraries\nimport pandas as pd\nimport numpy as np\nfrom keras.preprocessing import image\nfrom 
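# Aside: a minimal sketch, not part of any file above. train_SmallSwapModelWithGAN.py
# earlier creates a patchGAN discriminator (modelB) and optimizerB but never updates
# them. One conventional discriminator step inside its batch loop could look like the
# lines below -- modelB, optimizerB, relighted_image and groundtruth_image are the
# script's own names, while the BCE-with-logits loss choice is an assumption:
real_logits = modelB(groundtruth_image)
fake_logits = modelB(relighted_image.detach())
loss_D = F.binary_cross_entropy_with_logits(real_logits, torch.ones_like(real_logits)) + \
         F.binary_cross_entropy_with_logits(fake_logits, torch.zeros_like(fake_logits))
optimizerB.zero_grad()
loss_D.backward()
optimizerB.step()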
training_dataset_creator import get_dataset\nimport tensorflow as tf\n\nfrom model_definition import model_definition\n\n\ndir = 'training_dataset\\\\'\ntotal_images = get_dataset(dir)\n\ndata = list()\nfor x in total_images:\n ent = [x, x.split('_')[0]]\n data.append(ent)\n\ntrain = pd.DataFrame(data, columns=['image', 'color'])\n\ntrain = pd.concat([train.drop('color', axis=1),\n pd.get_dummies(train['color'],\n prefix='color',\n prefix_sep='_',\n dummy_na=False)],\n axis=1)\n\n\ntrain_image = []\nfor i in range(len(total_images)):\n img = image.load_img(dir + train['image'][i])\n img = image.img_to_array(img)\n img = img / 255\n train_image.append(img)\n\nX = np.array(train_image)\ny = np.array(train[['color_red', 'color_green', 'color_blue']])\n\nmodel_path = 'model.h5'\n\nmodel = model_definition(X, y)\nmodel.save(model_path)\nprint('Model saved to:', model_path)\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"465825236","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\n\nfrom fmri import process_fmri as process\n\n\ndef encode(C, X, H, Gp, Gn):\n \"\"\"\n encodes\n :param C: data labels\n :param X: data to be windowed\n :param H: window size\n :param Gp: start point guard\n :param Gn: end point guard\n :return:\n \"\"\"\n _, m, _ = C.shape\n Np, p, T = X.shape\n N = T - H + 1\n num_examples = Np * N\n \n y = np.zeros([Np, N])\n C_temp = np.zeros(T)\n \n for i in range(Np):\n for j in range(m):\n temp_idx = [idx for idx, e in enumerate(C[i, j, :]) if e == 1]\n cue_idx1 = [idx - Gn for idx in temp_idx]\n cue_idx2 = [idx + Gp for idx in temp_idx]\n cue_idx = list(zip(cue_idx1, cue_idx2))\n \n for idx in cue_idx:\n C_temp[slice(*idx)] = j + 1\n \n y[i, :] = C_temp[0 : N]\n \n X_windowed = np.zeros([Np, N, p, H])\n \n for t in range(N):\n X_windowed[:, t, :, :] = X[:, :, t : t + H]\n \n y = np.reshape(y, (num_examples))\n X_windowed = np.reshape(X_windowed, (num_examples, p, H))\n \n return [X_windowed, y]\n\n\ndef craft(X):\n \"\"\"\n feature crafting for signal\n :param X: fmri signal data\n :return: crafted features\n \"\"\"\n num_examples, p, T = X.shape\n X_f = np.empty(shape=(num_examples, p, 0))\n\n # energy\n X_e = np.sum(np.power(X, 2), axis=2)\n X_e = np.reshape(X_e, (num_examples, p, 1))\n\n # min\n X_min = np.min(X, axis=2)\n X_min = np.reshape(X_min, (num_examples, p, 1))\n\n # max\n X_max = np.max(X, axis=2)\n X_max = np.reshape(X_max, (num_examples, p, 1))\n\n # avg\n X_avg = np.mean(X, axis=2)\n X_avg = np.reshape(X_avg, (num_examples, p, 1))\n\n # standard deviation\n X_std = np.std(X, axis=2)\n X_std = np.reshape(X_std, (num_examples, p, 1))\n\n # fft\n X_ft = np.fft.fft(X, axis=2)\n X_ft = np.abs(X_ft)\n\n # differences\n X_fd = np.diff(X, axis=2)\n X_sd = np.diff(X_fd, axis=2) \n\n # zero crossings\n X_sgn = np.sign(X) > 0\n X_sgn_d = np.abs(np.diff(X_sgn, axis=2))\n X_num_zc = np.sum(X_sgn_d, axis=2)\n X_num_zc = np.reshape(X_num_zc, (num_examples, p, 1))\n\n features = [X, X_e, X_min, X_max, X_avg, X_std, X_ft, X_fd, X_sd, X_num_zc]\n for ft in features:\n X_f = np.append(X_f, ft, axis=2)\n \n _, _, d = X_f.shape\n \n X_f = np.reshape(X_f, (num_examples, p * d))\n \n return X_f\n\n\ndef 
apply_pca(X, n):\n scaler = preprocessing.StandardScaler().fit(X)\n X_std = scaler.transform(X) \n \n pca = PCA(n_components=n)\n pca.fit(X_std)\n score = pca.explained_variance_ratio_\n score = sum(score)\n\n X_pca = pca.transform(X_std)\n \n return [X_pca, score]\n\n\ndef try_model(X, y, model):\n \"\"\"\n run a ML model with the data and labels\n :param X: data\n :param y: labels\n :param model: desired ML model\n :return: resulting confusion matrix\n \"\"\"\n seed = 21\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=seed)\n model.fit(X_train, y_train)\n y_hat = model.predict(X_test)\n \n stimuli = ['none', 'LF', 'LH', 'RF', 'RH', 'T']\n\n C_mat = confusion_matrix(y_test, y_hat) \n C = pd.DataFrame(C_mat, index=stimuli, columns=stimuli)\n \n print(C)\n print()\n \n return C_mat\n\n\ndef main():\n \"\"\"\n example data processing and training pipeline\n :return:\n \"\"\"\n H, Gp, Gn = 15, 4, 4\n num_components = 300\n seed = 21\n\n C, _, X_bar = process.get_dataset('./all_subjects/', p=148, session='MOTOR_LR')\n \n X, y = encode(C, X_bar, H, Gp, Gn)\n X_f = craft(X)\n\n X_pca, pct_explained = apply_pca(X_f, num_components)\n\n model_lr = LogisticRegression(random_state=seed, class_weight='balanced')\n \n try_model(X_pca, y, model_lr)\n\nif __name__ == '__main__':\n main()\n","sub_path":"fmri/windowing_fmri.py","file_name":"windowing_fmri.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"90444776","text":"from string import ascii_lowercase\n\nrates = {v: i for i, v in enumerate(ascii_lowercase)}\n\ndef prize_draw(st, we, n):\n if n > len(we): return 'Not enough participants'\n if not n: 'No participants'\n names, w_nums = st.split(','), {}\n for i in range(len(we)):\n w_nums[names[i]] = we[i] * get_som(names[i])\n \n \ndef get_som(name):\n ords = sum(rates[sym.lower()] for sym in name)\n return len(name) + ords","sub_path":"Codewars/prizes_test.py","file_name":"prizes_test.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"122500804","text":"#-*- coding: utf-8 -*-\n\nimport pickle\nwith open('/home/hyeyoung/dataset/data/doc_list.txt', 'rb') as f:\n data = pickle.load(f)\n\nimport kss\nsentence_list = []\nindex = 1\nfor one_data in data:\n sentence_oneDoc_list = kss.split_sentences(one_data)\n for one_sentence in sentence_oneDoc_list:\n sentence_list.append(one_sentence)\n index += 1\n print(index)\n\nwith open('/home/hyeyoung/dataset/data/sentence_list.txt', 'wb') as f:\n pickle.dump(sentence_list, f)\n","sub_path":"105.189 - topicmodel/makeSentencelist.py","file_name":"makeSentencelist.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"68642109","text":"import numpy as np\nimport pandas as pd\nfrom alex_featureselection import total_feature_extractor\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import cross_val_score, KFold\nfrom sklearn.svm import SVC,LinearSVC\nfrom sklearn.metrics import balanced_accuracy_score, make_scorer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.feature_selection import SelectPercentile, mutual_info_classif\nfrom imblearn.pipeline import Pipeline\nfrom imblearn.ensemble import 
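# Aside: a small helper, not in windowing_fmri.py above, sketching one way to choose
# n for its apply_pca: fit PCA once and keep the smallest number of components whose
# cumulative explained variance reaches a target (0.95 here is an arbitrary example).
import numpy as np
from sklearn.decomposition import PCA

def pick_n_components(X_std, target=0.95):
    ratios = PCA().fit(X_std).explained_variance_ratio_
    # index of the first cumulative sum >= target, converted to a component count
    return int(np.searchsorted(np.cumsum(ratios), target) + 1)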
BalancedBaggingClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom alex_classifier import AlexClassifier\n\nimport sklearn.ensemble as skl\nimport csv\n\n# Pipeline variables:\nshould_add_before_after = False\nshould_run_on_test = False\ntest_different_models = False\ndo_gridsearch = False\nown_model = False\nvoting_classifier = True\n\n\nprint('------ opening files -------')\nX_eeg1 = pd.read_csv('train_eeg1.csv',sep=',',index_col=0)\nX_eeg1 = np.asarray(X_eeg1)\n\n\nX_eeg2 = pd.read_csv('train_eeg2.csv',sep=',',index_col=0)\nX_emg = np.asarray(X_eeg2)\n\nX_emg = pd.read_csv('train_emg.csv',sep=',',index_col=0)\nX_emg = np.asarray(X_emg)\n\n\n#%%\nY = pd.read_csv('train_labels.csv',sep=',',index_col=0)\nY = np.asarray(Y)\nY = np.ravel(Y)\n\nX_test_eeg1 = pd.read_csv('test_eeg1.csv',sep=',',index_col=0)\nX_test_eeg1 = np.asarray(X_test_eeg1)\n\nX_test_eeg2 = pd.read_csv('test_eeg2.csv',sep=',',index_col=0)\nX_test_eeg2 = np.asarray(X_test_eeg2)\n\nX_test_emg = pd.read_csv('test_emg.csv',sep=',',index_col=0)\nX_test_emg = np.asarray(X_test_emg)\n\n\nprint('------ preprocessing files ------')\nX = total_feature_extractor(X_eeg1,X_eeg2, X_emg)\nX_test = total_feature_extractor(X_test_eeg1,X_test_eeg2, X_test_emg)\n\nif should_add_before_after:\n X1 = np.zeros(X.shape)\n X1[0, :] = 3 * X[0, :]\n X1[-1, :] = 3 * X[-1, :]\n for i in range(1, len(X) - 1):\n X1[i, :] = X[i - 1, :] + X[i, :] + X[i + 1, :]\n X = X1\n\n X1 = np.zeros(X_test.shape)\n X1[0, :] = 3 * X_test[0, :]\n X1[-1, :] = 3 * X_test[-1, :]\n for i in range(1, len(X_test) - 1):\n X1[i, :] = X_test[i - 1, :] + X_test[i, :] + X_test[i + 1, :]\n X_test = X1\n\nprint('----- Training models ------')\n\nif voting_classifier:\n model1 = LinearSVC(class_weight='balanced', max_iter=3000, dual=False)\n model2 = SVC(class_weight='balanced')\n model3 = BalancedBaggingClassifier(base_estimator=model2, n_estimators=100)\n model = skl.VotingClassifier([('LinearSVC', model1), ('SVC', model2), ('balancedbagging', model3)])\n model = Pipeline([('standardizer', preprocessing.StandardScaler()),\n ('model', model)\n ])\n scorer = make_scorer(balanced_accuracy_score)\n cv = KFold(n_splits=3, shuffle=False)\n score = cross_val_score(model, X, Y, cv=cv, scoring=scorer)\n print('voting classifier: {}'.format(score))\n\nif own_model:\n model = AlexClassifier(2)\n cv_score = model.crossvalidate(X,Y)\n print('own model: {}'.format(cv_score))\n\n\nif test_different_models:\n model1 = LinearSVC(class_weight='balanced',max_iter=3000,dual=False)\n model2 = SVC(class_weight='balanced')\n model3 = LogisticRegression(class_weight='balanced',solver='lbfgs',multi_class='auto')\n model4 = RandomForestClassifier(n_estimators=100, class_weight='balanced')\n model5 = BalancedBaggingClassifier(base_estimator = model2, n_estimators=100)\n model_list = [model1,model2,model3,model4,model5]\n\n for idx, model in enumerate(model_list):\n selector = SelectPercentile(mutual_info_classif,90)\n model = Pipeline([('standardizer', preprocessing.StandardScaler()),\n ('MI', selector),\n ('model', model)\n ])\n scorer = make_scorer(balanced_accuracy_score)\n cv = KFold(n_splits=3,shuffle=False)\n score = cross_val_score(model, X, Y, cv=cv, scoring=scorer)\n print('balanced accuracy score model {}: {}'.format(idx+1, score))\n\nif do_gridsearch:\n selector = SelectPercentile(mutual_info_classif, 90)\n model_svc = SVC(class_weight='balanced')\n model = BalancedBaggingClassifier(base_estimator=model_svc, n_estimators=100)\n pipeline = Pipeline([('standardizer', 
preprocessing.StandardScaler()),\n ('MI', selector),\n ('model', model)\n ])\n percentiles = [70, 80, 90, 100]\n # Create the random grid\n print('\\n********* Performing Grid Search')\n random_grid = {'MI__percentile': percentiles}\n\n grid_search_rand = GridSearchCV(pipeline, random_grid, scoring=make_scorer(balanced_accuracy_score),\n cv=KFold(n_splits=3, shuffle=False),\n verbose=1, n_jobs=3)\n grid_search_rand.fit(X, Y)\n print(\"DONE BITCH. BEST PARAMS\")\n # print(grid_search.best_params_)\n print(grid_search_rand.best_params_)\n print(\"Results:\")\n print('Best score: {}'.format(grid_search_rand.best_score_))\n print(grid_search_rand.cv_results_)\n\nif should_run_on_test:\n selector = SelectPercentile(mutual_info_classif, 90)\n model_svc = SVC(class_weight='balanced')\n model = BalancedBaggingClassifier(base_estimator=model_svc, n_estimators=100)\n pipeline = Pipeline([('standardizer', preprocessing.StandardScaler()),\n ('model', model)\n ])\n pipeline.fit(X,Y)\n #%%\n y_pred = pipeline.predict(X_test)\n\n print('\\n********* Writing to file')\n with open('result.csv', mode='w') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow(['Id', 'y'])\n for i in range(len(y_pred)):\n writer.writerow([i, y_pred[i]])\n\n\n\n\n","sub_path":"Project5/alex_pipeline2.py","file_name":"alex_pipeline2.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"580981585","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport time\nimport json\n\nSI_NONE = 0xFFFF\n\n\ndef load_speed_json(f):\n j = json.load(f)\n # Index speed indexes to names\n speed_i2s = {}\n for k, v in j['speed_model'].items():\n i = v['speed_index']\n if i != SI_NONE:\n speed_i2s[i] = k\n return j, speed_i2s\n\n\ndef gen_tiles(fnin, speed_i2s):\n for l in open(fnin):\n # lappend items pip $name $speed_index\n # puts $fp \"$type $tile $grid_x $grid_y $items\"\n parts = l.strip().split()\n tile_type, tile_name, grid_x, grid_y = parts[0:4]\n grid_x, grid_y = int(grid_x), int(grid_y)\n tuples = parts[4:]\n assert len(tuples) % 3 == 0\n pips = {}\n wires = {}\n for i in range(0, len(tuples), 3):\n ttype, name, speed_index = tuples[i:i + 3]\n name_local = name.split('/')[1]\n {\n 'pip': pips,\n 'wire': wires,\n }[ttype][name_local] = speed_i2s[int(speed_index)]\n yield (tile_type, tile_name, grid_x, grid_y, pips, wires)\n\n\ndef run(fnin, fnout, speed_json_fn, verbose=False):\n speedj, speed_i2s = load_speed_json(open(speed_json_fn, 'r'))\n\n tiles = {}\n for tile in gen_tiles(fnin, speed_i2s):\n (tile_type, tile_name, grid_x, grid_y, pips, wires) = tile\n this_dat = {'pips': pips, 'wires': wires}\n if tile_type not in tiles:\n tiles[tile_type] = this_dat\n else:\n if tiles[tile_type] != this_dat:\n print(tile_name, tile_type)\n print(this_dat)\n print(tiles[tile_type])\n assert 0\n\n j = {'tiles': tiles}\n json.dump(\n j, open(fnout, 'w'), sort_keys=True, indent=4, separators=(',', ': '))\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser(description='Solve timing solution')\n parser.add_argument(\n '--speed-json',\n default='../../speed/build/speed.json',\n help='Provides speed index to name translation')\n parser.add_argument('fnin', default=None, help='input tcl output .txt')\n parser.add_argument('fnout', default=None, help='output .json')\n args = parser.parse_args()\n\n run(args.fnin, args.fnout, speed_json_fn=args.speed_json, verbose=False)\n\n\nif __name__ == '__main__':\n 
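# Aside (illustrative only): judging from the tcl-writer comments inside gen_tiles in
# tile_txt2json.py above, each input line it parses is whitespace-separated as
#   tile_type tile_name grid_x grid_y [pip|wire tile/name speed_index]...
# so a made-up line for one tile carrying one pip and one wire might read:
#   CLBLL_L CLBLL_L_X2Y3 2 3 pip CLBLL_L_X2Y3/SOME_PIP 12 wire CLBLL_L_X2Y3/SOME_WIRE 7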
main()\n","sub_path":"fuzzers/007-timing/timgrid/tile_txt2json.py","file_name":"tile_txt2json.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"24304838","text":"class PersegiPanjang:\n def __init__(self, panjang, lebar):\n self.panjang = panjang\n self.lebar = lebar\n self.hitung_luas()\n self.hitung_keliling()\n def hitung_luas(self):\n self.luas = self.panjang * self.lebar\n def hitung_keliling(self):\n self.keliling = 2 * (self.panjang + self.lebar)\n def ubah_panjang(self, nilai):\n self.panjang = nilai\n self.hitung_luas()\n self.hitung_keliling()\n def ubah_lebar(self, nilai):\n self.lebar = nilai\n self.hitung_luas()\n self.hitung_keliling()\n\npp = PersegiPanjang(10, 7)\npp.ubah_panjang(9)\nprint(\n pp.panjang,\n pp.lebar,\n pp.luas,\n pp.keliling\n)\n","sub_path":"code/atribut_turunan.py","file_name":"atribut_turunan.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"99448768","text":"import heapq\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9) # one billion stands in for infinity\n\n# read the number of nodes and edges\nn, m = map(int, input().split())\n\n# read the start node\nstart = int(input())\n\n# adjacency list holding, for each node, the nodes connected to it\ngraph = [[] for _ in range(n+1)]\n\n# initialize the shortest-distance table to infinity\ndistance = [INF] * (n+1)\n\n#visited = [False] * (n+1)\n\n# read all edge information\nfor _ in range(m) :\n\ta, b, c = map(int, input().split())\n\t# going from node a to node b costs c\n\tgraph[a].append((b,c))\n\n#print(graph)\n\ndef dijkstra(start) :\n\th = []\n\t# the shortest path to the start node is 0; push it onto the queue\n\theapq.heappush(h,(0,start))\n\tdistance[start] = 0\n\n\t# while the queue is not empty\n\twhile h :\n\t\t# pop the node with the smallest tentative distance\n\t\tdist, now = heapq.heappop(h)\n\t\t \n\t\t#if not visited[now[1]] :\n\t\t#\tvisited[now[1]] = True\n\n\t\t# skip stale queue entries: this node was already settled with a shorter distance\n\t\tif distance[now] < dist :\n\t\t\tcontinue\n\n\t\t# check the neighbours of the current node\n\t\tfor i in graph[now] :\n\t\t\tcost = dist + i[1]\n\t\t\t# if going through the current node gives a shorter path to the neighbour\n\t\t\tif cost < distance[i[0]] :\n\t\t\t\tdistance[i[0]] = cost\n\t\t\t\theapq.heappush(h, (cost, i[0]))\n\n# run Dijkstra's algorithm\ndijkstra(start)\n\n# print the shortest distance to every node\nfor i in range(1,n+1) :\n\t# unreachable nodes are reported as INFINITY\n\tif distance[i] == INF :\n\t\tprint('INFINITY')\n\t# otherwise print the distance\n\telse :\n\t\tprint(distance[i])\n","sub_path":"Shortest_Path/Dijkstra_v2.py","file_name":"Dijkstra_v2.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"257307424","text":"from django.shortcuts import render,redirect\nfrom django.views.decorators.http import require_http_methods, require_POST\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom .models import User\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\n\n# Create your views here.\n@require_http_methods(['GET','POST'])\ndef signup(request):\n if request.user.is_authenticated:\n return redirect('maps:index')\n \n if request.method=='POST':\n form = CustomUserCreationForm(request.POST)\n # print(request.POST)\n # print(form.is_valid())\n # print(form.error_messages)\n if form.is_valid():\n user = form.save()\n auth_login(request, user)\n return 
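# Aside: a made-up worked example for the Dijkstra_v2.py program above.
# Feeding it this stdin (5 nodes, 6 directed edges, start node 1):
#   5 6
#   1
#   1 2 2
#   1 3 5
#   2 3 3
#   2 4 1
#   3 4 3
#   4 5 1
# it prints the shortest distances from node 1, one per line: 0, 2, 5, 3, 4
# (node 4 is reached via 1->2->4 at cost 3, and node 5 via node 4 at cost 4).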
redirect('maps:index')\n else:\n form = CustomUserCreationForm()\n\n context = {\n 'form': form,\n }\n return render(request, 'accounts/signup.html', context)\n\n\n@require_http_methods(['GET','POST'])\ndef login(request):\n if request.user.is_authenticated:\n return redirect('maps:index')\n\n if request.method == 'POST':\n form = AuthenticationForm(request, request.POST)\n if form.is_valid():\n user = form.get_user()\n auth_login(request, user)\n return redirect('maps:index')\n\n else:\n form = AuthenticationForm()\n context = {\n 'form': form\n }\n return render(request, 'accounts/signup.html', context)\n\n\n@require_http_methods(['POST',])\ndef logout(request):\n auth_logout(request)\n return redirect('maps:index')\n\n\n@login_required\ndef update(request):\n # form = CustomUserChangeForm(instance = request.user)\n user = User.objects.get(username=request.user)\n if request.method == 'POST':\n form = CustomUserChangeForm(request.POST, instance=user)\n print(form.data)\n print(form.errors)\n print(form.is_valid())\n if form.is_valid():\n form.save()\n return redirect('accounts:update')\n else:\n\t form = CustomUserChangeForm(instance=user)\n\n context = {\n 'form': form,\n }\n return render(request, 'accounts/mypage.html', context)\n\n\n@login_required\ndef delete(request):\n if request.method == 'POST':\n request.user.delete()\n return redirect('maps:index')\n return redirect('accounts:mypage')","sub_path":"kindeffects/kindeffects/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"380007209","text":"\"\"\"\n209 Minimum size subarray sum\nGiven an array of n positive integers and a positive integer s, find the minimal length of a contiguous subarray of which the sum ≥ s. 
If there isn't one, return 0 instead.\n\nFor example, given the array [2,3,1,2,4,3] and s = 7,\nthe subarray [4,3] has the minimal length under the problem constraint.\n\"\"\"\nclass Solution(object):\n def minSubArrayLen(self, s, nums):\n \"\"\"\n :type s: int\n :type nums: List[int]\n :rtype: int\n \"\"\"\n l = 0\n r = -1\n sum = 0\n res = len(nums) + 1\n while r + 1 < len(nums):\n while r + 1 < len(nums) and sum < s:\n r += 1\n sum += nums[r]\n\n if sum >= s:\n res = min(res, r - l + 1)\n\n while l < len(nums) and sum >= s:\n sum -= nums[l]\n l += 1\n if sum >= s:\n res = min(res, r - l + 1)\n if res == len(nums) + 1:\n return 0\n return res\n # sliding-window (two-pointer) technique","sub_path":"array/minimumSubArray.py","file_name":"minimumSubArray.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"274236811","text":"import setuptools\nfrom numpy.distutils.core import Extension, setup\n\nwith open(\"fodMC/version.py\", \"r\") as fh:\n version = {}\n exec(fh.read(), version)\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"fodMC\",\n version=version[\"__version__\"],\n author=\"Kai Trepte\",\n author_email=\"kai.trepte1987@gmail.com\",\n description=\"Fermi-orbital descriptor Monte-Carlo\",\n url=\"https://github.com/pyflosic/fodMC\",\n license='APACHE2.0',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n packages = ['fodMC/pyfodmc','fodMC/gui','fodMC/lib'],\n zip_safe=False,\n ext_modules=[Extension(name='fodmc', sources=['fodMC/lib/fodmc.f90'], f2py_options=['--quiet'])]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"256006983","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 21 11:43:36 2019\n@author: kmussar\n\"\"\"\n# Imports\nfrom TwitterSearch import TwitterSearch, TwitterSearchOrder, TwitterSearchException, TwitterUserOrder \n# import pandas as pd\nfrom collections import defaultdict\nimport time\n# import pickle\nimport json\n\n\n# Authenticate - enter your developer keys here\nts = TwitterSearch(\n consumer_key = '',\n consumer_secret = '',\n access_token = '',\n access_token_secret = ''\n )\n\n# add additional terms here \nsearch_terms = ['flu','cough','sick']\n\n# to comply with TwitterAPI request restrictions\ndef my_callback_closure(current_ts_instance): # accepts ONE argument: an instance of TwitterSearch\n queries, tweets_seen = current_ts_instance.get_statistics()\n if queries > 0 and (queries % 5) == 0: # trigger delay every 5th query\n time.sleep(60) # sleep for 60 seconds\n print(tweet_count)\n \nfor term in search_terms:\n with open('tweets_'+term +'.json', 'a+') as outfile:\n tweet_count = 0 \n while tweet_count <100000:\n try:\n tso = TwitterSearchOrder() # create a TwitterSearchOrder object\n #tso.set_since_id=194380851\n tso.set_keywords([term]) \n tso.set_language('en') # we want to see English tweets only\n tso.set_include_entities(True) # and do give us all those entity information\n\n for tweet in ts.search_tweets_iterable(tso, callback=my_callback_closure):\n tweet_count += 1\n #if tweet_count % 100:\n #print(tweet_count)\n user = defaultdict()\n place = 
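# Aside: quick sanity checks (not in the original file) for the sliding-window
# minSubArrayLen above, using the example from its own docstring:
assert Solution().minSubArrayLen(7, [2, 3, 1, 2, 4, 3]) == 2   # the subarray [4,3]
assert Solution().minSubArrayLen(100, [1, 2, 3]) == 0          # no qualifying subarray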
defaultdict()\n geo = defaultdict\n coordinates = defaultdict\n tweet_unwind = defaultdict()\n # print( '@%s tweeted: %s' % ( tweet['user']['screen_name'], tweet['text'] ) )\n user = tweet['user']\n place = tweet['place']\n geo = tweet['geo']\n coordinates = tweet['coordinates']\n #unpack dictionaries and save them to lists\n if user != None:\n tweet.update(user)\n if geo != None:\n tweet.update(geo)\n if place != None:\n tweet.update(place)\n if coordinates != None:\n tweet.update(coordinates)\n json.dump(tweet, outfile, sort_keys=True, indent=4)\n outfile.write(\", \\n\")\n #df_sick2 = df_sick.append(tweet, ignore_index=True)\n\n except TwitterSearchException as e:\n print(e) ","sub_path":"GetTweets_post.py","file_name":"GetTweets_post.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"327347148","text":"import select\nimport socket\nimport sys\nimport logging\nfrom Data_parser import DataParser, DataReceiver\nfrom Http_worker import HttpWorker\nfrom Https_worker import HttpsWorker\nimport traceback\n\n\nlogging.basicConfig(filename=\"logs.log\", filemode=\"w\", level=logging.INFO,\n format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')\nlogger = logging.getLogger(\"log\")\n\n\ndef process_request(sock, data, dict_sockets_connection, actual_socket):\n config = DataParser.create_config(data)\n logger.info(\"Parse data and create connection\")\n if len(data) == 0 or len(config) == 0:\n sock.close()\n return\n logger.info(\"Config with data: {0}\".format(config))\n if config[\"method\"] == b'GET':\n HttpWorker.create_connection_get(sock, config)\n sock.close()\n if config[\"method\"] == b'CONNECT':\n HttpsWorker.create_connection_connect(sock, config, dict_sockets_connection, actual_socket)\n\n\ndef main():\n dict_sockets_connections = {}\n actual_socket = set()\n try:\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind((\"\", 8080))\n logger.info(\"Bind listening socket to port 8080\")\n server_socket.listen(200)\n actual_socket.add(server_socket)\n except Exception as e:\n print(e, traceback.format_exc())\n sys.exit(e)\n while True:\n try:\n act_soc, _, _ = select.select(actual_socket, [], [])\n for sock in act_soc:\n if sock is server_socket:\n client, addr = server_socket.accept()\n logger.info(\"Receive data from {0}\".format(socket.socket.getsockname(client)))\n data = DataReceiver.receive_data(client)\n process_request(client, data, dict_sockets_connections, actual_socket)\n else:\n logger.info(\"Receive data from {0}\".format(socket.socket.getsockname(sock)))\n data = DataReceiver.receive_data(sock)\n if len(data) == 0:\n continue\n else:\n logger.info(\"Send data to {0}\".format(socket.socket.getsockname(dict_sockets_connections[sock])))\n dict_sockets_connections[sock].send(data)\n except Exception as e:\n print(e)\n sys.exit(e)\n client.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"proxy/start_proxy_server.py","file_name":"start_proxy_server.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"3402074","text":"#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: chk_slv_err.py\n\n Description: Unit testing of chk_slv_err in mysql_clone.py.\n\n Usage:\n test/unit/mysql_clone/chk_slv_err.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 
7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\n\n# Local\nsys.path.append(os.getcwd())\nimport mysql_clone\nimport lib.gen_libs as gen_libs\nimport version\n\n__version__ = version.__version__\n\n\nclass Server(object):\n\n \"\"\"Class: Server\n\n Description: Class stub holder for mysql_class.Server class.\n\n Methods:\n __init__ -> Class initialization.\n get_err_stat -> get_err_stat function.\n get_name -> get_name function.\n\n \"\"\"\n\n def __init__(self):\n\n \"\"\"Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n \"\"\"\n\n self.io_err = \"IO\"\n self.sql = \"SQL\"\n self.io_msg = \"IO_Messages\"\n self.sql_msg = \"SQL_Messages\"\n self.io_time = \"IO_Time\"\n self.sql_time = \"SQL_Time\"\n self.name = \"ServerName\"\n\n def get_err_stat(self):\n\n \"\"\"Method: get_err_stat\n\n Description: get_err_stat function.\n\n Arguments:\n\n \"\"\"\n\n return self.io_err, self.sql, self.io_msg, self.sql_msg, \\\n self.io_time, self.sql_time\n\n def get_name(self):\n\n \"\"\"Method: get_name\n\n Description: get_name function.\n\n Arguments:\n\n \"\"\"\n\n return self.name\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp -> Initialize testing environment.\n test_no_error -> Test with no errors.\n test_sql_error -> Test with SQL error.\n test_io_error -> Test with IO error.\n test_io_sql_errors -> Test with IO and SQL errors.\n test_no_slave -> Test with no slave present.\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n self.slave = Server()\n self.slaves = [self.slave]\n\n def test_no_error(self):\n\n \"\"\"Function: test_no_error\n\n Description: Test with no errors.\n\n Arguments:\n\n \"\"\"\n\n self.slave.io_err = None\n self.slave.sql = None\n\n self.assertFalse(mysql_clone.chk_slv_err(self.slaves))\n\n def test_sql_error(self):\n\n \"\"\"Function: test_sql_error\n\n Description: Test with SQL error.\n\n Arguments:\n\n \"\"\"\n\n self.slave.io_err = None\n\n with gen_libs.no_std_out():\n self.assertFalse(mysql_clone.chk_slv_err(self.slaves))\n\n def test_io_error(self):\n\n \"\"\"Function: test_io_error\n\n Description: Test with IO error.\n\n Arguments:\n\n \"\"\"\n\n self.slave.sql = None\n\n with gen_libs.no_std_out():\n self.assertFalse(mysql_clone.chk_slv_err(self.slaves))\n\n def test_io_sql_errors(self):\n\n \"\"\"Function: test_io_sql_errors\n\n Description: Test with IO and SQL errors.\n\n Arguments:\n\n \"\"\"\n\n with gen_libs.no_std_out():\n self.assertFalse(mysql_clone.chk_slv_err(self.slaves))\n\n def test_no_slave(self):\n\n \"\"\"Function: test_no_slave\n\n Description: Test with no slave present.\n\n Arguments:\n\n \"\"\"\n\n with gen_libs.no_std_out():\n self.assertFalse(mysql_clone.chk_slv_err([]))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/unit/mysql_clone/chk_slv_err.py","file_name":"chk_slv_err.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"248329300","text":"# -*- encoding: utf-8 -*-\n\nfrom openerp import models, fields, api, _\nfrom openerp.exceptions import UserError, ValidationError\nimport time\n\nclass AsistenteReporteBanco(models.TransientModel):\n _name = 'conciliacion_bancaria.asistente_reporte_banco'\n\n def _default_cuenta(self):\n if len(self.env.context.get('active_ids', [])) 
> 0:\n return self.env.context.get('active_ids')[0]\n else:\n return None\n\n cuenta_bancaria_id = fields.Many2one(\"account.account\", string=\"Cuenta\", required=True, default=_default_cuenta)\n mostrar_circulacion = fields.Boolean(string=\"Mostrar documentos en circulación\")\n fecha_desde = fields.Date(string=\"Fecha Inicial\", required=True, default=lambda self: time.strftime('%Y-%m-01'))\n fecha_hasta = fields.Date(string=\"Fecha Final\", required=True, default=lambda self: time.strftime('%Y-%m-%d'))\n saldo_banco = fields.Float('Saldo banco')\n\n def print_report(self):\n active_ids = self.env.context.get('active_ids', [])\n data = {\n 'ids': active_ids,\n 'model': self.env.context.get('active_model', 'ir.ui.menu'),\n 'form': self.read()[0]\n }\n return self.env['report'].get_action([], 'conciliacion_bancaria.reporte_banco', data=data)\n","sub_path":"report/asistente_reporte_banco.py","file_name":"asistente_reporte_banco.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"446801101","text":"#Change Calculator\n# Read in a cost\n# Read in the amount given\n# Calculate the change\n# Break the change into how many nickels, dimes, quarters\n# loonies, toonies, $5s, $10s, $20s, $50s, $100s\n# If amount is below the cost, say how much more they owe.\ncost = float(input(\"How much does the item cost?\"))\namount = float(input(\"What's the payment?\"))\n# work in integer cents so repeated floor division and modulo stay exact\nchange = int(round((amount - cost) * 100))\nif change < 0:\n print(\"You still owe\", -change / 100)\nelse:\n print(\"Your change is\", change / 100)\n # (value in cents, label) for each denomination, largest first\n denominations = [(10000, \"$100\"), (5000, \"$50\"), (2000, \"$20\"), (1000, \"$10\"), (500, \"$5\"), (200, \"$2\"), (100, \"$1\"), (25, \"$0.25\"), (10, \"$0.10\"), (5, \"$0.05\")]\n for value, label in denominations:\n count = change // value\n change = change % value\n if count > 0:\n print(count, \"x\", label)\n # any remainder under 5 cents is dropped (no smaller coin is handed out)\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"284178273","text":"# Why we should use a provided API (Application Programming Interface)\nclass Person:\n\n def __init__(self, age=0, name=''):\n self._age = age\n self._name = name\n \n @property\n def age(self):\n return self._age\n @age.setter\n def age(self, age):\n # only accept integer values of zero or greater\n try:\n age = str(age)\n if not (age[0] == '-' and age[1:].isdecimal()):\n self._age = int(age)\n else:\n print('음수는 입력할 수 없습니다.')\n except Exception as e:\n print('입력값이 부정확 합니다')\n \n장동건 = Person()\n장동건.age = 0\nprint('age: {}'.format(장동건.age))\n","sub_path":"lectures on python/파이썬 기초/week03-2.py","file_name":"week03-2.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"60339689","text":"#! 
/usr/bin/env python\n\"\"\"\nThis program uses matplotlib to plot the number of Sunspots per month\nstarting at January 1749.\n\nJorge Bustos\nFeb 12, 2019\n\"\"\"\nfrom __future__ import division,print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = np.loadtxt(\"sunspots.txt\",float)\nx = data[0:1000,0] #reads only 1000 data points from 1st column\ny = data[0:1000,1] # \" \t\t\" \t\t\" \t\t\"\t\t\" 2nd column\n\nY = np.zeros(len(y)) #creates an array full of zeros, of desired length\nr = 5\nfor k in range(len(y)):\n\tif (k < r) or (k > 1000 - r): #for the case that the indices of y go out of bounds\n\t\tY[k] = y[k]\n\telse:\n\t\tfor m in range(k-r,k+r,1):\n\t\t\tY[k] += 1/(2*r+1) * y[m]\n\t\t\t\nplt.plot(x,y)\nplt.plot(x,Y,\"r\") #plotting the running average as a red curve\nplt.title(\"Sunspots by month since Jan 1749\")\nplt.xlabel(\"month\")\nplt.ylabel(\"Sunspots\")\nplt.savefig(\"Sunspots.png\")\nplt.show()","sub_path":"plots and data/sunspots.py","file_name":"sunspots.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"144361788","text":"\n#!/usr/bin/env python\n\"\"\"\nCopyright Netherlands eScience Center\nFunction : Quantify stationary and transient eddy from atmospheric kinetic energy (MERRA2)(Cartesius customised)\nAuthor : Yang Liu\nDate : 2018.11.30\nLast Update : 2019.10.30\nDescription : The code aims to calculate the time and space dependent components\n of atmospheric meridional energy transport based on atmospheric\n reanalysis dataset MERRA2 from NASA. The complete procedure\n includes the ecomposition of standing & transient eddies.\n Much attention should be paid that we have to use daily\n mean since the decomposition takes place at subdaily level could introduce\n non-meaningful oscillation due to daily cycling.\n The procedure is generic and is able to adapt any atmospheric\n reanalysis datasets, with some changes.\n Referring to the book \"Physics of Climate\", the concept of decomposition\n of circulation is given with full details. As a consequence, the meridional\n energy transport can be decomposed into 4 parts:\n @@@ A = [/overbar{A}] + /ovrebar{A*} + [A]' + A'* @@@\n [/overbar{A}]: energy transport by steady mean circulation\n /ovrebar{A*}: energy transport by stationary eddy\n [A]': energy transport by transient eddy\n A'*: energy transport by instantaneous and asymmetric part\n An example is given at page 277, in terms of transport of moisture.\n Here we will focus on three components of total meridional energy\n transport:\n @@@ [/overbar{vT}] = [/overbar{v}] x [/overbar{T}] + [/overbar{v}* x /overbar{T}*] + [/overbar{v'T'}] @@@\n [/overbar{v}] x [/overbar{T}]: energy transport by steady mean circulation\n [/overbar{v}* x /overbar{T}*]: energy transport by stationary eddy\n [/overbar{v'T'}]: energy transport by transient eddy\n Due to a time dependent surface pressure, we will take the vertical\n integral first and then decompose the total energy transport. Hence,\n we actually harness the equation of single variable. Thus, we will calculate\n all the 4 components.\nReturn Value : NetCFD4 data file\nDependencies : os, time, numpy, netCDF4, sys, matplotlib\nvariables : Logarithmic Surface Pressure lnsp\n Zonal Divergent Wind u\n Meridional Divergent Wind v\n\t\t Surface geopotential \t z\nCaveat!!\t : This module is designed to work with a batch of files. Hence, there is\n pre-requists for the location and arrangement of data. 
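# Aside: the decomposition described above, written out in plain numpy for a
# single-level field A of shape (time, lat, lon). This is an illustrative
# sketch only, not a function from the script below:
import numpy as np

def decompose(A):
    A_tbar = A.mean(axis=0)                              # overbar: time mean
    A_prime = A - A_tbar[np.newaxis, :, :]               # ': transient departure
    A_tbar_zonal = A_tbar.mean(axis=-1)                  # [A-bar]: steady mean circulation
    A_tbar_star = A_tbar - A_tbar_zonal[:, np.newaxis]   # A-bar*: stationary eddy departure
    return A_tbar_zonal, A_tbar_star, A_prime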
The folder should\n have the following structure:\n /MERRA2\n /MERRA2_100.instM_3d_asm_Np.198001.nc4.nc\n /MERRA2_100.instM_3d_asm_Np.198002.nc4.nc\n ...\n /MERRA2_200.instM_3d_asm_Np.199201.nc4.nc\n ...\n ...\n Please use the default names after downloading from NASA. \n The files are in netCDF format. Originally, MERRA2 has ascending lat.\n The pressure levels are from surface to TOA.\n \n The data is saved on a descending pressure coordinate. In order\n to use the script, the data should have an ascending coordinate.\n\"\"\"\n\nimport sys\nimport numpy as np\nimport time as tttt\nfrom netCDF4 import Dataset, num2date\nimport os\n\n##########################################################################\n########################### Units vacabulory #########################\n# cpT: [J / kg K] * [K] = [J / kg]\n# Lvq: [J / kg] * [kg / kg] = [J / kg]\n# gz in [m2 / s2] = [ kg m2 / kg s2 ] = [J / kg]\n\n# multiply by v: [J / kg] * [m / s] => [J m / kg s]\n# sum over longitudes [J m / kg s] * [ m ] = [J m2 / kg s]\n\n# integrate over pressure: dp: [Pa] = [N m-2] = [kg m2 s-2 m-2] = [kg s-2]\n# [J m2 / kg s] * [Pa] = [J m2 / kg s] * [kg / s2] = [J m2 / s3]\n# and factor 1/g: [J m2 / s3] * [s2 /m2] = [J / s] = [Wat]\n##########################################################################\n\n# constants\nconstant = {'g' : 9.80616, # gravititional acceleration [m / s2]\n 'R' : 6371009, # radius of the earth [m]\n 'cp': 1004.64, # heat capacity of air [J/(Kg*K)]\n 'Lv': 2264670, # Latent heat of vaporization [J/Kg]\n 'R_dry' : 286.9, # gas constant of dry air [J/(kg*K)]\n 'R_vap' : 461.5, # gas constant for water vapour [J/(kg*K)]\n }\n##########################################################################\n########################### level information #########################\nnative_level = np.array(([1000, 975, 950, 925, 900,\n 875, 850, 825, 800, 775,\n 750, 725, 700, 650, 600,\n 550, 500, 450, 400, 350,\n 300, 250, 200, 150, 100,\n 70, 50, 40, 30, 20,\n 10, 7, 5, 4, 3,\n 2, 1, 0.699999988079071, 0.5, 0.400000005960464,\n 0.300000011920929, 0.100000001490116]),dtype=int)\n################################ Input zone ######################################\n# specify starting and ending time\nstart_year = 1980\nend_year = 2017\n# choose the slice number for the vertical layer\n# pressure levels: (0)200, (1)300, (2)400, (3)500, (4)600, (5)750, (6)850, (7)950\n# corresponding target levels (0)7, (1) 6, (2) 5, (3) 4, (4)3, (5) 2, (6) 1, (7) 0\nlev_slice = 0\nname_list = ['200', '300', '400', '500', '600', '750', '850', '950']\nname_list = name_list[::-1]\n# specify data path\n# ERAI 3D fields on pressure level\n#datapath = '/home/ESLT0068/WorkFlow/Core_Database_AMET_OMET_reanalysis/ERAI/regression/pressure/daily'\ndatapath = '/projects/0/blueactn/reanalysis/MERRA2/subdaily/pressure'\n# specify output path for figures\noutput_path = '/home/lwc16308/reanalysis/MERRA2/output/eddy'\n# benchmark datasets for basic dimensions\nbenchmark_file = 'MERRA2_300.inst3_3d_asm_Np.20091223.SUB.nc'\nbenchmark = Dataset(os.path.join(datapath, 'merra2009_Np', benchmark_file))\n####################################################################################\n\ndef var_key_retrieve(datapath, year, month, day):\n # get the path to each datasets\n print (\"Start retrieving datasets {0} (y) {1} (m) {2}\".format(year, month, day+1))\n if year < 1992:\n datapath_var = os.path.join(datapath, 'merra{}_Np'.format(year), 'MERRA2_100.inst3_3d_asm_Np.{}{}{:02d}.SUB.nc'.format(year, namelist_month[month-1], day+1))\n 
elif year < 2001:\n datapath_var = os.path.join(datapath, 'merra{}_Np'.format(year), 'MERRA2_200.inst3_3d_asm_Np.{}{}{:02d}.SUB.nc'.format(year, namelist_month[month-1], day+1))\n elif year < 2011:\n datapath_var = os.path.join(datapath, 'merra{}_Np'.format(year), 'MERRA2_300.inst3_3d_asm_Np.{}{}{:02d}.SUB.nc'.format(year, namelist_month[month-1], day+1))\n else:\n datapath_var = os.path.join(datapath, 'merra{}_Np'.format(year), 'MERRA2_400.inst3_3d_asm_Np.{}{}{:02d}.SUB.nc'.format(year, namelist_month[month-1], day+1))\n # get the variable keys\n var_key = Dataset(datapath_var)\n\n print (\"Retrieving datasets successfully and return the variable key!\")\n return var_key\n\ndef initialization(benchmark):\n print (\"Prepare for the main work!\")\n # date and time arrangement\n # namelist of month and days for file manipulation\n namelist_month = ['01','02','03','04','05','06','07','08','09','10','11','12']\n long_month_list = np.array([1,3,5,7,8,10,12])\n leap_year_list = np.array([1976,1980,1984,1988,1992,1996,2000,2004,2008,2012,2016,2020])\n # create the month index\n period = np.arange(start_year,end_year+1,1)\n index_month = np.arange(1,13,1)\n # create dimensions for saving data\n #Dim_level = len(benchmark.variables['level'][:])\n Dim_latitude = len(benchmark.variables['lat'][:])\n Dim_longitude = len(benchmark.variables['lon'][:])\n Dim_month = len(index_month)\n Dim_period = len(period)\n # mask for terrain\n wind = benchmark.variables['V'][0,lev_slice,:,:]\n mask = np.ones(wind.shape, dtype=int)\n mask[wind>10000] = 0\n # a list of the index of starting day in each month\n month_day_length = [31,28,31,30,31,30,31,31,30,31,30,31] #! we ignore the last day of February for the leap year\n month_day_index = [0,31,59,90,120,151,181,212,243,273,304,334]\n # create variables\n v_temporal_sum = np.zeros((365,Dim_latitude,Dim_longitude),dtype=float) #! 
we ignore the last day of February for the leap year\n return period, index_month, namelist_month, long_month_list, leap_year_list, mask, \\\n Dim_latitude, Dim_longitude, Dim_month, Dim_period, month_day_length, \\\n month_day_index, v_temporal_sum\n\ndef pick_var(var_key):\n # extract variables\n print (\"Start extracting velocity for the calculation of mean over time and space.\")\n # extract data at certain levels\n v = var_key.variables['V'][:,lev_slice,:,:]\n ps = var_key.variables['PS'][:] # surface pressure Pa\n level = var_key.variables['lev'][lev_slice] * 100\n # correct the filling values\n v[v>10000] = 0 \n # daily mean\n v_out = np.mean(v, 0)\n\n return v_out\n\ndef initialization_eddy(v_temporal_mean):\n # Here we only use the temporal mean, for the spatial mean we will take it dynamically\n # during the calculation of eddies, for the sake of memory usage.\n # create space for eddies\n var_v2_transient_pool = np.zeros((Dim_period,Dim_month,Dim_latitude,Dim_longitude),dtype=float)\n var_v2_standing_pool = np.zeros((Dim_period,Dim_month,Dim_latitude,Dim_longitude),dtype=float)\n var_v2_transient_mean_pool = np.zeros((Dim_period,Dim_month,Dim_latitude),dtype=float)\n var_v2_stationary_mean_pool = np.zeros((Dim_period,Dim_month,Dim_latitude,Dim_longitude),dtype=float)\n # create space for overall momentum\n var_v2_overall_pool = np.zeros((Dim_period,Dim_month,Dim_latitude,Dim_longitude),dtype=float)\n # calculate mean meridional circulation\n var_v_steady_mean_zonal_mean = np.mean(v_temporal_mean,2)\n var_v_steady_mean_monthly_zonal_mean = np.zeros((12,Dim_latitude),dtype=float)\n\n for i in np.arange(Dim_month):\n var_v_steady_mean_monthly_zonal_mean[i,:] = np.mean(var_v_steady_mean_zonal_mean[month_day_index[i-1]:month_day_index[i-1]+month_day_length[i-1],:],0)\n var_v2_steady_mean = var_v_steady_mean_monthly_zonal_mean * var_v_steady_mean_monthly_zonal_mean\n\n return var_v2_transient_pool, var_v2_standing_pool, var_v2_transient_mean_pool,\\\n var_v2_stationary_mean_pool, var_v2_overall_pool, var_v2_steady_mean, \\\n\ndef compute_eddy(var_v_temporal_mean_select, var_v):\n '''\n We follow the method given by Peixoto and Oort, 1983.\n The equation is listed on page 61-63.\n equation 4.6 and 4.10\n The example is given on page 288.\n Here we take our naming convention for different eddies.\n For the details, please visit \"Transient & Standing eddy\"\n in notes.\n '''\n # shape of v[days,lat,lon]\n seq, _, _ = var_v.shape\n # mask[lat, lon]\n mask_3D = np.repeat(mask[np.newaxis,:,:],seq,0)\n # calculate transient eddies\n ################# transient eddy ###################\n print (\"Calculate transient eddies!\")\n var_v_prime = var_v - var_v_temporal_mean_select\n # eddy\n var_v2_transient = var_v_prime * var_v_prime\n # monthly mean\n # shape[lat,lon]\n var_v2_transient_monthly_mean = np.mean(var_v2_transient,0)\n ####################################################\n # calculate transient mean eddies\n ############### transient mean eddy ################\n print (\"Calculate transient mean eddies!\")\n var_v_prime_zonal_mean = np.mean(var_v,2) - np.mean(var_v_temporal_mean_select,2)\n # eddy\n var_v2_transient_mean = var_v_prime_zonal_mean * var_v_prime_zonal_mean\n # monthly mean\n # shape[lat]\n var_v2_transient_mean_monthly_mean = np.mean(var_v2_transient_mean,0)\n ####################################################\n # Calculate standing eddies\n ################## standing eddy ###################\n print (\"Calculate standing eddies!\")\n var_v_star = 
np.zeros(var_v.shape,dtype=float)\n var_v_zonal_mean = np.mean(var_v,2)\n var_v_zonal_mean_enlarge = np.repeat(var_v_zonal_mean[:,:,np.newaxis],Dim_longitude,2)\n var_v_star = var_v - var_v_zonal_mean_enlarge\n # eddy\n var_v2_standing = var_v_star * var_v_star * mask_3D\n # monthly mean\n # shape[lat,lon]\n var_v2_standing_monthly_mean = np.mean(var_v2_standing,0)\n ####################################################\n # Calculate stationary mean eddies\n ############## stationary mean eddy ###############\n print (\"Calculate stationary mean eddies!\")\n var_v_monthly_mean = np.mean(var_v,0)\n var_v_monthly_zonal_mean = np.mean(var_v_monthly_mean,1)\n var_v_monthly_zonal_mean_enlarge = np.repeat(var_v_monthly_zonal_mean[:,np.newaxis],Dim_longitude,1)\n var_v_star_monthly_zonal_mean = var_v_monthly_mean - var_v_monthly_zonal_mean_enlarge\n # monthly mean\n # shape[lat,lon]\n var_v2_stationary_mean_monthly_mean = var_v_star_monthly_zonal_mean * var_v_star_monthly_zonal_mean\n ####################################################\n # calculate the overall momentum transport\n ############## overall transport ###############\n print (\"Calculate overall momentum transport!\")\n var_v2_overall = var_v * var_v\n # monthly mean\n # shape[lat,lon]\n var_v2_overall_monthly_mean = np.mean(var_v2_overall,0)\n ####################################################\n\n return var_v2_transient_monthly_mean, var_v2_transient_mean_monthly_mean,\\\n var_v2_standing_monthly_mean, var_v2_stationary_mean_monthly_mean,\\\n var_v2_overall_monthly_mean\n\n\ndef create_netcdf_point_eddy(var_v2_overall,var_v2_transient,var_v2_transient_mean,\n var_v2_standing, var_v2_stationary_mean, var_v2_steady_mean,\n output_path):\n # take the zonal mean\n var_v2_overall_zonal = np.mean(var_v2_overall,3)\n var_v2_transient_zonal = np.mean(var_v2_transient,3)\n # transient_mean is zonal mean already\n var_v2_standing_zonal = np.mean(var_v2_standing,3)\n var_v2_stationary_mean_zonal = np.mean(var_v2_stationary_mean,3)\n # create netCDF\n print ('*******************************************************************')\n print ('*********************** create netcdf file*************************')\n print ('*******************************************************************')\n # wrap the datasets into netcdf file\n # 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'\n data_wrap = Dataset(os.path.join(output_path,'model_merra_daily_v2_eddies_{0}hPa_point.nc'.format(name_list[lev_slice])),\n 'w',format = 'NETCDF4')\n # create dimensions for netcdf data\n year_wrap_dim = data_wrap.createDimension('year',Dim_period)\n month_wrap_dim = data_wrap.createDimension('month',Dim_month)\n lat_wrap_dim = data_wrap.createDimension('latitude',Dim_latitude)\n lon_wrap_dim = data_wrap.createDimension('longitude',Dim_longitude)\n # create coordinate variables for 1-dimensions\n year_wrap_var = data_wrap.createVariable('year',np.int32,('year',))\n month_wrap_var = data_wrap.createVariable('month',np.int32,('month',))\n lat_wrap_var = data_wrap.createVariable('latitude',np.float32,('latitude',))\n lon_wrap_var = data_wrap.createVariable('longitude',np.float32,('longitude',))\n # create the 4-d variable\n var_v2_overall_wrap_var = data_wrap.createVariable('v2_overall',np.float64,('year','month','latitude','longitude'),zlib=True)\n var_v2_transient_wrap_var = data_wrap.createVariable('v2_transient',np.float64,('year','month','latitude','longitude'),zlib=True)\n var_v2_standing_wrap_var = 
data_wrap.createVariable('v2_standing',np.float64,('year','month','latitude','longitude'),zlib=True)\n var_v2_stationary_mean_wrap_var = data_wrap.createVariable('v2_stationary_mean',np.float64,('year','month','latitude','longitude'),zlib=True)\n # create the 4d variable\n var_v2_overall_zonal_wrap_var = data_wrap.createVariable('v2_overall_zonal',np.float64,('year','month','latitude'),zlib=True)\n var_v2_transient_zonal_wrap_var = data_wrap.createVariable('v2_transient_zonal',np.float64,('year','month','latitude'),zlib=True)\n var_v2_transient_mean_wrap_var = data_wrap.createVariable('v2_transient_mean',np.float64,('year','month','latitude'),zlib=True)\n var_v2_standing_zonal_wrap_var = data_wrap.createVariable('v2_standing_zonal',np.float64,('year','month','latitude'),zlib=True)\n var_v2_stationary_mean_zonal_wrap_var = data_wrap.createVariable('v2_stationary_mean_zonal',np.float64,('year','month','latitude'),zlib=True)\n # create the 2d variable\n var_v2_steady_mean_wrap_var = data_wrap.createVariable('v2_steady_mean',np.float64,('month','latitude'),zlib=True)\n # global attributes\n data_wrap.description = 'Monthly stationary and transient eddies at each grid point'\n # variable attributes\n lat_wrap_var.units = 'degree_north'\n lon_wrap_var.units = 'degree_east'\n \n var_v2_overall_wrap_var.units = 'm2/s2'\n var_v2_transient_wrap_var.units = 'm2/s2'\n var_v2_standing_wrap_var.units = 'm2/s2'\n var_v2_stationary_mean_wrap_var.units = 'm2/s2'\n var_v2_overall_zonal_wrap_var.units = 'm2/s2'\n var_v2_transient_zonal_wrap_var.units = 'm2/s2'\n var_v2_transient_mean_wrap_var.units = 'm2/s2'\n var_v2_standing_zonal_wrap_var.units = 'm2/s2'\n var_v2_stationary_mean_zonal_wrap_var.units = 'm2/s2'\n var_v2_steady_mean_wrap_var.units = 'm2/s2'\n\n lat_wrap_var.long_name = 'Latitude'\n lon_wrap_var.long_name = 'Longitude'\n \n var_v2_overall_wrap_var.long_name = 'Northward transport of kinetic energy by all motions'\n var_v2_transient_wrap_var.long_name = 'Northward transport of kinetic energy by transient eddy'\n var_v2_standing_wrap_var.long_name = 'Northward transport of kinetic energy by standing eddy'\n var_v2_stationary_mean_wrap_var.long_name = 'Northward transport of kinetic energy by stationary mean eddy'\n var_v2_overall_zonal_wrap_var.long_name = 'Zonal mean of northward transport of kinetic energy by all motions'\n var_v2_transient_zonal_wrap_var.long_name = 'Zonal mean of northward transport of kinetic energy by transient eddy'\n var_v2_transient_mean_wrap_var.long_name = 'Northward transport of kinetic energy by transient mean eddy'\n var_v2_standing_zonal_wrap_var.long_name = 'Zonal mean of northward transport of kinetic energy by standing eddy'\n var_v2_stationary_mean_zonal_wrap_var.long_name = 'Zonal mean of northward transport of kinetic energy by stationary mean eddy'\n var_v2_steady_mean_wrap_var.long_name = 'Northward transport of kinetic energy by steady mean meridional circulation'\n \n # writing data\n year_wrap_var[:] = period\n month_wrap_var[:] = index_month\n lat_wrap_var[:] = benchmark.variables['lat'][:]\n lon_wrap_var[:] = benchmark.variables['lon'][:]\n \n var_v2_overall_wrap_var[:] = var_v2_overall\n var_v2_transient_wrap_var[:] = var_v2_transient\n var_v2_standing_wrap_var[:] = var_v2_standing\n var_v2_stationary_mean_wrap_var[:] = var_v2_stationary_mean\n var_v2_overall_zonal_wrap_var[:] = var_v2_overall_zonal\n var_v2_transient_zonal_wrap_var[:] = var_v2_transient_zonal\n var_v2_transient_mean_wrap_var[:] = var_v2_transient_mean\n 
var_v2_standing_zonal_wrap_var[:] = var_v2_standing_zonal\n var_v2_stationary_mean_zonal_wrap_var[:] = var_v2_stationary_mean_zonal\n var_v2_steady_mean_wrap_var[:] = var_v2_steady_mean\n\n # close the file\n data_wrap.close()\n print (\"The generation of netcdf files for fields on surface is complete!!\")\n \nif __name__==\"__main__\":\n # calculate the time for the code execution\n start_time = tttt.time()\n # initialization\n period, index_month, namelist_month, long_month_list, leap_year_list, mask, Dim_latitude,\\\n Dim_longitude, Dim_month, Dim_period, month_day_length, month_day_index,\\\n v_temporal_sum = initialization(benchmark)\n print ('*******************************************************************')\n print ('************ calculate the temporal and spatial mean ************')\n print ('*******************************************************************')\n for i in period:\n for j in index_month:\n # determine how many days are there in a month\n if j in long_month_list:\n last_day = 31\n elif j == 2:\n if i in leap_year_list:\n last_day = 29\n else:\n last_day = 28\n else:\n last_day = 30\n # matrix to collect fields for each month\n var_v = np.zeros((last_day,Dim_latitude,Dim_longitude),dtype=float)\n # daily loop\n for k in np.arange(last_day):\n # get the key of each variable\n variable_key = var_key_retrieve(datapath, i, j, k)\n daily_v = pick_var(variable_key)\n var_v[k,:,:] = daily_v\n # in case of Feburary with 29 days\n if j == 2 and i in leap_year_list:\n var_v = var_v[:-1,:,:]\n # add daily field to the summation operator\n v_temporal_sum[month_day_index[j-1]:month_day_index[j-1]+month_day_length[j-1],:,:] = \\\n v_temporal_sum[month_day_index[j-1]:month_day_index[j-1]+month_day_length[j-1],:,:] + var_v\n # calculate the temporal mean\n v_temporal_mean = v_temporal_sum / Dim_period\n print ('*******************************************************************')\n print ('********** calculate the stationary and transient eddy **********')\n print ('*******************************************************************')\n # Initialization\n # Grab temporal & spatial mean\n # The mean meridional circulation is calculated here\n var_v2_transient_pool, var_v2_standing_pool, var_v2_transient_mean_pool,\\\n var_v2_stationary_mean_pool, var_v2_overall_pool, var_v2_steady_mean,\\\n = initialization_eddy(v_temporal_mean)\n # start the loop for the computation of eddies\n for i in period:\n for j in index_month:\n # determine how many days are there in a month\n if j in long_month_list:\n last_day = 31\n elif j == 2:\n if i in leap_year_list:\n last_day = 29\n else:\n last_day = 28\n else:\n last_day = 30\n # matrix to collect fields for each month\n var_v = np.zeros((last_day,Dim_latitude,Dim_longitude),dtype=float)\n # daily loop\n for k in np.arange(last_day):\n # get the key of each variable\n variable_key = var_key_retrieve(datapath, i, j, k)\n daily_v, daily_T, daily_q, daily_z = pick_var(variable_key)\n var_v[k,:,:] = daily_v \n # in case of Feburary with 29 days\n if j == 2 and i in leap_year_list:\n var_v = var_v[:-1,:,:]\n # take the temporal mean for the certain month\n var_v_temporal_mean_select = v_temporal_mean[month_day_index[j-1]:month_day_index[j-1]+month_day_length[j-1],:,:] # calculate the eddies\n var_v2_transient, var_v2_transient_mean, var_v2_standing, var_v2_stationary_mean, var_v2_overall\\\n = compute_eddy(var_v_temporal_mean_select, var_v)\n # save output to the data pool for netCDF\n var_v2_overall_pool[i-start_year,j-1,:,:] = var_v2_overall\n 
var_v2_transient_pool[i-start_year,j-1,:,:] = var_v2_transient\n var_v2_transient_mean_pool[i-start_year,j-1,:] = var_v2_transient_mean\n var_v2_standing_pool[i-start_year,j-1,:,:] = var_v2_standing\n var_v2_stationary_mean_pool[i-start_year,j-1,:,:] = var_v2_stationary_mean\n create_netcdf_point_eddy(var_v2_overall_pool,var_v2_transient_pool,var_v2_transient_mean_pool,\n var_v2_standing_pool,var_v2_stationary_mean_pool,var_v2_steady_mean,\n output_path)\n print ('The full pipeline of the decomposition of meridional energy transport in the atmosphere is accomplished!')\n print (\"--- %s minutes ---\" % ((tttt.time() - start_time)/60))","sub_path":"Preprocessing/dataPacking/Eddy_Decomposition/v2_eddy_decom_pres_merra_subdaily_minus_daily.py","file_name":"v2_eddy_decom_pres_merra_subdaily_minus_daily.py","file_ext":"py","file_size_in_byte":24995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"453735571","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import WubaurlItem\nimport re\n\n\nclass Shop58UrlSpider(scrapy.Spider):\n name = 'url_spider'\n custom_settings = {\n 'DOWNLOADER_MIDDLEWARES' : {\n 'Wuba.middlewares.ProxyMiddleware': 200,\n 'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 300,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None\n\n },\n \"ITEM_PIPELINES\": {\n 'Wuba.pipelines.URLDiplicatesPipeline': 200,\n 'Wuba.pipelines.URLDBPipeline': 300\n }\n }\n\n start_urls = ['https://cd.58.com/shangpucz/']\n\n def parse(self, response):\n region_list = response.xpath('/html/body/div[5]/div[3]/dl[1]/dd/a/@href').extract()\n for url in region_list:\n region_url = 'https://cd.58.com' + url\n yield scrapy.Request(region_url, callback=self.region_parse )\n\n def region_parse(self, response):\n item = WubaurlItem()\n links = re.findall(' 1:\n s += \", \"\n s += \"(\" + str(current.elem)+ \")\"\n current = current.next\n s += \"]\"\n print(s)\n\nif __name__ == \"__main__\":\n myGraph = GraphAdjacencyList()\n nodo0 = myGraph.insertNode(0)\n print(\"nodo0=insertNodo(0)\")\n nodo1 = myGraph.insertNode(2)\n print(\"nodo1=insertNodo(2)\")\n nodo2 = myGraph.insertNode(4)\n print(\"nodo2=insertNodo(4)\")\n nodo3 = myGraph.insertNode(6)\n print(\"nodo3=insertNodo(6)\")\n nodo4 = myGraph.insertNode(8)\n print(\"nodo4=insertNodo(8)\")\n nodo5 = myGraph.insertNode(10)\n print(\"nodo5=insertNodo(10)\\n\")\n\n myGraph.insertEdge(nodo0.index, nodo2.index)\n print(\"insertEdge(nodo0,nodo2)\")\n myGraph.insertEdge(nodo3.index, nodo4.index)\n print(\"insertEdge(nodo3,nodo4)\")\n myGraph.insertEdge(nodo0.index, nodo1.index)\n print(\"insertEdge(nodo0,nodo1)\")\n myGraph.insertEdge(nodo4.index, nodo3.index)\n print(\"insertEdge(nodo4,nodo3)\")\n myGraph.insertEdge(nodo5.index, nodo1.index)\n print(\"insertEdge(nodo5,nodo1)\")\n myGraph.insertEdge(nodo5.index, nodo4.index)\n print(\"insertEdge(nodo5,nodo4)\")\n myGraph.insertEdge(nodo2.index, nodo3.index)\n print(\"insertEdge(nodo2,nodo3)\\n\")\n\n myGraph.printGraph()\n\n print(\"adiacente(nodo0,nodo2)=\" + str(myGraph.isAdj(nodo0.index,\\\n nodo2.index)))\n print(\"adiacente(nodo5,nodo2)=\" + str(myGraph.isAdj(nodo5.index,\\\n nodo2.index)))\n\n print(\"\\ngenericSearch da nodo0\" )\n tree = myGraph.genericSearch(nodo0)\n s = tree.BFS()\n print([str(item) for item in s])\n\n myGraph.deleteNode(nodo2.index)\n print(\"\\ndeleteNodo(nodo2)\")\n myGraph.printGraph()\n\n myGraph.deleteEdge(nodo4.index, nodo3.index)\n print(\"deleteEdge(nodo4,nodo3)\")\n 
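The driver above exercises a `GraphAdjacencyList` class whose definition lives earlier in `graph/Graph_AdjacencyList.py` and is not shown in this excerpt. A minimal sketch of the structure the driver implies (names and API below are illustrative, not the real class):

```python
# Minimal adjacency-list sketch implied by the driver above; the real class
# (with Node objects, genericSearch, BFS trees, ...) is not shown here.
class TinyAdjacencyList:
    def __init__(self):
        self.adj = {}            # node index -> list of adjacent node indices
        self.next_index = 0

    def insert_node(self):
        index = self.next_index
        self.adj[index] = []
        self.next_index += 1
        return index

    def insert_edge(self, tail, head):
        self.adj[tail].append(head)   # directed, mirroring insertEdge(u, v)

    def is_adj(self, tail, head):
        return head in self.adj.get(tail, [])

g = TinyAdjacencyList()
a, b = g.insert_node(), g.insert_node()
g.insert_edge(a, b)
print(g.is_adj(a, b))   # True
```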
myGraph.printGraph();\n\n\n myGraph.deleteNode(nodo3.index)\n print(\"deleteNodo(nodo3)\")\n myGraph.deleteNode(nodo0.index)\n print(\"deleteNodo(nodo0)\")\n myGraph.deleteNode(nodo1.index)\n print(\"deleteNodo(nodo1)\")\n myGraph.printGraph()\n\n","sub_path":"graph/Graph_AdjacencyList.py","file_name":"Graph_AdjacencyList.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"172599459","text":"#!/usr/bin/python\n#\n# Flask server, woo!\n#\n\nfrom datetime import timedelta\nfrom functools import update_wrapper\n\nfrom pymongo import MongoClient\nfrom dateutil import parser\nfrom flask import Flask, request, redirect, url_for, send_from_directory, jsonify,make_response, current_app\nfrom json import loads\n\n# Setup Flask app.\napp = Flask(__name__,static_url_path='')\napp.debug = True\nmon = MongoClient('mongodb://none:none@ds031711.mongolab.com:31711/vea')\n\ndef fix_put(data):\n data = loads(data)\n data['startTime'] = parser.parse(data['startTime'])\n data['endTime'] = parser.parse(data['endTime'])\n data['completed'] = 0\n data['views'] = 0\n return data\n\ndef crossdomain(origin=None, methods=None, headers=None,\n max_age=21600, attach_to_all=True,\n automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n\n h = resp.headers\n\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n# Routes\n@app.route('/')\ndef root():\n return app.send_static_file('index.html')\n\n@app.route('/PUT/newitem',methods=['PUT','POST'])\n@crossdomain(origin='*')\ndef newitem():\n # send_static_file will guess the correct MIME type\n error = None\n if request.method == 'PUT':\n data = fix_put(request.data)\n mon.vea.rewards.insert(data)\n return str(\"\")\n\n@app.route('/GET/items',methods=['GET'])\n@crossdomain(origin='*')\ndef getitems():\n out = []\n for item in mon.vea.rewards.find():\n item['startTime'] = item['startTime'].strftime(\"%Y-%m-%d %H:%M:%S\")\n item['endTime'] = item['endTime'].strftime(\"%Y-%m-%d %H:%M:%S\")\n item['_id'] = str(item['_id'])\n out.append(item)\n return jsonify(response=out)\n \n\n@app.route('/GET/item/',methods=['GET'])\n@crossdomain(origin='*')\ndef getitem(item_id):\n out = []\n for item in mon.vea.rewards.find({\"id\":item_id}).limit(1):\n item['startTime'] = item['startTime'].strftime(\"%Y-%m-%d %H:%M:%S\")\n item['endTime'] = item['endTime'].strftime(\"%Y-%m-%d %H:%M:%S\")\n item['_id'] = str(item['_id'])\n out.append(item)\n return 
jsonify(response=out)\n\n@app.route('/PUT/user',methods=['PUT'])\n@crossdomain(origin='*')\ndef newuser():\n error = None\n if request.method == 'PUT':\n data = fix_put(request.data)\n mon.vea.users.insert(data)\n return str(\"\")\nif __name__ == '__main__':\n app.run(debug=True,host=\"0.0.0.0\")","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"117715861","text":"my_name = 'Zed A. Shaw'\r\nmy_age = 35\r\nmy_height = 74 #inches\r\nmy_weight = 180 #lbs\r\nmy_eyes = 'Blue'\r\nmy_teeth = 'White'\r\nmy_hair = 'Brown'\r\nm = (my_height*2.54)/100 # 1 inch = 2.54 cm\r\nprint(f\"Let's talk about {my_name}.\")\r\nprint(f\"To express {my_height} in 'm', is {m}.\")","sub_path":"lpthw/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"375269517","text":"from keras.layers import Dense, Dropout, GRU\nfrom keras.models import Sequential\n\ndef get_model(opt, input_shape):\n print(\"Building net..\")\n model = Sequential()\n model.add(GRU(60, recurrent_dropout = 0.4, activation='relu',return_sequences=True, input_shape=input_shape))\n model.add(Dropout(0.4))\n model.add(GRU(100, recurrent_dropout = 0.4, activation='relu',return_sequences=True))\n model.add(Dropout(0.3))\n model.add(GRU(100, recurrent_dropout = 0.4, activation='relu',return_sequences=False))\n model.add(Dropout(0.4))\n model.add(Dense(3,activation='sigmoid'))\n model.add(Dropout(0.4))\n model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['accuracy'])\n\n return model\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"179720126","text":"import logging\n\nfrom flask import Response\n\nLOG = logging.getLogger(__name__)\nERROR_DICT = {}\n\n\nclass ErrorsMiddleware:\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n try:\n return self.app(environ, start_response)\n except tuple(ERROR_DICT) as ex:\n code, message = ERROR_DICT[type(ex)]\n LOG.info(message.format(ex))\n res = Response(message.format(ex), mimetype='application/json', status=code)\n return res(environ, start_response)\n except Exception as ex:\n LOG.exception(f'Unknown error caught in API - {ex}')\n res = Response('Internal server error', mimetype='application/json', status=500)\n return res(environ, start_response)\n","sub_path":"hillel_shop/src/app/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
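The `ErrorsMiddleware` above dispatches on the module-level `ERROR_DICT`, which maps exception types to an `(HTTP status, message template)` pair. The record never shows a registration, so here is a hypothetical one (the exception name and message are invented for illustration):

```python
# Hypothetical registration, not part of the original record: once added,
# ErrorsMiddleware turns this exception into a 404 with the formatted message.
class ProductNotFound(Exception):
    pass

ERROR_DICT[ProductNotFound] = (404, 'Product not found: {}')
```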
\"Food\":\n entityEntering.destroy();\n self.EnergyChange(1);\n if self.MoveSpeed > MIN_SPEED:\n self.MoveSpeed -= SUB_SPEED;\n self.IsRun = True;\n\n def onLeaveTrap(self,entityLeaving,range_xz,range_y,controllerID,userarg):\n pass;\n\n def EnergyChange(self,Change):\n self.GatherEnergy += Change;\n\n def SetRun(self,value):\n self.IsRun = value;\n self.CheckAvatarSpeed();\n\n def ChangeSpeed(self,Offset):\n if self.MoveSpeed + Offset > MAX_SPEED or self.MoveSpeed + Offset < MIN_SPEED:\n return ;\n self.MoveSpeed += Offset;\n\n def CheckAvatarSpeed(self):\n if self.IsRun:\n if self.MoveSpeed < MIN_SPEED:\n self.MoveSpeed = 0.0;\n self.ChangeSpeed(MIN_SPEED);\n else:\n self.ChangeSpeed(ADD_SPEED);\n self.addTimer(0.1,0,TIMER_CHECK_AVATAR_SPEED);\n INFO_MSG(\"add_timer TIMER_CHECK_AVATAR_SPEED = %i\" % (TIMER_CHECK_AVATAR_SPEED));\n #self.MoveSpeed = 0.0;\n else:\n self.MoveSpeed = 0.0;\n\n def onTimer(self, tid, userArg):\n if userArg == TIMER_CHECK_AVATAR_SPEED:\n INFO_MSG(\"Enter Timer Process Timer Param = %i\" % (TIMER_CHECK_AVATAR_SPEED));\n self.CheckAvatarSpeed();","sub_path":"Kbe_Svr/server_assets/scripts/cell/Avatar.py","file_name":"Avatar.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"43043398","text":"import re\r\nimport xml.etree.ElementTree as ET\r\nfrom xml.dom import minidom\r\nimport os\r\n\r\ntext = open(\"a.txt\", \"r\").read()\r\n\r\ntext = re.sub(\"[,.;]\", \"\", text)\r\n\r\ntext2 = text.split('\\n')\r\n\r\ntree = ET.parse('c.xml')\r\n\r\nroot = tree.getroot()\r\n\r\nends = ET.SubElement(root, 'ends')\r\n\r\nused = []\r\nbool = False\r\n\r\nfor i, radok in enumerate(text2):\r\n for j, slovo in enumerate(radok.split()):\r\n if len(slovo) >= 1:\r\n end = re.findall(r\"([A-Za-zА-Яа-яїєщ']{,3}\\b)\", slovo)\r\n repeats = text.count(str(end[1]))\r\n\r\n b = ET.SubElement(ends, 'end')\r\n b.set('name', str(end))\r\n\r\n used.append(end)\r\n for g,k in enumerate(text2):\r\n for h,l in enumerate(k.split()):\r\n check = re.findall(r\"([A-Za-zА-Яа-яїєщ']{,3}\\b)\", l)\r\n if check == end:\r\n q = ET.SubElement(b, 'details')\r\n q.set(u\"word\", l)\r\n q.set(u\"repeats\", str(repeats))\r\n q.set(u'tuple', tuple(l))\r\n q.set(u'num', str(h + 1))\r\n q.set(u'row', str(g + 1))\r\n\r\ntree.write('с.xml', encoding='utf-8')\r\n","sub_path":"Python labs/Lab_2/Lab_2/Lab_2.4.py","file_name":"Lab_2.4.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"105295346","text":"#!/usr/bin/env python \n\n'''Created by Tobias Landenberger on 23.04.18 1:06 PM'''\nimport argparse\n\n\n# Initialize NxN DP Matrix with zeroes\ndef initializeMatrix(n):\n dp = [[0 for i in range(n)] for j in range(n)]\n return dp\n\n\ndef delta(i, j, seq, minLoopLength):\n if i >= j-minLoopLength:\n score = 0\n else:\n if (seq[i] == 'G' and seq[j] == 'C') or (seq[i] == 'C' and seq[j] == 'G'):\n score = int(args.score_GC)\n elif (seq[i] == 'A' and seq[j] == 'U') or (seq[i] == 'U' and seq[j] == 'A'):\n score = int(args.score_AU)\n elif (seq[i] == 'G' and seq[j] == 'U') or (seq[i] == 'U' and seq[j] == 'G'):\n score = int(args.score_GU)\n else:\n score = 0\n return score\n\n\ndef fourth_case(i, j, dp):\n tmp = 0\n for k in range(i + 1, j - 1):\n tmp = max(tmp, (dp[i][k] + dp[k + 1][j]))\n return tmp\n\n\ndef gamma(i, j, dp, seq, minLoopLength):\n return (max(dp[i + 1][j],\n dp[i][j - 1],\n dp[i + 1][j - 1] + delta(i, j, seq, 
minLoopLength),\n fourth_case(i, j, dp)))\n\n\ndef printMatrix(dp):\n for i in range(len(dp)):\n print(dp[i])\n\n\ndef traceback(i, j, dp, seq, struc, minLoopLength):\n if i < j:\n if dp[i][j] == dp[i+1][j]:\n traceback(i+1, j, dp, seq, struc, minLoopLength)\n elif dp[i][j] == dp[i][j-1]:\n traceback(i, j-1, dp, seq, struc, minLoopLength)\n elif dp[i][j] == dp[i+1][j-1] + delta(i, j, seq, minLoopLength):\n struc.append((i+1, j+1))\n traceback(i+1, j-1, dp, seq, struc, minLoopLength)\n else:\n for k in range(i+1, j-1):\n if dp[i][j] == dp[i][k] + dp[k+1][j]:\n traceback(i, k, dp, seq, struc, minLoopLength)\n traceback(k+1, j, dp, seq, struc, minLoopLength)\n break\n\n\ndef createBpseq(seq, bp, maxScore):\n out = \"\"\n out += \"Filename: \" + str(args.input) + \"\\n\"\n out += \"Min-Loop: \" + str(args.min_loop_length) + \"\\n\"\n out += \"GC: \" + str(args.score_GC) + \"\\n\"\n out += \"AU: \" + str(args.score_AU) + \"\\n\"\n out += \"GU: \" + str(args.score_GU) + \"\\n\"\n out += \"Score: \" + str(maxScore) + \"\\n\"\n out += \"\\n\"\n\n for i in range(0, len(seq)):\n out += str(i+1) + \" \" + seq[i]\n tpl = [(x, y) for x, y in bp if x == i + 1]\n if tpl:\n out += \" \" + str(tpl[0][1]) + \"\\n\"\n else:\n out += \" 0\" + \"\\n\"\n\n\n return out\n\n\ndef createDotBracket(seq, bp, maxScore):\n out = \"\\n\"\n out += seq + \"\\n\"\n\n for i in range(0, len(seq)):\n tpl = [(x, y) for x, y in bp if x == i + 1]\n tpl2 = [(x, y) for x, y in bp if y == i + 1]\n if tpl:\n out += \"(\"\n elif tpl2:\n out += \")\"\n else:\n out += \".\"\n\n return out\n\n\ndef nussinov(seq, minLoopLength):\n n = len(seq)\n dp = initializeMatrix(n)\n bp = []\n\n # Fill the matrix\n for k in range(1, n):\n for i in range(n - k):\n j = i + k\n dp[i][j] = gamma(i, j, dp, seq, minLoopLength)\n\n # Traceback\n traceback(i, j, dp, seq, bp, minLoopLength)\n\n maxScore = dp[0][n - 1]\n\n #printMatrix(dp)\n\n print(createBpseq(seq, bp, maxScore))\n\n print(createDotBracket(seq, bp, maxScore))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=\"\"\"Predicting rna secondary structure with Nussinov algorithm\"\"\")\n parser.add_argument('-i', \"--input\", help='Input file in fasta format')\n parser.add_argument(\"--min-loop-length\", default=3, help='Set minimum loop length')\n parser.add_argument(\"--score-GC\", default=1, help='Set GC-score for scoring function')\n parser.add_argument(\"--score-AU\", default=1, help='Set AU-score for scoring function')\n parser.add_argument(\"--score-GU\", default=1, help='Set Gu-score for scoring function')\n\n args = parser.parse_args()\n\n seq = \"\"\n # Read Fasta file\n with open(\"/Users/Tobias/Documents/Studium/SS_18/SSBI/Assignments/test.fasta\") as fasta_file:\n for line in fasta_file:\n if line.startswith(\">\"):\n continue\n else:\n seq += line.strip()\n\n nussinov(seq, int(args.min_loop_length))\n","sub_path":"nussinov.py","file_name":"nussinov.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"4160761","text":"import numpy as np\nimport pandas as pd\nimport itertools\nimport yaml\nimport os\n\n\ndef process_hue(df, col, hue=None):\n if hue:\n method_cols = ['operation', 'spectrum', 'distance']\n method_cols.remove(hue)\n df[col] = df[method_cols].apply(lambda x: '_'.join(x), axis=1)\n return df\n\n\ndef cartesian_apply(func, *args):\n args = map(lambda x: x if type(x) is list else [x], args)\n for inputs in itertools.product(*args):\n 
+{"seq_id":"4160761","text":"import numpy as np\nimport pandas as pd\nimport itertools\nimport yaml\nimport os\n\n\ndef process_hue(df, col, hue=None):\n if hue:\n method_cols = ['operation', 'spectrum', 'distance']\n method_cols.remove(hue)\n df[col] = df[method_cols].apply(lambda x: '_'.join(x), axis=1)\n return df\n\n\ndef cartesian_apply(func, *args):\n args = map(lambda x: x if type(x) is list else [x], args)\n for inputs in itertools.product(*args):\n func(*inputs)\n\n\ndef get_config():\n with open(\"config.yaml\", 'r') as stream:\n try:\n config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n config = None\n return config['experiment']\n\n\ndef run_config(callback):\n def helper(df, x, y, hue):\n col = 'experiment'\n df = process_hue(df, col, hue)\n callback(df, col, y, hue)\n\n config = get_config()\n for k, v in config.items():\n try:\n df = pd.read_csv(os.path.join('csv', k + '.csv'))\n except FileNotFoundError:\n print('Warning: Cannot find {0}.csv, '\n 'experiment \"{0}\" has not been executed!'.format(k))\n continue\n df.name = k\n x = v['x']\n y = v['y']\n hue = v['hue']\n cartesian_apply(helper, df, x, y, hue)\n\n\ndef main():\n print(get_config())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"result/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
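Usage sketch for `cartesian_apply` from the record above: scalar arguments are promoted to one-element lists, then `func` is invoked once per combination of the cartesian product.

```python
# Assuming cartesian_apply from the record above is in scope; 'adam' is
# promoted to ['adam'], so print runs once per (label, lr, optimizer) triple.
cartesian_apply(print, ['lr'], [0.1, 0.01], 'adam')
# -> lr 0.1 adam
# -> lr 0.01 adam
```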
+{"seq_id":"383856269","text":"from __future__ import unicode_literals\n\nfrom pyramid.response import Response\nfrom pyramid.httpexceptions import HTTPNotAcceptable, HTTPNotFound, HTTPGone, HTTPFound\nfrom mock import Mock, patch\nfrom requests.exceptions import ReadTimeout\n\nfrom clld.tests.util import TestWithEnv, XmlResponse, WithDbAndDataMixin\nfrom clld.db.models import common\nfrom clld.interfaces import IDataTable\n\n\nclass Tests(WithDbAndDataMixin, TestWithEnv):\n def test_index_view(self):\n from clld.web.views import index_view\n\n ctx = self.env['registry'].getUtility(IDataTable, name='contributors')\n\n # note: this invocation of the index view does also exercise the ExcelAdapter\n res = index_view(\n ctx(self.env['request'], common.Contributor), self.env['request'])\n self.assertTrue(isinstance(res, Response))\n res = index_view(\n ctx(self.env['request'], common.Sentence), self.env['request'])\n res = index_view(\n ctx(self.env['request'], common.Value), self.env['request'])\n\n self.set_request_properties(is_xhr=True, params={'sEcho': 'a'})\n res = index_view(\n ctx(self.env['request'], common.Contributor), self.env['request'])\n self.assertEqual(res.content_type, 'application/json')\n\n class X(ctx):\n def row_class(self, item):\n return 'row-%s' % item.pk\n\n res = index_view(X(self.env['request'], common.Contributor), self.env['request'])\n self.assertEqual(res.content_type, 'application/json')\n\n class Route(Mock):\n name = 'contributors_alt'\n\n self.set_request_properties(\n is_xhr=False, matched_route=Route(), matchdict={'ext': 'csv'})\n index_view(ctx(self.env['request'], common.Contributor), self.env['request'])\n\n def test_resource_view(self):\n from clld.web.views import resource_view\n\n ctx = common.Language(id='a', name='Name')\n res = resource_view(ctx, self.env['request'])\n self.assertTrue(isinstance(res, Response))\n\n self.set_request_properties(matchdict={'ext': 'x'})\n self.assertRaises(HTTPNotAcceptable, resource_view, ctx, self.env['request'])\n\n def test_select_combination(self):\n from clld.web.views import select_combination\n\n self.assertRaises(HTTPNotFound, select_combination, None, self.env['request'])\n self.set_request_properties(\n params={'parameters': 'parameter'})\n self.assertRaises(HTTPFound, select_combination, None, self.env['request'])\n self.set_request_properties(\n params=[('parameters', 'parameter'), ('parameters', 'no-domain')])\n self.assertRaises(HTTPFound, select_combination, None, self.env['request'])\n\n def test__raise(self):\n from clld.web.views import _raise\n\n self.assertRaises(ValueError, _raise, None)\n\n def test__ping(self):\n from clld.web.views import _ping\n\n self.assertEqual(_ping(None)['status'], 'ok')\n\n def test_js(self):\n from clld.web.views import js\n\n js(self.env['request'])\n\n def test_gone(self):\n from clld.web.views import gone\n\n self.assertRaises(HTTPGone, gone, None, None)\n\n def test_redirect(self):\n from clld.web.views import redirect\n\n self.assertRaises(HTTPFound, redirect, HTTPFound, lambda r: 'x', None, None)\n\n def test_unapi(self):\n from clld.web.views import unapi\n\n assert XmlResponse(unapi(self.env['request'])).root.tag == 'formats'\n\n self.set_request_properties(params={'format': 'bib'})\n self.assertTrue(isinstance(unapi(self.env['request']), HTTPNotFound))\n\n self.set_request_properties(params={'id': '/contributions/ccc'})\n self.assertTrue(isinstance(unapi(self.env['request']), HTTPNotFound))\n\n self.set_request_properties(params={'id': '/contributions/contribution'})\n assert XmlResponse(unapi(self.env['request'])).findall('format')\n\n self.set_request_properties(params={'format': 'bib', 'id': '/languages/language'})\n unapi(self.env['request'])\n self.set_request_properties(\n params={'format': 'bibtex', 'id': '/contributions/contribution'})\n unapi(self.env['request'])\n\n def test_atom_feed(self):\n from clld.web.views import atom_feed\n\n class FeedResponseWithTitle(object):\n status_code = 200\n content = b\"\"\"\\\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<rss version=\"2.0\" xmlns:content=\"http://purl.org/rss/1.0/modules/content/\">\n<channel>\n <title>Comments for The World Atlas of Language Structures Online</title>\n <link>http://blog.wals.info</link>\n <description>WALS Online Blog</description>\n <item>\n <title>Comment on Datapoint</title>\n <link>http://blog.wals.info/datapoint</link>\n <pubDate>Wed, 04 Nov 2015 22:11:03 +0000</pubDate>\n <guid>http://blog.wals.info/datapoint-26a-wals_code_juk/</guid>\n <description>Some description</description>\n <content:encoded><![CDATA[some description\n]]></content:encoded>\n </item>\n</channel>\n</rss>
\n\"\"\"\n\n class FeedResponseWithoutTitle(object):\n status_code = 200\n content = b\"\"\"\\\n\n\n http://blog.wals.info\n WALS Online Blog\n\n\"\"\"\n\n class MockRequests1(object):\n get = Mock(return_value=FeedResponseWithTitle)\n\n class MockRequests2(object):\n get = Mock(return_value=FeedResponseWithoutTitle)\n\n class MockRequestsTimeout(object):\n def get(self, *args, **kw):\n raise ReadTimeout()\n\n with patch('clld.web.views.requests', MockRequests1()):\n res = atom_feed(self.env['request'], None)\n self.assertIn('', res.body.decode('utf8'))\n\n with patch('clld.web.views.requests', MockRequests2()):\n res = atom_feed(self.env['request'], None)\n self.assertNotIn('', res.body.decode('utf8'))\n\n with patch('clld.web.views.requests', MockRequestsTimeout()):\n res = atom_feed(self.env['request'], None)\n self.assertNotIn('', res.body.decode('utf8'))\n","sub_path":"clld/tests/test_web_views.py","file_name":"test_web_views.py","file_ext":"py","file_size_in_byte":6518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"408554595","text":"import math\n\nN = int(input())\nX = list(map(int, input().split()))\n\nm, y, c = 0, 0, 0\nfor x in X:\n c = max(c, abs(x))\n x = abs(x)\n m += x\n y += x**2\nprint(m)\nprint(math.sqrt(y))\nprint(c)\n","sub_path":"ABC_B/ABC180_B.py","file_name":"ABC180_B.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"392974464","text":"from collections import defaultdict\ndef solution(n, computers):\n count = 0\n graph = defaultdict(list)\n for i, connection in enumerate(computers):\n for j, c in enumerate(connection):\n if c==1:\n graph[i + 1].append(j + 1)\n\n visit = []\n for start in range(n):\n start+=1\n stack = [start]\n if start not in visit:\n while stack:\n node = stack.pop()\n visit.append(node)\n for other in graph[node]:\n if other not in visit:\n stack.append(other)\n count+=1\n\n return count","sub_path":"dfs/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"430119915","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^$', 'GeniusATlaS.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/$', include(admin.site.urls)),\n url(r'^hello/$', 'GeniusATlaS.views.hello'),\n url(r'^time/$', 'GeniusATlaS.views.current_datetime'),\n url(r'^time/plus/(\\d{1,2})/$', 'GeniusATlaS.views.hours_ahead'),\n url(r'^sum/(\\d+)/$', 'GeniusATlaS.views.sum'),\n]\n","sub_path":"Project/site/GeniusATlaS/GeniusATlaS/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"308215225","text":"from ._util import *\n\n# make a calculator function\ndef scoreOnshoreWindLocation(windspeed, roadDist, powerDist, settlementDist,\n windspeedStart=4.5, roadDistStart=1000, powerDistStart=1000, settlementDistStart=500,\n windspeedStop=7, roadDistStop=10000, powerDistStop=10000, settlementDistStop=1000,\n windspeedFlip=False, roadDistFlip=True, powerDistFlip=True, settlementDistFlip=False,\n windspeedWeight=0.5, roadDistWeight=0.2, powerDistWeight=0.2, settlementDistWeight=0.1,):\n \"\"\"Performs a multi-criteria scoring of potential wind turbine site based off:\n * Average wind speed at 100 meters\n * 
+{"seq_id":"392974464","text":"from collections import defaultdict\ndef solution(n, computers):\n count = 0\n graph = defaultdict(list)\n for i, connection in enumerate(computers):\n for j, c in enumerate(connection):\n if c==1:\n graph[i + 1].append(j + 1)\n\n visit = []\n for start in range(n):\n start+=1\n stack = [start]\n if start not in visit:\n while stack:\n node = stack.pop()\n visit.append(node)\n for other in graph[node]:\n if other not in visit:\n stack.append(other)\n count+=1\n\n return count","sub_path":"dfs/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"430119915","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^$', 'GeniusATlaS.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^hello/$', 'GeniusATlaS.views.hello'),\n url(r'^time/$', 'GeniusATlaS.views.current_datetime'),\n url(r'^time/plus/(\\d{1,2})/$', 'GeniusATlaS.views.hours_ahead'),\n url(r'^sum/(\\d+)/$', 'GeniusATlaS.views.sum'),\n]\n","sub_path":"Project/site/GeniusATlaS/GeniusATlaS/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"308215225","text":"from ._util import *\n\n# make a calculator function\ndef scoreOnshoreWindLocation(windspeed, roadDist, powerDist, settlementDist,\n windspeedStart=4.5, roadDistStart=1000, powerDistStart=1000, settlementDistStart=500,\n windspeedStop=7, roadDistStop=10000, powerDistStop=10000, settlementDistStop=1000,\n windspeedFlip=False, roadDistFlip=True, powerDistFlip=True, settlementDistFlip=False,\n windspeedWeight=0.5, roadDistWeight=0.2, powerDistWeight=0.2, settlementDistWeight=0.1,):\n \"\"\"Performs a multi-criteria scoring of potential wind turbine sites based on:\n * Average wind speed at 100 meters\n * Distance from the nearest settlement area\n * Distance from the nearest roadway\n * Distance from the nearest power line\n \"\"\"\n totalScore = windspeedWeight * linearTransition( np.array(windspeed), windspeedStart, windspeedStop , windspeedFlip)\n totalScore += roadDistWeight * linearTransition( np.array(roadDist), roadDistStart, roadDistStop , roadDistFlip)\n totalScore += powerDistWeight * linearTransition( np.array(powerDist), powerDistStart, powerDistStop , powerDistFlip)\n totalScore += settlementDistWeight * linearTransition( np.array(settlementDist), settlementDistStart, settlementDistStop , settlementDistFlip)\n return totalScore\n","sub_path":"reskit/windpower/_score.py","file_name":"_score.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
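`scoreOnshoreWindLocation` above imports `linearTransition` from `._util`, which is not part of this excerpt. A plausible stand-in, assuming it maps values linearly onto [0, 1] between `start` and `stop` and inverts the ramp when `flip` is true:

```python
import numpy as np

# Hypothetical sketch of the unshown linearTransition helper: a clipped
# linear ramp from 0 at `start` to 1 at `stop`, reversed when flip is True.
def linearTransition(values, start, stop, flip=False):
    score = np.clip((np.asarray(values, dtype=float) - start) / (stop - start), 0.0, 1.0)
    return 1.0 - score if flip else score
```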
+{"seq_id":"95184720","text":"import csv\r\nimport time\r\nimport logging\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# my files (work in progress)\r\nimport GenerateInputList\r\nimport GenerateSearchList\r\nimport sys \r\n\r\n# try to find Modules RedBlackTree and Skiplist \r\nfrom red_black_tree.RedBlackTree import * \r\nfrom skiplist.SkipList import * \r\nfrom fingerManagement.MinMaxFinger import *\r\nfrom fingerManagement.LazyFinger import *\r\n\r\n# Goal of this function: open the file, read every list, convert all strings to int,\r\n# and return a list of lists\r\ndef readMYfile(filename):\r\n with open( filename) as csv_file:\r\n reader=csv.reader (csv_file, delimiter=',', skipinitialspace=True)\r\n \r\n output_list =[]\r\n line_count = 0\r\n \r\n for line in reader:\r\n new_list = []\r\n new_list.append(line)\r\n # remove the extra brackets\r\n simple_list = new_list.pop()\r\n # convert every string in the list to an integer\r\n for i in range(0, len(simple_list)):\r\n simple_list[i]= int(simple_list[i])\r\n output_list.append(simple_list)\r\n \r\n line_count +=1\r\n #print(\"\\n -> Done reading File: \", filename)\r\n return output_list\r\n\r\n\r\ndef map_lists(in_listOfLists, se_listOfLists, func):\r\n result = []\r\n while len(in_listOfLists) > 0 and len(se_listOfLists) > 0:\r\n # always use first element\r\n x = in_listOfLists.pop(0)\r\n y = se_listOfLists.pop(0)\r\n \r\n z = func(x, y)\r\n result.append(z)\r\n return result\r\n
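One caveat worth showing for `map_lists` above: it consumes both inputs with `pop(0)`, so callers must reload or copy the lists before reusing them (which is why the `__main__` block below re-reads the CSV files before every `Performance*` call):

```python
# map_lists empties both argument lists as a side effect.
ins = [[1, 2], [3]]
searches = [[2], [3, 4]]
print(map_lists(ins, searches, lambda a, b: len(a) + len(b)))   # [3, 3]
print(ins, searches)                                            # [] []
```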
\r\ndef subListLengh(listOfLists):\r\n lenSublist_Output = [] \r\n for list in listOfLists:\r\n lenSublist_Output.append(len(list))\r\n #print (\"the length of every SubList in ListOfLists is\", lenSublist_Output)\r\n return lenSublist_Output \r\n\r\ndef messureTime_INSERT_RedBlackTree (inputList):\r\n if type(inputList) != list:\r\n inputList = inputList.tolist()\r\n #start timer\r\n # start_time = time.monotonic()\r\n bst = RedBlackTree()\r\n start = time.perf_counter_ns()\r\n # init for the red-black tree\r\n \r\n bst.insertMultipleElem(inputList)\r\n \r\n # End Timer \r\n end = time.perf_counter_ns()\r\n delta_time = 0\r\n delta_time = (int(end-start)/(100000))\r\n return int(delta_time)\r\n\r\ndef messureTime_INSERT_SkipList(inputList):\r\n if type(inputList) != list:\r\n inputList = inputList.tolist()\r\n #start timer\r\n delta_time = 0\r\n skl = SkipList()\r\n #start_time = time.monotonic()\r\n start = time.perf_counter_ns()\r\n # init for the skip list\r\n \r\n skl.insertMultipleElem(inputList)\r\n \r\n # End Timer\r\n end = time.perf_counter_ns()\r\n delta_time = (int(end-start)//(100000))\r\n return (int(delta_time))\r\n\r\ndef messureNodes_SEARCH_RedBlackTree(inputList, searchlist):\r\n # normalize the input types\r\n if type(inputList) != list:\r\n inputList = inputList.tolist()\r\n if type(searchlist) != list:\r\n searchlist = searchlist.tolist() \r\n \r\n # init tree and finger\r\n bst = RedBlackTree()\r\n bst.insertMultipleElem(inputList)\r\n \r\n # measure\r\n x = bst.usedNodesInSearch\r\n\r\n # action\r\n bst.findMultipleElem(searchlist)\r\n x = bst.usedNodesInSearch\r\n\r\n return int(x)\r\n\r\ndef messureNodes_MinMaxFingerSEARCH_RedBlackTree(inputList, searchlist):\r\n # normalize the input types\r\n if type(inputList) != list:\r\n inputList = inputList.tolist()\r\n if type(searchlist) != list:\r\n searchlist = searchlist.tolist() \r\n \r\n # init tree and finger\r\n bst = RedBlackTree()\r\n bst.insertMultipleElem(inputList)\r\n mmf = MinMaxFinger()\r\n mmf.maxiFinger = mmf.setMaxiFingerFrom(bst)\r\n mmf.miniFinger = mmf.setMiniFingerFrom(bst)\r\n # action \r\n mmf.findMultipleElem_with_MinMaxFinger(bst, searchlist)\r\n\r\n # measure\r\n x = (bst.usedNodesInSearch + mmf.usedNodesInSearch)\r\n return int(x)\r\n\r\ndef messureNodes_LAZYFingerSEARCH_RedBlackTree(inputList, searchlist):\r\n # normalize the input types\r\n if type(inputList) != list:\r\n inputList = inputList.tolist()\r\n if type(searchlist) != list:\r\n searchlist = searchlist.tolist() \r\n \r\n # init tree and finger\r\n bst = RedBlackTree()\r\n bst.insertMultipleElem(inputList)\r\n lf = LazyFinger()\r\n lf.LazyFinger = lf.setfirst_LazyFinger(bst)\r\n \r\n # action \r\n lf.findMultipleElem_with_LazyFinger(bst, searchlist)\r\n \r\n # measure\r\n x = (bst.usedNodesInSearch + lf.usedNodesInSearch)\r\n return int(x)\r\n\r\ndef messureNodes_SPLAYFingerSEARCH_RedBlackTree(inputList, searchlist):\r\n # normalize the input types\r\n if type(inputList) != list:\r\n inputList = inputList.tolist()\r\n if type(searchlist) != list:\r\n searchlist = searchlist.tolist() \r\n \r\n # init tree and finger\r\n inputSplay = inputList.copy()\r\n\r\n splay = BinarySplayTree()\r\n splay.insertMultipleElem(inputSplay) \r\n\r\n bst = RedBlackTree()\r\n bst.insertMultipleElem(inputList)\r\n\r\n # action\r\n splay.findMultipleElem_with_SplayTree(bst, searchlist)\r\n \r\n # measure\r\n x = (bst.usedNodesInSearch + splay.usedNodesInSearch)\r\n return int(x)\r\n\r\n\r\ndef timePerformanceINSERTRedBlackTree(listOfLists):\r\n perf_OutputList = [] \r\n for list in listOfLists:\r\n timeRBT = messureTime_INSERT_RedBlackTree(list)\r\n perf_OutputList.append(timeRBT)\r\n \r\n \r\n print (\"timePerformanceINSERTRedBlackTree will return:\", perf_OutputList)\r\n return perf_OutputList\r\n\r\n# def timePerformanceINSERTSkipList(listOfLists):\r\n # perf_OutputList = [] \r\n # for list in listOfLists:\r\n # timeSKL = messureTime_INSERT_SkipList(list)\r\n # perf_OutputList.append(timeSKL)\r\n # print (\"timePerformanceINSERTSkipList has measured time per list iteration, and will return:\", perf_OutputList)\r\n # return perf_OutputList\r\n\r\ndef PerformanceSEARCHRedBlackTree(input_listOfLists, search_listOfLists): \r\n res = map_lists(input_listOfLists, search_listOfLists, messureNodes_SEARCH_RedBlackTree)\r\n \r\n print (\"PerformanceSEARCHRedBlackTree will return:\", res)\r\n return res\r\n\r\n# def timePerformanceSEARCHSkipList(input_listOfLists, search_listOfLists):\r\n # res = map_lists(input_listOfLists, search_listOfLists, messureTime_SEARCH_Skiplist)\r\n # reverse list, because of pop() use (which starts from the opposite end)\r\n # res.reverse()\r\n # print (\"timePerformanceSEARCHRedBlackTree has measured time per list iteration, and will return:\", res)\r\n # return res \r\n\r\ndef PerformanceMinMaxFingerSEARCHRedBlackTree (input_listOfLists, search_listOfLists):\r\n res = map_lists(input_listOfLists, search_listOfLists, messureNodes_MinMaxFingerSEARCH_RedBlackTree)\r\n \r\n print (\"PerformanceMinMaxFingerSEARCHRedBlackTree will return:\", res)\r\n return res \r\n\r\ndef PerformanceLAZYFingerSEARCHRedBlackTree(input_listOfLists, search_listOfLists):\r\n res = map_lists(input_listOfLists, search_listOfLists, messureNodes_LAZYFingerSEARCH_RedBlackTree)\r\n \r\n print (\"PerformanceLAZYFingerSEARCHRedBlackTree will return:\", res)\r\n return res \r\n\r\ndef PerformanceSPLAYFingerSEARCHRedBlackTree(input_listOfLists, search_listOfLists):\r\n #print (\"PerformanceSPLAYFingerSEARCHRedBlackTree started with:\", len(input_listOfLists), len(search_listOfLists))\r\n res = map_lists(input_listOfLists, search_listOfLists, messureNodes_SPLAYFingerSEARCH_RedBlackTree)\r\n \r\n print (\"PerformanceSPLAYFingerSEARCHRedBlackTree will return:\", res)\r\n return res \r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n # Depending on how much computing power is available, please limit this:\r\n sys.setrecursionlimit(50000)\r\n logging.basicConfig(filename='logFILE-PlotsCounterWindows.log', encoding='utf-8', level=logging.DEBUG)\r\n \r\n #Performance INSERT\r\n # in_listOfLists = readMYfile('inputLists.csv')\r\n # insert_TimeRBT = timePerformanceINSERTRedBlackTree(in_listOfLists)\r\n \r\n # in_listOfLists = readMYfile('inputLists.csv')\r\n # insert_TimeSKL = timePerformanceINSERTSkipList(in_listOfLists)\r\n \r\n #Performance ROOTSEARCH\r\n input_listOfLists = readMYfile('inputLists.csv')\r\n search_listOfLists = readMYfile('searchLists.csv') \r\n search_TimeRBT = PerformanceSEARCHRedBlackTree(input_listOfLists, search_listOfLists)\r\n \r\n # input_listOfLists = readMYfile('inputLists.csv')\r\n # search_listOfLists = readMYfile('searchLists.csv')\r\n # search_TimeSKL = timePerformanceSEARCHSkipList(input_listOfLists, search_listOfLists)\r\n \r\n #Performance FINGER SEARCH\r\n input_listOfLists = readMYfile('inputLists.csv')\r\n search_listOfLists = readMYfile('searchLists.csv')\r\n search_MinMaxFinger_TimeRBT = PerformanceMinMaxFingerSEARCHRedBlackTree(input_listOfLists, search_listOfLists)\r\n \r\n input_listOfLists = readMYfile('inputLists.csv')\r\n search_listOfLists = readMYfile('searchLists.csv')\r\n search_LazyFinger_TimeRBT = PerformanceLAZYFingerSEARCHRedBlackTree(input_listOfLists, search_listOfLists)\r\n \r\n input_listOfLists = readMYfile('inputLists.csv')\r\n search_listOfLists = readMYfile('searchLists.csv')\r\n search_SplayFinger_TimeRBT = PerformanceSPLAYFingerSEARCHRedBlackTree(input_listOfLists, search_listOfLists)\r\n \r\n \r\n # logically this metric has the same format as List_performanceTime, good for plotting...\r\n # read the number of search values per list (< the number of input values)\r\n listOfLists = readMYfile('searchLists.csv')\r\n numberOfInputValuesRBT = subListLengh(listOfLists)\r\n \r\n listOfLists = readMYfile('searchLists.csv')\r\n 
numberOfInputValuesSKL = subListLengh(listOfLists)\r\n\r\n \r\n# ------------------------------\r\n# Plot Idea: list length and time in Datastructure, global Time\r\n \r\n # Actual plot\r\n # https://www.grund-wissen.de/informatik/python/scipy/matplotlib.html\r\n \r\n fig, ((plt1, plt2), (plt3, plt4)) = plt.subplots(2, 2)\r\n fig.suptitle('Runtimes of red-black tree and skip list')\r\n \r\n # Plot 1\r\n #plt1.plot (numberOfInputValuesRBT, insert_TimeRBT, 'ro', linestyle='--', label=r'insert time in RedBlackTree')\r\n #plt1.plot (numberOfInputValuesSKL, insert_TimeSKL, 'mo', linestyle='--', label=r'insert time in SkipList')\r\n \r\n #plt1.set_ylabel('Time in nano sec 10^(-6)')\r\n #plt1.set_xlabel('Number of values per list from CSV')\r\n \r\n # Plot 2\r\n plt2.plot (numberOfInputValuesRBT, search_TimeRBT, 'bo', linestyle='--', label=r'Rootsearch in Tree ') \r\n plt2.plot (numberOfInputValuesRBT, search_MinMaxFinger_TimeRBT, 'go', linestyle='--', label=r'MinMax-Finger-Search in Tree') \r\n plt2.plot (numberOfInputValuesRBT, search_LazyFinger_TimeRBT, 'ko', linestyle='--', label=r'Lazy-Finger-Search in Tree') \r\n plt2.plot (numberOfInputValuesRBT, search_SplayFinger_TimeRBT, 'ro', linestyle='--', label=r'SplayTree-Finger-Search in Tree')\r\n \r\n plt2.set_ylabel('Total number of touched Nodes')\r\n plt2.set_xlabel('Number of Nodes in Datastructure (used from CSV)')\r\n \r\n # Plot 3\r\n # plt3.plot (numberOfInputValuesSKL, search_TimeSKL, 'ko', linestyle='--', label=r'Rootsearch time in SkipList ') \r\n # plt3.set_ylabel('Total number of touched Nodes')\r\n # plt3.set_xlabel('Number of Values from CSV')\r\n \r\n # Plot 4\r\n plt4.plot (numberOfInputValuesRBT, search_TimeRBT, 'bo', linestyle='--', label='Rootsearch in RedBlackTree ') \r\n plt4.plot (numberOfInputValuesRBT, search_SplayFinger_TimeRBT, 'ro', linestyle='--', label='SplayTree-Finger-Search in RedBlackTree') \r\n \r\n plt4.set_ylabel('Total number of touched Nodes')\r\n plt4.set_xlabel('Number of Nodes in Datastructure (used from CSV)')\r\n \r\n \r\n # show the legend:\r\n #plt1.legend(loc='upper left', frameon=True)\r\n plt2.legend(loc='upper left', frameon=True)\r\n #plt3.legend(loc='upper left', frameon=True)\r\n plt4.legend(loc='upper left', frameon=True)\r\n \r\n print (\"Plot is on Display\")\r\n plt.show()\r\n \r\n\r\n","sub_path":"main/PlotsCounterWindows.py","file_name":"PlotsCounterWindows.py","file_ext":"py","file_size_in_byte":12369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"245816979","text":"from django.db import models\n\n# Has: assignments\nclass WeekSchedule(models.Model):\n \"\"\"\n A service schedule for one week in the training.\n \"\"\"\n\n start = models.DateField() # should be the Tuesday of every week\n description = models.TextField(blank=True, null=True)\n # period = models.ForeignKey(Period) # ???Can't we get this from start?\n\n # workload calculations\n workload_margin = models.PositiveSmallIntegerField(default=2)\n\n # exceptions inactive for just this week\n silenced_exceptions = models.ManyToManyField('Exception', blank=True, verbose_name='Exceptions to ignore this week')\n\n #TODO\n # # average workload for this schedule\n @property\n def avg_workload(self):\n return self.instances.all().aggregate(Avg('workload')) / \\\n Worker.objects.filter(active=True)\n\n # Prevent from working way above the average trainee workload (standard of deviation)\n # avg_workload + margin = workload ceiling\n @property\n def workload_ceiling(self):\n 
return self.avg_workload + self.workload_margin\n\n ## Info on scheduler who created the schedule and info on last modified\n scheduler = models.ForeignKey('accounts.Trainee')\n last_modified = models.DateTimeField(auto_now=True)\n\n @staticmethod\n def current_week_schedule():\n \"\"\" TODO: Return the current week_schedule or if doesn't exist create and return \"\"\"\n return WeekSchedule.objects.latest('start')\n\n def __unicode__(self):\n return 'Week Schedule - ' + str(self.start)\n\n @classmethod\n def create(cls, start, desc, period):\n schedule = cls(start=start, description=desc, period=period)\n\n # create instances\n for sv in schedule.period.services:\n inst = Instance(service=sv, period=schedule.period)\n # since the week starts on Tuesday, add 6 and modulo 7 to get the\n # delta\n inst.date = schedule.start + \\\n timedelta(days=((int(sv.weekday) + 6) % 7))\n inst.save()\n schedule.instances.add(inst) # add created instance to this schedule\n\n return schedule\n\n def assign_designated_services(self):\n # assign designated services\n for dsv in self.instances.filter(service__designated=True):\n dsv.workers.add(dsv.service.designated_workers)\n\n def calculate_solution_space(self):\n # calculate solution space\n for worker in Worker.objects.filter(active=True):\n # clear any old eligibility data (e.g. from previous week)\n worker.services_eligible.clear()\n\n # if over workload ceiling, not eligible for any services, so skip\n if worker.workload >= self.workload_ceiling:\n continue\n\n # first assume everyone is eligible for every service\n worker.services_eligible.add(self.instances.all())\n\n # then remove based on exceptions\n worker.services_eligible.remove(worker.services_exempted)\n\n # remove based on gender\n if worker.account.gender == 'B':\n worker.services_eligible.remove(\n self.instances.filter(service__gender='S'))\n else:\n worker.services_eligible.remove(\n self.instances.filter(service__gender='B'))\n\n def assign(self, workers, instance, role='wor', commit=False):\n \"\"\" assign workers to a service instance \"\"\"\n\n warnings = list()\n # convert to list if passed single worker\n if type(workers) is not list: workers = [workers]\n\n for worker in workers:\n # check worker's exceptions against instance\n for exception in worker.exceptions:\n if not exception.checkException(worker, instance):\n warnings.append(LogEvent.exception_violated(\n self, exception, instance, worker))\n\n # check worker's new workload versus workload ceiling\n if (worker.workload + instance.workload) > self.workload_ceiling:\n warnings.append(LogEvent.workload_excessive(\n self, instance, worker, worker.workload + instance.workload))\n\n if commit: # dry-run by default to preview warnings\n Assignment(instance=instance, worker=worker, role=role,\n schedule=self).save() # assign worker to instance\n for warning in warnings:\n warning.save() # write warnings to log\n # recalculate solution space\n if worker.workload > self.workload_ceiling:\n worker.services_eligible.clear()\n else:\n # remove same-day services\n worker.services_eligible.remove(self.instances.filter(date=instance.date))\n worker.save()\n\n return warnings\n\n def unassign(self, worker, instance):\n \"\"\" unassign a worker from a service instance \"\"\"\n\n # delete service assignment\n Assignment.objects.get(instance=instance, worker=worker).delete()\n # restore workload\n worker.workload -= instance.workload\n\n if worker.workload > self.workload_ceiling:\n worker.save()\n return # terminate early\n\n # otherwise, rebuild solution space for this worker:\n # add all services again\n worker.services_eligible.add(self.instances.all())\n # then remove based on exceptions\n worker.services_eligible.remove(worker.services_exempted)\n # remove based on gender\n if worker.account.gender == 'B':\n worker.services_eligible.remove(\n self.instances.filter(service__gender='S'))\n else:\n worker.services_eligible.remove(\n self.instances.filter(service__gender='B'))\n\n # then simulate reassigning current services\n for inst in worker.instance_set:\n worker.services_eligible.remove(self.instances.filter(date=inst.date))\n\n
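`assign` above is a dry run unless `commit=True`, returning the `LogEvent` warnings it would have written. A usage sketch in that spirit (`schedule`, `worker`, and `instance` are assumed to be existing model objects):

```python
# Preview the exception/workload warnings first, then persist the assignment
# only once the scheduler accepts them (commit=False is the default).
warnings = schedule.assign(worker, instance)
if not warnings:
    schedule.assign(worker, instance, commit=True)
```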
def heuristic(self, instance, pick=1):\n \"\"\" heuristic to choose a worker from an instance's eligible workers \"\"\"\n\n workers=instance.workers_eligible.annotate(\n num_eligible=Count('services_eligible'))\n\n # sort by:\n # how many services the trainee is eligible for\n # trainee's current workload\n workers = workers.order_by('workload', 'num_eligible')\n return workers[:pick]\n\n\n def fill(self, instances):\n \"\"\" takes a list of instances and automatically assigns workers to them \"\"\"\n\n # yes, i know nested loops are bad.\n while instances:\n # sort instances by number of eligible workers\n instances.sort(key=lambda inst: inst.workers_eligible.count())\n instance = instances.pop()\n while not instance.filled and instance.workers_eligible.count() > 0:\n if instance.workers_eligible.count() <= instance.workers_needed:\n # assign everyone if not enough workers\n self.assign(instance.workers_eligible, instance, commit=True)\n else:\n self.assign(self.heuristic(instance, pick=1), instance, commit=True)\n\n def validate(self):\n \"\"\" validate this schedule, report any warnings \"\"\"\n LogEvent.info(self, \"beginning validation\").save()\n\n # check instances are filled\n for instance in self.instances:\n if not instance.filled:\n LogEvent.instance_unfilled(self, instance)\n else:\n continue\n\n for worker in Worker.objects.filter(active=True):\n # check each worker's assignments against exceptions\n if worker.services_exempted and worker.assignment_set.filter(schedule=self).values('service'):\n pass\n # issue exception warnings\n\n # check workload ceilings\n if worker.workload > self.workload_ceiling:\n LogEvent.workload_excessive(self, worker).save()\n\n def finalize(self):\n Worker.objects.filter(active=True).update(weeks=F('weeks') + 1)\n self.validate()\n","sub_path":"ap/services/models/week_schedule.py","file_name":"week_schedule.py","file_ext":"py","file_size_in_byte":8144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"262181069","text":"\"\"\"\nlinear_least_squares.py\n\nhttps://en.wikipedia.org/wiki/Linear_least_squares\n\"\"\"\n\nimport numpy as np\n\ndef ordinary_least_squares_estimate(h_mat, y_mat):\n # convert to numpy arrays\n h_mat = np.array(h_mat)\n y_mat = np.array(y_mat)\n estimator = get_ordinary_least_squares_estimator(h_mat)\n est = np.matmul(estimator, y_mat)\n return est\n\ndef get_ordinary_least_squares_estimator(h_mat):\n h_mat = np.array(h_mat) \n estimator = np.matmul(np.linalg.inv(np.matmul(h_mat.T, h_mat)), h_mat.T)\n return estimator\n\ndef generalized_least_squares_estimate(h_mat, y_mat, cov_mat):\n h_mat = np.array(h_mat)\n y_mat = np.array(y_mat)\n cov_mat = np.array(cov_mat)\n estimator = get_generalized_least_squares_estimator(h_mat, cov_mat)\n est = np.matmul(estimator, y_mat)\n return est\n\ndef get_generalized_least_squares_estimator(h_mat, cov_mat):\n h_mat = np.array(h_mat)\n cov_mat = np.array(cov_mat)\n weights = np.linalg.inv(cov_mat)\n inv = np.linalg.inv(np.matmul(np.matmul(h_mat.T, weights), h_mat))\n estimator = np.matmul(np.matmul(inv, h_mat.T), weights)\n return estimator\n","sub_path":"estimation/linear_least_squares.py","file_name":"linear_least_squares.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
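For reference, the two estimators implemented in `linear_least_squares.py` above are the standard normal-equation forms, with H the design matrix and Σ the error covariance:

```latex
\hat{\beta}_{\mathrm{OLS}} = (H^{\top} H)^{-1} H^{\top} y,
\qquad
\hat{\beta}_{\mathrm{GLS}} = (H^{\top} \Sigma^{-1} H)^{-1} H^{\top} \Sigma^{-1} y
```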
+{"seq_id":"303754108","text":"# Project Euler --> https://projecteuler.net/problem=4\n# Problem 4 : Largest palindrome product\n# A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.\n# Find the largest palindrome made from the product of two 3-digit numbers.\n\nn = 0\nfor a in range(999, 100, -1):\n for b in range(a, 500, -1): # I used 500 to save time and avoid duplication\n x = a * b\n if x > n: # we check if x is a palindrome and if it is we will store it in n\n s = str(x)\n if s == s[::-1]:\n n = x\nprint(n)\n","sub_path":"p4_Largest_palindrome_product.py","file_name":"p4_Largest_palindrome_product.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"382626506","text":"# HYDROGEN_S - calculate hydrogen in spherical coordinates\n# beta is B/Bc, M is number of mu, N is number of radial\n# the wavefunction is phi=f(r, mu=cos(theta))/r exp(i m phi)\n# eigenfunctions in V, e-val in Lam, coords in (w,rs)\n\nfrom cheb import cheb\nimport numpy as np\nfrom scipy.linalg import eig\n\ndef hydrogen_s(beta,M,N,mphi):\n# w coordinate, ranging from -1 to 1\n Dw,w = cheb(M)\n D2w = np.dot(Dw,Dw)\n if (mphi!=0):\n w=w[1:-1]\n Dw=Dw[1:-1,1:-1]\n D2w=D2w[1:-1,1:-1]\n Hw=-np.dot(np.diag(1-w*w),D2w)+np.dot(np.diag(2.*w),Dw);\n# r coordinate, ranging from 1 to -1, rp from 1 to 0\n D,r = cheb(N)\n rp = 0.5*(r+1)\n D = 2*D\n D2 = np.dot(D,D)\n hh = np.diag(1-rp*rp)\n D2 = np.dot(hh,( np.dot(hh,D2)+ np.dot(np.diag(-2*r),D)))\n D = np.dot(hh,D)\n D2 = D2[1:-1,1:-1]\n D = D[1:-1,1:-1]\n rp = rp[1:-1]\n# zoom factor: set by coulomb and larmor radius; rs from inf to 0\n zoom=1/(1.0/110.0+beta**0.5/41)\n rs=zoom*np.arctanh(rp)\n R = np.diag(1/rs)\n R2 = np.diag(1/(rs*rs))\n Hr=-1/(zoom*zoom)*D2-2*R\n ww,rr = np.meshgrid(w,rs)\n rr = np.ravel(rr)\n ww = np.ravel(ww)\n rperp2=rr*rr*(1-ww*ww)\n if (mphi==0):\n H = np.kron(Hr,np.eye(M+1))+np.kron(R2,Hw)+np.diag(beta*beta*rperp2)\n else:\n H = np.kron(Hr,np.eye(M-1))+np.kron(R2,Hw)+np.diag(beta*beta*rperp2)+np.diag(mphi*mphi/rperp2)\n Lam,V = eig(H)\n ii=np.argsort(Lam)\n Lam = Lam[ii]\n Lam = Lam+2*beta*(mphi-1)\n V = V[:,ii]\n# check outer B.C. 
and for bound states\n igood=0\n# igood = find((V(1,:).*V(1,:))'<(M*N)^(-2)*1e-4 & Lam<0);\n# \n return V,Lam,w,rs,igood,zoom\n","sub_path":"hydrogen_s.py","file_name":"hydrogen_s.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"524665868","text":"import json\nimport os\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom stucture import Dataset\n\n\ndef get_project_root() -> Path:\n return Path(__file__).parent # nb of .parent calls depends on file location\n\n\ndef load_dataset_raw(i):\n \"\"\" Load and return Dataset nb i \"\"\"\n with open(os.path.join(get_project_root(), f'data/{i:d}.json')) as f:\n data = json.load(f)\n return data\n\n\ndef load_datasets_raw(i):\n \"\"\" Load and return Datasets \"\"\"\n datasets = []\n for i in range(1, 7):\n datasets.append(load_dataset_raw(i))\n return datasets\n\n\ndef load_dataset(i):\n \"\"\" Load and return Dataset nb i \"\"\"\n with open(os.path.join(get_project_root(), f'data/{i:d}.json')) as f:\n data = json.load(f)\n quotas = data['quotas']\n workers = data['workers']\n dataset = Dataset(i, quotas, workers)\n return dataset\n\n\ndef load_datasets():\n \"\"\" Load and return the list of the 6 Datasets \"\"\"\n\n datasets = []\n for i in range(1, 7):\n datasets.append(load_dataset(i))\n return datasets\n\n\ndef get_score(solution, dataset):\n import collections\n peoplePerTrain = collections.Counter()\n for worker in solution:\n morningOptions = dataset['workers'][worker['name']]['morningOptions']\n for train in morningOptions[worker['morningOptionIndex']]:\n peoplePerTrain[train] += 1\n eveningOptions = dataset['workers'][worker['name']]['eveningOptions']\n for train in eveningOptions[worker['eveningOptionIndex']]:\n peoplePerTrain[train] += 1\n return sum(v * (v - 1) for v in peoplePerTrain.values())\n\n\nclass NpEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NpEncoder, self).default(obj)\n\n\ndef write_solution(solution, i, prefix=''):\n with open(os.path.join(get_project_root(), f'solution/{prefix}_{i:d}.json'), 'w') as file:\n file.write(json.dumps(solution, cls=NpEncoder))\n\n\ndef write_solutions(solutions, prefix=''):\n for i, solution in enumerate(solutions):\n with open(os.path.join(get_project_root(), f'solution/{i+1:d}.txt'), 'w') as file:\n file.write(json.dumps(solution))\n","sub_path":"AXAcodeChallenge/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"520844004","text":"from typing import List\nimport bisect\nimport collections\n\ndef accountsMerge(accounts: List[List[str]]) -> List[List[str]]:\n adj = [set() for _ in accounts]\n emails = {} # email -> index\n for i, account in enumerate(accounts):\n for e in account[1:]:\n if e in emails: \n adj[i].add(emails[e])\n adj[emails[e]].add(i) # bi-directional\n emails[e] = i\n\n s, res = [], []\n for i in reversed(range(len(adj))):\n if adj[i] is not None:\n res.append([])\n s.append(i)\n while s:\n cur = s.pop()\n if adj[cur] is not None:\n for e in accounts[cur][1:]:\n if emails[e] != -1:\n bisect.insort_left(res[-1], e)\n emails[e] = -1\n for n in adj[cur]: s.append(n)\n adj[cur] = None\n res[-1][0:0] = [accounts[i][0]]\n\n return 
res\n","sub_path":"graph/dfs/accounts_merge/accounts_merge.py","file_name":"accounts_merge.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"364315292","text":"\"\"\"A tiny Python program to check that nginx is running and start it if not.\nTry running this program from the command line like this:\n python3 start_webserver.py\n\"\"\"\n\nimport subprocess\n\n\ndef checknginx():\n try:\n cmd = 'ps -A | grep nginx'\n\n subprocess.run(cmd, check=True, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(\"Nginx Server IS running\")\n\n except subprocess.CalledProcessError:\n print(\"Nginx Server IS NOT running\")\n\n\n# Define a main() function.\ndef main():\n checknginx()\n\n\n# This is the standard boilerplate that calls the main() function.\nif __name__ == '__main__':\n main()\n","sub_path":"check_webserver.py","file_name":"check_webserver.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"640701134","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for tna_bookshop_webcrawler project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. You can find more settings consulting the documentation:\n#\n# https://doc.scrapy.org/en/latest/topics/settings.html\n# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html\n# https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'tna_bookshop_webcrawler'\n\nSPIDER_MODULES = ['tna_bookshop_webcrawler.spiders']\nNEWSPIDER_MODULE = 'tna_bookshop_webcrawler.spiders'\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\nUSER_AGENT = 'tna_bookshop_webcrawler (+http://www.nationalarchives.gov.uk/)'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = True\n\nDEPTH_LIMIT = 100\nDOWNLOAD_DELAY = 0.2\n\n#LOG_LEVEL = 'INFO'\n#LOG_FILE = 'bookshop_dev.log'\n\nCOOKIES_ENABLED = False\nRETRY_ENABLED = False\n\n\n# ITEM_PIPELINES = {'scrapyelasticsearch.scrapyelasticsearch.ElasticSearchPipeline': 100,\n# 'tna_bookshop_webcrawler.pipelines.MongoDBPipeline': 200}\n\nITEM_PIPELINES = {'scrapyelasticsearch.scrapyelasticsearch.ElasticSearchPipeline': 100}\n\n# ITEM_PIPELINES = {'tna_bookshop_webcrawler.pipelines.MongoDBPipeline': 200}\n\n\n\"MONGODB PIPELINE SETTINGS\"\nMONGODB_SERVER = \"localhost\"\nMONGODB_PORT = 27017\nMONGODB_DB = 'tna_webcrawlers'\nMONGODB_COLLECTION = 'tna_bookshop'\nMONGODB_UNIQ_KEY = 'ID'\nMONGODB_ITEM_ID_FIELD = '_id'\n\n\"ELASTIC PIPELINE SETTINGS\"\nELASTICSEARCH_SERVERS = ['localhost']\nELASTICSEARCH_INDEX = 'discovery_website_dev'\nELASTICSEARCH_TYPE = 'tnawebpageassetview'\nELASTICSEARCH_UNIQ_KEY = 'ID'\n\n\n'''\"SOLR PIPELINE SETTINGS\"\nSOLR_URL = 'http://localhost:8983/solr/website_dev'\nSOLR_MAPPING = {\n 'id': 'url',\n 'CONTENT': 'content',\n 'DOCREFERENCE': 'url',\n 'TITLE': 'title',\n 'CATEGORY': 'category',\n 'TYPE': 'type',\n 'SOURCE': 'source',\n 'KEYWORDS': 'keywords',\n 'MODIFIED': 'modified',\n 'SORTDATE': 'sortdate'\n }'''","sub_path":"webcrawlers/tna_bookshop_webcrawler/tna_bookshop_webcrawler/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"146638253","text":"from baseTFLearnModel import BaseTFLearnModel\n\nfrom tflearn import DNN\nfrom tflearn.layers.conv import conv_2d, 
max_pool_2d\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\n\nclass CNNModel3D(BaseTFLearnModel):\n\tdef __init__(self, config):\n\t\tsuper().__init__(config)\n\t\n\tdef createModel(self):\n\t\tprint(\"Creating model...\")\n\t\timageSize = self.config.sequenceLength\n\t\tnbClasses = self.config.nbClasses\n\t\tnChannels = self.config.nChannels\n\n\t\tnet = input_data(shape=[None, imageSize, imageSize, nChannels], name='input')\n\n\t\tnet = conv_2d(net, 8, 2, activation='elu', weights_init=\"Xavier\", name='conv1')\n\t\tnet = max_pool_2d(net, 2)\n\n\t\tnet = conv_2d(net, 4, 4, activation='elu', weights_init=\"Xavier\", name='conv2')\n\t\tnet = max_pool_2d(net, 2)\n\n\t\tnet = conv_2d(net, 2, 8, activation='elu', weights_init=\"Xavier\", name='conv3')\n\t\tnet = max_pool_2d(net, 2)\n\n\t\tnet = conv_2d(net, 1, 16, activation='elu', weights_init=\"Xavier\", name='conv4')\n\t\tnet = max_pool_2d(net, 2)\n\n\t\tnet = fully_connected(net, imageSize*8, activation='elu')\n\t\tnet = dropout(net, 0.5)\n\n\t\tnet = fully_connected(net, nbClasses, activation='softmax')\n\t\tnet = regression(net, optimizer='rmsprop', loss='categorical_crossentropy')\n\n\t\tself.model = DNN(net)\n\t\tprint(\"Model created!\")\n\n\tdef datasetName(self):\n\t\treturn 'CNNModel'","sub_path":"modelCNN3D.py","file_name":"modelCNN3D.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"559220619","text":"#! /usr/bin/env python\n\nimport re\n\n# for mapsplice2\nReFus_ms2 = re.compile('FUS_(\\d+)_(\\d+)\\(([\\-\\+])([\\-\\+])\\)')\n\n# for STAR\ncigarSRe_right = re.compile('(\\d+)S$')\ncigarSRe_left = re.compile('^(\\d+)S')\n\nabnormal_insert_size = 500000\nmin_major_clip_size = 15 \n\n\ndef getFusInfo_ms2(tempLine, fusInfo):\n\n # check the fusion validity\n ufusInfo = list(set(fusInfo))\n for fus in ufusInfo:\n if fus == 0: continue\n\n chr_primary, pos_primary, dir_primary, chr_pair, pos_pair, dir_pair, chr_chimera, pos_chimera, dir_chimera = \"*\", \"*\", \"*\", \"*\", \"*\", \"*\", \"*\", \"*\", \"*\"\n mq_primary, cover_primary, mq_pair, cover_pair, mq_chimera, cover_chimera = \"*\", \"*\", \"*\", \"*\", \"*\", \"*\"\n # index showing the order of fusion \n # 1: FUS_(breakPos_primary)_(breakPos_chimera)_(breakDir_primary)_(breakDir_chimera),\n # -1: FUS_(breakPos_chimera)_(breakPos_primary)_(breakDir_chimera)_(breakDir_primary)\n fusOrder = 0\n \n for i in range(0, len(tempLine)):\n \n FF = tempLine[i].split('\\t')\n flags = format(int(FF[1]), \"#014b\")[:1:-1]\n if flags[8] == \"1\" and fusInfo[i] == fus:\n chr_chimera = FF[2]\n pos_chimera = FF[3]\n dir_chimera = (\"+\" if flags[4] != \"1\" else \"-\")\n mq_chimera = FF[4]\n cover_chimera = myCigar.getCoverRegion(FF[2], FF[3], FF[5])\n if fusOrder == 0: fusOrder = -1\n elif flags[8] != \"1\" and fusInfo[i] == fus:\n chr_primary = FF[2]\n pos_primary = FF[3]\n dir_primary = (\"+\" if flags[4] != \"1\" else \"-\")\n mq_primary = FF[4]\n cover_primary = myCigar.getCoverRegion(FF[2], FF[3], FF[5])\n if fusOrder == 0: fusOrder = 1\n elif flags[8] != \"1\" and flags[2] != \"1\" and (fusInfo[i] != fus or chr_primary != \"*\"):\n chr_pair = FF[2]\n pos_pair = FF[3]\n dir_pair = (\"+\" if flags[4] != \"1\" else \"-\")\n mq_pair = FF[4]\n cover_pair = myCigar.getCoverRegion(FF[2], FF[3], FF[5])\n\n if chr_primary == \"*\": continue\n fusSplit = fus.split(',')\n if fusOrder == 1:\n breakPos_primary, breakPos_chimera, 
breakDir_primary, breakDir_chimera = fusSplit[0], fusSplit[1], fusSplit[2], fusSplit[3]\n breakDir_chimera = (\"+\" if breakDir_chimera == \"-\" else \"-\") \n else:\n breakPos_primary, breakPos_chimera, breakDir_primary, breakDir_chimera = fusSplit[1], fusSplit[0], fusSplit[3], fusSplit[2]\n breakDir_primary = (\"+\" if breakDir_primary == \"-\" else \"-\")\n\n pairPos = 0\n if pos_pair != \"*\":\n if breakDir_primary == \"+\" and dir_pair == \"+\" and int(breakPos_primary) - 500000 <= int(pos_pair) <= int(breakPos_primary): pairPos = 1\n if breakDir_primary == \"-\" and dir_pair == \"-\" and int(breakPos_primary) <= int(pos_pair) <= int(breakPos_primary) + 500000: pairPos = 1\n if breakDir_chimera == \"+\" and dir_pair == \"+\" and int(breakPos_chimera) - 500000 <= int(pos_pair) <= int(breakPos_chimera): pairPos = 2\n if breakDir_chimera == \"-\" and dir_pair == \"-\" and int(breakPos_chimera) <= int(pos_pair) <= int(breakPos_chimera) + 500000: pairPos = 2\n\n if chr_primary < chr_chimera or chr_primary == chr_chimera and breakPos_primary <= breakPos_chimera:\n return '\\t'.join([chr_primary, breakPos_primary, breakDir_primary, chr_chimera, breakPos_chimera, breakDir_chimera, \"---\", tempID, \\\n mq_primary, cover_primary, dir_primary, mq_pair, cover_pair, dir_pair, \\\n mq_chimera, cover_chimera, dir_chimera, str(pairPos), \"1\"] )\n else:\n return '\\t'.join([chr_chimera, breakPos_chimera, breakDir_chimera, chr_primary, breakPos_primary, breakDir_primary, \"---\", tempID, \\\n mq_primary, cover_primary, dir_primary, mq_pair, cover_pair, dir_pair, \\\n mq_chimera, cover_chimera, dir_chimera, str(pairPos), \"2\"] )\n\n\n\n\n\ndef parseJunctionInfo_ms2(inputFilePath, outputFilePath):\n\n \"\"\"\n script for collecting short reads supporting fusion candidates in MapSplice2 sam file\n \"\"\"\n\n hIN = open(inputFilePath, 'r')\n hOUT = open(outputFilePath, 'w')\n\n tempID = \"\"\n fusFlag = [] \n fusInfo = []\n tempLine = []\n\n for line in hIN:\n if line[0] == \"@\": continue\n line = line.rstrip('\\n')\n F = line.split('\\t')\n\n if tempID != F[0]:\n if fusInfo.count(0) != len(tempLine):\n print > hOUT, getFusInfo_ms2(tempLine, fusInfo)\n\n tempID = F[0]\n fusFlag = []\n fusInfo = [] \n tempLine = [] \n \n tempLine.append(line)\n mFus = ReFus.search('\\t'.join(F[11:]))\n if mFus is not None:\n fusFlag.append(1)\n fusInfo.append(','.join([mFus.group(1), mFus.group(2), mFus.group(3), mFus.group(4)]))\n else:\n fusFlag.append(0)\n fusInfo.append(0)\n\n hIN.close()\n\n\n if fusInfo.count(0) != len(tempLine):\n print > hOUT, getFusInfo_ms2(tempLine, fusInfo)\n\n hOUT.close()\n\n\n\ndef getFusInfo_STAR(juncLine):\n\n \"\"\"\n function for organizing and print junction information\n \"\"\"\n\n # be reminded necessary variables\n chr_primary, pos_primary, dir_primary, chr_pair, pos_pair, dir_pair, chr_SA, pos_SA, dir_SA = \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"\n mq_primary, coverRegion_primary, mq_pair, coverRegion_pair, mq_SA, coverRegion_SA = \"\", \"\", \"\", \"\", \"\", \"\"\n juncChr_primary, juncPos_primary, juncDir_primary, juncChr_SA, juncPos_SA, juncDir_SA = \"\", \"\", \"\", \"\", \"\", \"\" \n\n # collect about the supplementary alingment information\n right_clipping_SA = 0\n left_clipping_SA = 0\n flags_SA = \"\"\n for line in juncLine:\n F = line.split('\\t')\n flags = format(int(F[1]), \"#014b\")[:1:-1] \n if flags[8] != \"1\": continue\n\n if chr_SA != \"\":\n print >> sys.stderr, \"Multiple supplementary alignment at:\" + '\\n' + '\\n'.join(juncLine)\n\n chr_SA = 
F[2]\n juncChr_SA = F[2]\n pos_SA = int(F[3])\n dir_SA = (\"-\" if flags[4] == \"1\" else \"+\")\n mq_SA = F[4]\n coverRegion_SA = myCigar.getCoverRegion(F[2], F[3], F[5])\n endPos_SA = myCigar.getEndPos(pos_SA, F[5])\n\n flags_SA = flags\n if flags_SA[6] == flags_SA[7]: print >> sys.stderr, \"The supplementary Read is both first and second reads at:\" + '\\n' + '\\n'.join(juncLine)\n\n tempMatch = cigarSRe_right.search(F[5])\n if tempMatch is not None: right_clipping_SA = int(tempMatch.group(1))\n\n tempMatch = cigarSRe_left.search(F[5])\n if tempMatch is not None: left_clipping_SA = int(tempMatch.group(1))\n\n # collect information about primary junction read and its pair read\n right_clipping_primary = 0\n left_clipping_primary = 0\n cigar_primary = \"\"\n readLength_primary = 0\n readID_primary = \"\"\n for line in juncLine:\n F = line.split('\\t')\n flags = format(int(F[1]), \"#014b\")[:1:-1]\n if flags[8] == \"1\": continue\n\n # primary junction read\n if flags[6] == flags_SA[6] and flags[7] == flags_SA[7]:\n chr_primary = F[2]\n juncChr_primary = F[2]\n pos_primary = int(F[3])\n dir_primary = (\"-\" if flags[4] == \"1\" else \"+\")\n mq_primary = F[4]\n coverRegion_primary = myCigar.getCoverRegion(F[2], F[3], F[5])\n readLength_primary = len(F[9])\n endPos_primary = myCigar.getEndPos(pos_primary, F[5])\n readID_primary = F[0] + (\"/1\" if flags[6] == \"1\" else \"/2\")\n\n tempMatch = cigarSRe_right.search(F[5])\n if tempMatch is not None: right_clipping_primary = int(tempMatch.group(1))\n \n tempMatch = cigarSRe_left.search(F[5])\n if tempMatch is not None: left_clipping_primary = int(tempMatch.group(1))\n\n elif flags[6] == flags_SA[7] and flags[7] == flags_SA[6]:\n chr_pair = F[2]\n juncChr_pair = F[2]\n pos_pair = int(F[3])\n dir_pair = (\"-\" if flags[4] == \"1\" else \"+\")\n mq_pair = F[4]\n coverRegion_pair = myCigar.getCoverRegion(F[2], F[3], F[5])\n else:\n print >> sys.stderr, \"The following read is both first and second reads at:\" + '\\n' + line\n\n\n if right_clipping_primary >= min_major_clip_size:\n\n clipLen_primary = right_clipping_primary\n\n juncChr_primary = chr_primary\n juncPos_primary = endPos_primary\n juncDir_primary = \"+\"\n\n expected_clipLen_SA = readLength_primary - clipLen_primary\n expected_clipDir_SA = (\"-\" if dir_primary== dir_SA else \"+\")\n\n validFlag = 0\n juncDir_SA = \"\"\n juncPos_SA = \"\"\n\n # the pair read is aligned at the same chromosome with the primary read\n if dir_primary == \"-\" and dir_pair == \"+\" and chr_primary == chr_pair and 0 <= pos_primary - pos_pair < abnormal_insert_size:\n\n if dir_SA == \"+\" and expected_clipDir_SA == \"+\" and right_clipping_SA > 0:\n clipLen_SA = right_clipping_SA\n juncDir_SA = \"+\"\n juncPos_SA = endPos_SA\n if clipLen_SA < expected_clipLen_SA: juncPos_SA = juncPos_SA - (expected_clipLen_SA - clipLen_SA)\n juncType = 1\n validFlag = 1\n\n if dir_SA == \"-\" and expected_clipDir_SA == \"-\" and left_clipping_SA > 0:\n clipLen_SA = left_clipping_SA\n juncDir_SA = \"-\"\n juncPos_SA = int(pos_SA)\n if clipLen_SA < expected_clipLen_SA: juncPos_SA = juncPos_SA + (expected_clipLen_SA - clipLen_SA)\n juncType = 1\n validFlag = 1\n\n # when the supplementary read is aligned on the same chromosome with the paired read\n if dir_primary == \"+\" and chr_SA == chr_pair:\n\n if dir_SA == \"-\" and dir_pair == \"+\" and 0 <= pos_SA - pos_pair < abnormal_insert_size and expected_clipDir_SA == \"+\" and right_clipping_SA > 0:\n clipLen_SA = right_clipping_SA\n juncDir_SA = \"+\"\n juncPos_SA = endPos_SA\n 
if clipLen_SA < expected_clipLen_SA: juncPos_SA = juncPos_SA - (expected_clipLen_SA - clipLen_SA)\n juncType = 2\n validFlag = 1\n\n if dir_SA == \"+\" and dir_pair == \"-\" and 0 <= pos_pair - pos_SA < abnormal_insert_size and expected_clipDir_SA == \"-\" and left_clipping_SA > 0:\n clipLen_SA = left_clipping_SA\n juncDir_SA = \"-\"\n juncPos_SA = pos_SA\n if clipLen_SA < expected_clipLen_SA: juncPos_SA = juncPos_SA + (expected_clipLen_SA - clipLen_SA)\n juncType = 2\n validFlag = 1\n\n if validFlag == 1:\n\n juncSurplus = \"---\"\n if clipLen_SA > expected_clipLen_SA:\n surPlus_start = readLength_primary - clipLen_primary\n surPlus_end = surPlus_start + clipLen_SA - expected_clipLen_SA\n juncSurplus = read.seq[surPlus_start:surPlus_end]\n\n # reorder by the chromosome position and print\n if juncChr_primary < juncChr_SA or juncChr_primary == juncChr_SA and juncPos_primary <= juncPos_SA:\n return '\\t'.join([juncChr_primary, str(juncPos_primary), juncDir_primary, juncChr_SA, str(juncPos_SA), juncDir_SA, \\\n juncSurplus, readID_primary, mq_primary, coverRegion_primary, dir_primary, \\\n mq_pair, coverRegion_pair, dir_pair, mq_SA, coverRegion_SA, dir_SA, str(juncType) , \"1\"])\n\n else:\n return '\\t'.join([juncChr_SA, str(juncPos_SA), juncDir_SA, juncChr_primary, str(juncPos_primary), juncDir_primary, \\\n juncSurplus, readID_primary, mq_primary, coverRegion_primary, dir_primary, \\\n mq_pair, coverRegion_pair, dir_pair, mq_SA, coverRegion_SA, dir_SA, str(juncType) , \"2\"])\n\n\n if left_clipping_primary >= min_major_clip_size:\n\n clipLen_primary = left_clipping_primary\n juncChr_primary = chr_primary\n juncPos_primary = int(pos_primary)\n juncDir_primary = \"-\"\n juncChr_SA = chr_SA\n\n expected_clipLen_SA = readLength_primary - clipLen_primary\n expected_clipDir_SA = (\"+\" if dir_primary== dir_SA else \"-\")\n\n validFlag = 0\n juncDir_SA = \"\"\n juncPos_SA = \"\"\n # the pair read is aligned at the same chromosome with the primary read\n if dir_primary == \"+\" and dir_pair == \"-\" and chr_primary == chr_pair and 0 <= pos_pair - pos_primary < abnormal_insert_size:\n\n if dir_SA == \"+\" and expected_clipDir_SA == \"+\" and right_clipping_SA > 0:\n clipLen_SA = right_clipping_SA\n juncDir_SA = \"+\"\n juncPos_SA = endPos_SA\n if clipLen_SA < expected_clipLen_SA: juncPos_SA = juncPos_SA - (expected_clipLen_SA - clipLen_SA)\n juncType = 1\n validFlag = 1\n\n if dir_SA == \"-\" and expected_clipDir_SA == \"-\" and left_clipping_SA > 0:\n clipLen_SA = left_clipping_SA\n juncDir_SA = \"-\"\n juncPos_SA = pos_SA\n if clipLen_SA < expected_clipLen_SA: juncPos_SA = juncPos_SA + (expected_clipLen_SA - clipLen_SA)\n juncType = 1\n validFlag = 1\n\n if dir_primary == \"-\" and chr_SA == chr_pair:\n\n if dir_SA == \"-\" and dir_pair == \"+\" and 0 <= pos_SA - pos_pair < abnormal_insert_size and expected_clipDir_SA == \"+\" and right_clipping_SA > 0:\n clipLen_SA = right_clipping_SA\n juncDir_SA = \"+\"\n juncPos_SA = endPos_SA \n if clipLen_SA < expected_clipLen_SA: juncPos_SA = juncPos_SA - (expected_clipLen_SA - clipLen_SA)\n juncType = 2\n validFlag = 1\n\n if dir_SA == \"+\" and dir_pair == \"-\" and 0 <= pos_pair - pos_SA < abnormal_insert_size and expected_clipDir_SA == \"-\" and left_clipping_SA:\n clipLen_SA = left_clipping_SA\n juncDir_SA = \"-\"\n juncPos_SA = pos_SA\n if clipLen_SA < expected_clipLen_SA: juncPos_SA = juncPos_SA + (expected_clipLen_SA - clipLen_SA)\n juncType = 2\n validFlag = 1\n\n\n if validFlag == 1:\n\n juncSurplus = \"---\"\n if clipLen_SA > 
expected_clipLen_SA:\n surPlus_end = clipLen_primary # this is right\n surPlus_start = surPlus_end - (clipLen_SA - expected_clipLen_SA)\n juncSurplus = read.seq[surPlus_start:surPlus_end]\n\n # reorder by the chromosome position and print\n if juncChr_primary < juncChr_SA or juncChr_primary == juncChr_SA and juncPos_primary <= juncPos_SA:\n return '\\t'.join([juncChr_primary, str(juncPos_primary), juncDir_primary, juncChr_SA, str(juncPos_SA), juncDir_SA, \\\n juncSurplus, readID_primary, mq_primary, coverRegion_primary, dir_primary, \\\n mq_pair, coverRegion_pair, dir_pair, mq_SA, coverRegion_SA, dir_SA, str(juncType) , \"1\"])\n \n else: \n return '\\t'.join([juncChr_SA, str(juncPos_SA), juncDir_SA, juncChr_primary, str(juncPos_primary), juncDir_primary, \\\n juncSurplus, readID_primary, mq_primary, coverRegion_primary, dir_primary, \\\n mq_pair, coverRegion_pair, dir_pair, mq_SA, coverRegion_SA, dir_SA, str(juncType) , \"2\"])\n\n\n\ndef parseJuncInfo_STAR(inputFilePath, outputFilePath):\n\n hIN = open(inputFilePath, 'r')\n hOUT = open(outputFilePath, 'w')\n\n tempID = \"\"\n tempLine = []\n for line in hIN:\n line = line.rstrip('\\n')\n F = line.split('\\t')\n if F[0][0] == \"@\": continue\n\n if tempID != F[0]:\n if tempID != \"\" and len(tempLine) == 3:\n print >> hOUT, getJuncInfo_STAR(tempLine)\n\n tempID = F[0]\n tempLine = []\n\n tempLine.append(line)\n\n hIN.close()\n\n\n if tempID != \"\" and len(tempLine) == 3:\n print >> hOUT, getJuncInfoSTAR(tempLine)\n\n hOUT.close()\n\n\n\ndef clusterJuncInfo(inputFilePath, outputFilePath):\n\n hIN = open(inputFilePath, 'r')\n hOUT = open(outputFilePath, 'w')\n\n tempKey = \"\"\n for line in hIN:\n F = line.rstrip('\\n').split('\\t')\n key = '\\t'.join(F[0:6])\n\n if tempKey != key:\n if tempKey != \"\":\n print >> hOUT,'\\t'.join([tempKey, ';'.join(tempInseq), ';'.join(tempIDs), ';'.join(tempMQ_primary), ';'.join(tempCover_primary), ';'.join(tempDir_primary), \\\n ';'.join(tempMQ_pair), ';'.join(tempCover_pair), ';'.join(tempDir_pair), \\\n ';'.join(tempMQ_SA), ';'.join(tempCover_SA), ';'.join(tempDir_SA), ';'.join(tempPairPos), ';'.join(tempPrimaryPos)])\n \n\n tempKey = key\n tempIDs = []\n tempInseq = []\n tempMQ_primary = []\n tempCover_primary = []\n tempDir_primary = []\n tempMQ_pair = []\n tempCover_pair = []\n tempDir_pair = []\n tempMQ_SA = []\n tempCover_SA = []\n tempDir_SA = []\n tempPairPos = []\n tempPrimaryPos = []\n\n tempInseq.append(F[6])\n tempIDs.append(F[7])\n tempMQ_primary.append(F[8])\n tempCover_primary.append(F[9])\n tempDir_primary.append(F[10])\n tempMQ_pair.append(F[11])\n tempCover_pair.append(F[12])\n tempDir_pair.append(F[13])\n tempMQ_SA.append(F[14])\n tempCover_SA.append(F[15])\n tempDir_SA.append(F[16])\n tempPairPos.append(F[17])\n tempPrimaryPos.append(F[18])\n\n hIN.close()\n\n print >> hOUT, '\\t'.join([tempKey, ';'.join(tempInseq), ';'.join(tempIDs), ';'.join(tempMQ_primary), ';'.join(tempCover_primary), ';'.join(tempDir_primary), \\\n ';'.join(tempMQ_pair), ';'.join(tempCover_pair), ';'.join(tempDir_pair), \\\n ';'.join(tempMQ_SA), ';'.join(tempCover_SA), ';'.join(tempDir_SA), ';'.join(tempPairPos), ';'.join(tempPrimaryPos)])\n\n hOUT.close()\n\n\n","sub_path":"lib/fusionfusion/parseJunctionInfo.py","file_name":"parseJunctionInfo.py","file_ext":"py","file_size_in_byte":18498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"296919866","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport glob\nimport os\nimport json\nimport re\nfrom 
ipykernel import comm\njson_body = []\nmeasurement={}\ntags ={}\ntagsSmooth ={}\ntagsStreamIndex={}\ntagsStreamIndexAudio= {}\nregex = re.compile(r\"[0-9]*[a-zA-Z]+[0-9]*\")\n\nfields={}\n\nclass Traiter:\n \n def __init__(self, manifestJSONFile):\n #self.manifestJSONFile = manifestJSONFile\n self.manifestJSONFile = manifestJSONFile\n self.configSMOOTH = json.loads(open(str(self.manifestJSONFile)).read())\n self.dicoSmooth = self.configSMOOTH['SmoothStreamingMedia']\n self.listeQltyGenerated = self.configSMOOTH['SmoothStreamingMedia']['StreamIndex']\n self.listStreamIndex = self.configSMOOTH['SmoothStreamingMedia']['StreamIndex']\n self.listStreamIndexChunks = self.configSMOOTH['SmoothStreamingMedia']['StreamIndex']\n self.listpods = []\n \n def genSmoothTable(self, nameCollection, timefile,commande, profil, noeud):\n try:\n json_body = []\n fieldS = {}\n tags = {}\n dic = {}\n #configSMOOTH = json.loads(open(str(self.manifestJSONFile)).read())\n #dicoSmooth = configSMOOTH['SmoothStreamingMedia']\n self.dicoSmooth[\"commande\"] = commande\n self.dicoSmooth[\"profil\"] = profil\n self.dicoSmooth[\"noeud\"] = noeud\n del self.dicoSmooth['StreamIndex']\n #print(dicoSmooth)\n for key, valeur in self.dicoSmooth.items(): \n if str(type(valeur)) != \"\": \n if regex.match(valeur) is None:\n if valeur !='':\n fieldS[key] = int(valeur)\n del self.dicoSmooth[key]\n dic[\"tags\"]= self.dicoSmooth\n dic[\"fields\"] = fieldS\n dic[\"time\"]=timefile\n dic[\"measurement\"]= str(nameCollection)\n json_body.append(dic)\n #print(json_body)\n return json_body\n except Exception as x:\n print(x)\n \n \n \n def genStreamIndexTable(self, nameCollection, timefile,commande, profil, noeud):\n try:\n json_body = []\n for element in self.listStreamIndex:\n if str(type(element)) == \"\": \n del element[\"QualityLevel\"] \n del element[\"c\"]\n fieldS = {}\n tags = {}\n dic = {}\n element[\"commande\"] = commande\n element[\"profil\"] = profil\n element[\"noeud\"] = noeud\n for key, valeur in element.items(): \n if regex.match(valeur) is None:\n if valeur !='':\n fieldS[key] = int(valeur)\n del element[key]\n dic[\"tags\"]= element\n dic[\"fields\"] = fieldS\n dic[\"time\"]=timefile\n dic[\"measurement\"]= str(nameCollection)\n json_body.append(dic)\n #print(json_body)\n return json_body\n except Exception as x:\n print(x) \n \n \n def genQualityTable(self,nameCollection, timefile,commande, profil, noeud):\n try:\n json_body = []\n for element in self.listeQltyGenerated:\n qlt = element.get('QualityLevel')\n if str(type(qlt)) == \"\":\n #print(qlt)\n for eqlty in qlt:\n fieldS = {}\n tags = {}\n dic = {}\n eqlty[\"commande\"] = commande\n eqlty[\"profil\"] = profil\n eqlty[\"noeud\"] = noeud\n for key, valeur in eqlty.items(): \n if regex.match(valeur) is None:\n if valeur !='':\n fieldS[key] = int(valeur)\n del eqlty[key]\n dic[\"tags\"]= eqlty\n dic[\"fields\"] = fieldS\n dic[\"time\"]=timefile\n dic[\"measurement\"]=str(nameCollection)\n json_body.append(dic)\n else:\n if str(type(qlt)) == \"\":\n #print(qlt)\n eqlty = {}\n fieldS = {}\n tags = {}\n dic = {}\n qlt[\"commande\"] = commande\n qlt[\"profil\"] = profil\n qlt[\"noeud\"] = noeud \n for key, valeur in qlt.items(): \n if regex.match(valeur) is None:\n if valeur !='':\n fieldS[key] = int(valeur)\n del qlt[key]\n dic[\"tags\"]= qlt\n dic[\"fields\"] = fieldS\n dic[\"time\"]=timefile\n dic[\"measurement\"]=str(nameCollection)\n json_body.append(dic)\n #print(json_body)\n return json_body\n except Exception as x:\n print(x)\n \n def genChunksTable(self, 
nameCollection, timefile,commande, profil, noeud):\n try:\n json_body = []\n for element in self.listStreamIndexChunks:\n fieldS = {}\n tags = {}\n dic = {}\n tags[\"Type\"] = element.get('Type')\n tags[\"commande\"] = commande\n tags[\"profil\"] = profil\n tags[\"noeud\"] = noeud\n fieldS[\"duree\"] = int(element['c'][0].get('t'))\n dic[\"tags\"]= tags\n dic[\"fields\"] = fieldS\n dic[\"time\"]=timefile\n dic[\"measurement\"]= str(nameCollection)\n json_body.append(dic)\n #print(json_body)\n return json_body\n except Exception as x:\n print(x)\n \n def generateBody(self,timefile,commande, profil, noeud):\n try:\n ql= self.genQualityTable('QualityLevel',timefile,commande, profil, noeud)\n chunk = self.genChunksTable('Chunks',timefile,commande, profil, noeud)\n strm = self.genStreamIndexTable('StreamIndex',timefile,commande, profil, noeud)\n mss = self.genSmoothTable('SmoothMediaStream',timefile,commande, profil, noeud)\n flux = mss + ql + chunk + strm\n print(json.dumps(flux))\n return json.dumps(flux)\n except Exception as x:\n print(x)\n\"\"\" \ntest = Traiter('/home/adama/Bureau/traitement/Arte_LIVESMOOTH_NODEA1_20170228132851')\n\nprint('--------Flux.....')\ntest.generateBody('2017-01-23T14:31:16Z','LIVE','SMOOTH','NODEouPOD')\n\n\"\"\"","sub_path":"ABSTRACT/abstract1_0/route/traitment/Traiter.py","file_name":"Traiter.py","file_ext":"py","file_size_in_byte":7054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"450023547","text":"# Title : _8DIPPredict.py\n# Created by: julse@qq.com\n# Created on: 2021/3/19 9:12\n# des : TODO\nimport os\nimport time\nfrom PairDealer import ComposeData, PairDealer\nfrom FastaDealear import FastaDealer\nfrom FeatureDealer import BaseFeature, Feature_type\nfrom common import check_path, concatFile\nfrom entry import entry\nfrom myModel import Param\n\nfrom mySupport import savepredict, calculateResults\n\n\ndef _4getFeature(fin_pair,fin_fasta,dir_feature_db,dirout_feature):\n # fin_pair = '%s/dirRelated/2pair.tsv'%dirout\n '''\n generate feature db\n '''\n print('generate feature db')\n fd = FastaDealer()\n fd.getNpy(fin_fasta, dir_feature_db)\n '''\n generate feature\n '''\n print('generate feature')\n BaseFeature().base_compose(dirout_feature, fin_pair, dir_feature_db, feature_type=Feature_type.SEQ_1D)\ndef _5composeBanlancePair(eachdir):\n '''\n 1. 
ComposeData\n 0 group: load 1000 [0:1000] data from file/8DIPPredict/data/Ecoli/2pair.tsv\n 0 group: load 1000 [0:1000] data from file/8DIPPredict/data_nega/Ecoli/4pairInfo_subcell_differ_related/2pair.tsv\n ...\n '''\n\n fin_p = 'file/8DIPPredict/data/%s/2pair.tsv' % eachdir\n fin_n = 'file/8DIPPredict/data_nega/%s/4pairInfo_subcell_differ_related/2pair.tsv' % eachdir\n\n f1out = 'file/8DIPPredict/predict/%s' % eachdir\n\n flist = [fin_p, fin_n]\n ratios_pn = [1, 1]\n # limit = 100\n # limit = 64939 * 2\n limit = 0\n\n ComposeData().save(f1out, flist, ratios_pn, limit, groupcount=-1, repeate=False,labels=[1,0])\ndef _6predict(fin_pair,dirout_feature,fin_model,dirout_result,limit=0):\n '''\n Ecoli 数据集测试起来效果很差\n :param eachdir:\n :return:\n '''\n\n # dirout_feature = '/home/19jjhnenu/Data/SeqTMPPI2W/feature/%s/' % eachdir\n #\n # fin_model = '/home/19jjhnenu/Data/SeqTMPPI2W/result/129878/_my_model.h5'\n # dirout_result = '/home/19jjhnenu/Data/SeqTMPPI2W/result/129878/testDIP/%s' % eachdir\n\n # f1out = 'file/8DIPPredict/predict/%s' % eachdir\n # fin_pair = os.path.join(f1out, '0/all.txt')\n\n '''\n testing on the model\n '''\n print('testing the model')\n savepredict(fin_pair, dirout_feature, fin_model, dirout_result, batch_size=500,limit=limit)\n\n\ndef _7dividedTrainAndTest(dir_data):\n '''\n config path\n '''\n fin_pair = os.path.join(dir_data, 'all.txt')\n\n train = os.path.join(dir_data, 'train.txt')\n validate = os.path.join(dir_data, 'validate.txt')\n test = os.path.join(dir_data, 'test.txt')\n ratios_tvt = [0.8, 0.1, 0.1]\n f3outs = [train, validate, test]\n\n '''\n 2. divided dataset\n divide data to train and test\n\n '''\n print('divided dataset')\n PairDealer().part(fin_pair,ratios_tvt,f3outs)\ndef _7trainAndTest(dirout_feature,fin_train,fin_validate,dirout):\n # time 664909.4274818897 ~ 7.6 day\n\n '''\n training the model\n '''\n print('training on the model')\n check_path(dirout)\n validate = {}\n validate['fin_pair'] = fin_validate\n validate['dir_in'] = dirout_feature\n onehot = True\n entry(dirout, fin_train, dirout_feature, model_type=Param.CNN1D_OH, limit=0, onehot=onehot, kernel_size=90,\n epochs=80,\n # epochs=30,\n filters=300, batch_size=50, validate=validate)\n '''\n testing on the model\n '''\n print('testing the model')\n # fin_pair = 'file/4train/0/test.txt'\n # dir_in = dirout_feature\n # fin_model = '/home/19jjhnenu/Data/SeqTMPPI2W/result/129878/_my_model.h5'\n # dirout_result = '/home/19jjhnenu/Data/SeqTMPPI2W/result/129878/test'\n # check_path(dirout_result)\n # savepredict(fin_pair, dir_in, fin_model, dirout_result,batch_size=500)\n\n\n\n\n\nif __name__ == '__main__':\n print('start', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))\n start = time.time()\n\n # import tensorflow as tf\n # # gpu_id = '0,1,2,3'\n # gpu_id = '6,7'\n # os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)\n # os.system('echo $CUDA_VISIBLE_DEVICES')\n #\n # tf_config = tf.compat.v1.ConfigProto()\n # tf_config.gpu_options.allow_growth = True\n # tf.compat.v1.Session(config=tf_config)\n\n\n # for eachdir in ['Ecoli', 'Mus', 'Human', 'SC', 'HP']:\n # for eachdir in ['Mus', 'Human']:\n #\n # # '''\n # # fin\n # # '''\n # # fin_model = '/home/jjhnenu/data/PPI/release/2deployment0325/result/model/group0/_my_model.h5'\n # # fin_fasta = 'file/8DIPPredict/data_all/%s/dirRelated/2pair.fasta' % eachdir\n # # fin_pair = 'file/8DIPPredict/predict/%s/0/all.txt' % eachdir\n # # '''\n # # fout\n # # '''\n # # dirout_feature = '/home/jjhnenu/data/PPI/release/2deployment0325/feature/%s/' % 
eachdir\n # # dir_feature_db = '/home/jjhnenu/data/PPI/release/2deployment0325/featuredb/%s/' % eachdir\n # # dirout_result = '/home/jjhnenu/data/PPI/release/2deployment0325/result/model/group0/testDIP_PAN/%s' % eachdir\n # # 模型加载有问题\n #\n # # '''\n # # fin\n # # '''\n # # fin_model = '/home/19jjhnenu/Data/SeqTMPPI2W/result/129878/_my_model.h5'\n # # # fin_model_group0 = '/home/19jjhnenu/Data/SeqTMPPI2W/result/group/0/_my_model.h5'\n # # fin_fasta = 'file/8DIPPredict/data_all/%s/dirRelated/2pair.fasta' % eachdir\n # # fin_pair = 'file/8DIPPredict/predict/%s/0/all.txt' % eachdir\n # # '''\n # # fout\n # # '''\n # # dir_feature_db = '/home/19jjhnenu/Data/SeqTMPPI2W/featuredb/%s/' % eachdir\n # dirout_feature = '/home/19jjhnenu/Data/SeqTMPPI2W/feature/%s/' % eachdir\n # # dirout_result = '/home/19jjhnenu/Data/SeqTMPPI2W/result/129878/testDIP/%s' % eachdir\n # # # dirout_result_group0 = '/home/19jjhnenu/Data/SeqTMPPI2W/result/group/0/testDIP/%s' % eachdir\n # # # _4getFeature(fin_pair,fin_fasta,dir_feature_db,dirout_feature)\n # # _6predict(fin_pair, dirout_feature, fin_model, dirout_result, limit=0)\n #\n # # dir_data = 'file/8DIPPredict/predict/%s/0/' % eachdir\n # # _7dividedTrainAndTest(dir_data)\n #\n # fin_train = 'file/8DIPPredict/predict/%s/0/train.txt' % eachdir\n # fin_validate = 'file/8DIPPredict/predict/%s/0/validate.txt' % eachdir\n # dirout = '/home/19jjhnenu/Data/SeqTMPPI2W/result/DIP/%s' % eachdir\n # _7trainAndTest(dirout_feature,fin_train,fin_validate,dirout)\n\n # table = []\n # for eachdir in ['Ecoli', 'Mus', 'Human', 'SC', 'HP']:\n # dirout_result = '/home/19jjhnenu/Data/SeqTMPPI2W/result/129878/testDIP/%s' % eachdir\n # fin_pair = os.path.join(dirout_result, 'result.csv')\n # group_dirout = 'file/8DIPPredict/statistic/group.tsv'\n # row = []\n # df = pd.read_csv(fin_pair, header=None)[2]\n # row.extend([eachdir])\n # row.extend(list(df.value_counts()))\n # row.extend([len(df)])\n # table.append(row)\n # pd.DataFrame(table).to_csv(group_dirout, header=None, index=None, sep='\\t')\n\n\n\n '''\n testing on the model\n '''\n\n print('testing the model')\n # for eachdir in ['Ecoli', 'Mus', 'Human', 'SC', 'HP']:\n # fin_pair = 'file/8DIPPredict/data/%s/2pair.tsv'%eachdir\n # dir_in = '/home/19jjhnenu/Data/SeqTMPPI2W/feature/%s/' % eachdir\n # fin_model = '/home/19jjhnenu/Data/SeqTMPPI2W/result/group/0/_my_model.h5'\n # dirout_result = '/home/19jjhnenu/Data/SeqTMPPI2W/result/group/0/testDIP1/%s'%eachdir\n # check_path(dirout_result)\n # savepredict(fin_pair, dir_in, fin_model, dirout_result,batch_size=500,posi=True)\n\n # dirout = '/home/19jjhnenu/Data/SeqTMPPI2W/result/group/0/testDIP1/'\n # calculateResults(dirout, dirout, filename='log.txt', row=2, resultfilename='result.csv')\n pass\n\n print('testing the model')\n # for eachdir in ['Ecoli', 'Mus', 'Human', 'SC', 'HP']:\n #\n # fin_pair = 'file/8DIPPredict/data_all/%s/dirRelated/2pair.tsv'%eachdir\n # dir_in = '/home/19jjhnenu/Data/SeqTMPPI2W/feature/%s/' % eachdir\n # fin_model = '/home/19jjhnenu/Data/SeqTMPPI2W/result/5CV_1/2/4/_my_model.h5'\n # dirout_result = '/home/19jjhnenu/Data/SeqTMPPI2W/result/5CV_1/2/4/testDIP_all/%s'%eachdir\n # check_path(dirout_result)\n #\n # fin_fasta = 'file/8DIPPredict/data_all/%s/dirRelated/2pair.fasta' % eachdir\n #\n # dir_feature_db = '/home/19jjhnenu/Data/SeqTMPPI2W/featuredb/%s/' % eachdir\n # dirout_feature = dir_in\n # _4getFeature(fin_pair, fin_fasta, dir_feature_db, dirout_feature)\n # savepredict(fin_pair, dir_in, fin_model, dirout_result,batch_size=500,posi=True)\n #\n # dirout = 
'/home/19jjhnenu/Data/SeqTMPPI2W/result/5CV_1/2/4/testDIP_all/'\n # calculateResults(dirout, dirout, filename='log.txt', row=2, resultfilename='result.csv')\n\n\n\n\n\n\n # fin = 'file/8DIPPredict/data/Ecoli/2pair.tsv'\n # countline(fin)\n # df = pd.read_table(f4caseStudyPair_onlyOnePDB,header=None)\n # df1.to_csv(fout,header=None,index=None,sep='\\t')\n\n '''\n concat all kinds of species 正负样本 1:1\n HP 数据太少,只有26个正样本,抛弃\n '''\n # print('concat all kinds of species 正负样本 1:1')\n # dirin = 'file/8DIPPredict/predict'\n # fileList = [os.path.join(dirin,eachfile,'0/all.txt') for eachfile in ['Ecoli', 'Mus', 'Human', 'SC']]\n # fout = os.path.join(dirin,'all.txt')\n # concatFile(fileList, fout)\n\n '''concat fasta'''\n # print('concat all kinds of species fasta')\n # dirin = 'file/8DIPPredict/data_all'\n # fileList = [os.path.join(dirin,eachfile,'dirRelated/2pair.fasta') for eachfile in ['Ecoli', 'Mus', 'Human', 'SC','HP']]\n # fout = os.path.join(dirin,'all.fasta')\n # concatFile(fileList, fout)\n\n '''\n feature \n '''\n # fin_pair = 'file/8DIPPredict/predict/all.txt'\n # fin_fasta= 'file/8DIPPredict/data_all/all.fasta'\n # dir_feature_db = '/home/19jjhnenu/Data/SeqTMPPI2W/featuredb/%s/' % 'DIP'\n # dirout_feature = '/home/19jjhnenu/Data/SeqTMPPI2W/feature/%s/' % 'DIP'\n # _4getFeature(fin_pair, fin_fasta, dir_feature_db, dirout_feature)\n\n '''\n concat DIP posi exclude HP\n '''\n # dirin = 'file/8DIPPredict/data'\n # fileList = [os.path.join(dirin,eachfile,'2pair.tsv') for eachfile in ['Ecoli', 'Mus', 'Human', 'SC']]\n # fout = os.path.join(dirin,'all.txt')\n # concatFile(fileList, fout)\n\n # dir_data = 'file/8DIPPredict/predict'\n # _7dividedTrainAndTest(dir_data)\n\n print('stop', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))\n print('time', time.time() - start)\n\n","sub_path":"_8DIPPredict_1.py","file_name":"_8DIPPredict_1.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"482365947","text":"class Flower(object):\n \"\"\"Class representing flower.\n\n :param name: A string representing the flower's name.\n :param petals_num: An integer representing the number of petals.\n :param price: A float representing the price of flower.\n \"\"\"\n\n def __init__(self, name, petals_num, price):\n self.name = name\n self.petals_num = petals_num\n self.price = price\n\n def set_name(self, name):\n if type(name) is str:\n self.name = name\n else:\n print('Can\\'t set the name for flower, \"%s\" is not a string.' % str(name))\n\n def set_petals_num(self, petals_num):\n if type(petals_num) is int:\n self.petals_num = petals_num\n else:\n print('Can\\'t set the number of petals for flower, \"%s\" is not an integer.' % str(petals_num))\n\n def set_price(self, price):\n if type(price) is float:\n self.price = price\n else:\n print('Can\\'t set the price for flower, \"%s\" is not a float.' 
% str(price))\n\n def get_name(self):\n return self.name\n\n def get_petals_num(self):\n return self.petals_num\n\n def get_price(self):\n return self.price\n\n\nif __name__ == '__main__':\n\n print('Creating a first flower...')\n lily = Flower('lily', 6, 45.00)\n print('The name of flower is: %s' % lily.get_name())\n print('The number of petals of flower is: %d' % lily.get_petals_num())\n print('The price of flower is: %0.2f' % lily.get_price())\n\n print('Trying to set not valid name for flower...')\n lily.set_name(1273649871263)\n print('Trying to set not valid number of petals for flower...')\n lily.set_petals_num(123.3123123)\n print('Trying to set not valid price for flower...')\n lily.set_price(2)\n","sub_path":"chapter_02/flower.py","file_name":"flower.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"370234756","text":"#!/usr/bin/python\nfrom _MachineMotion import *\n\n# Define a callback to process controller gCode responses (if desired)\ndef templateCallback(data):\n print ( \"Controller gCode responses \" + data )\n\nmachine_motion_example = MachineMotion(templateCallback, DEFAULT_IP_ADDRESS.usb_windows)\n\n# Send a stop command to the Machine (even if it is not moving yet !)\n\nfor i in range(0, 50):\n machine_motion_example.emitStop()\n machine_motion_example.emitRelativeMove(1, \"positive\", 10)\n\nprint ( \"--> Machine Stopped\" )\n","sub_path":"examples/emitStop.py","file_name":"emitStop.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"280857690","text":"##\n## Programación con Pandas\n## ===========================================================================\n##\n## Construya una tabla que contenga _c0 y una lista\n## separada por ',' de los valores de la columna _c5a\n## y _c5b (unidos por ':') de la tabla tbl2.tsv\n## \n## Rta/\n## _c0 lista\n## 0 0 bbb:0,ddd:9,ggg:8,hhh:2,jjj:3\n## 1 1 aaa:3,ccc:2,ddd:0,hhh:9\n## ...\n## 38 38 eee:0,fff:9,iii:2\n## 39 39 ggg:3,hhh:8,jjj:5\n##\n## >>> Escriba su codigo a partir de este punto <<<\n##\nimport pandas as pd\nimport numpy as np\npd.set_option('display.notebook_repr_html', False)\n\narchivo=pd.read_csv(\"tbl2.tsv\", sep='\\t')\narchivo=archivo.sort_values(by='_c5a')\narchivo1=archivo['_c5b'].map(lambda x:str(x))\narchivo2=archivo['_c5a']\narchivo['lista']= archivo2+':'+archivo1\nlista2=archivo.groupby(['_c0'])['lista'].apply(lambda x: ','.join(str(e) for e in x))\nlista2=lista2.reset_index()\nprint(lista2)\n","sub_path":"04-pandas=1/q10=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"389337038","text":"#!/usr/bin/python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nfrom itertools import product\nimport argparse\n\n\n\n\ndef plot_data(filename = \"datalog.csv\", show = False, save = None):\n with open(filename) as inputFile:\n inputReader = csv.DictReader(inputFile, delimiter=\";\")\n\n data_reord = list(inputReader)\n\n keys = [\"CALCULATOR\", \"BASE\", \"WIDTH\", \"HEIGHT\", \"ITERS\", \"TIME\"]\n\n data = {k: np.array([x[k] for x in data_reord],\n dtype=\"str\" if k == \"CALCULATOR\" else \"i\") for k in keys}\n\n plt.figure(figsize=(12, 12)) \n\n base_sizes = np.sort(np.unique(data[\"BASE\"]))\n calculators = np.unique(data[\"CALCULATOR\"])\n iters = 
np.unique(data[\"ITERS\"])\n\n calc_labels = [x.replace(\"MandelCalculator\", \"\") for x in calculators]\n\n for i, (iter, b) in enumerate(product(iters, base_sizes)):\n ax = plt.subplot(2, 4, 1+i)\n ax.set_title(f\"Grid: {3*b}x{2*b} Iters: {iter}\")\n ax.boxplot(\n [ \n data[\"TIME\"][(data[\"CALCULATOR\"] == c) & \n (data[\"BASE\"] == b) &\n (data[\"ITERS\"] == iter)]\n for c in calculators\n ]\n )\n\n ax.set(\n xticks = np.arange(len(calculators)) + 1,\n xticklabels = list(calc_labels),\n ylim = (0, None),\n ylabel = \"Execution time [ms]\"\n )\n\n plt.tight_layout()\n if save:\n plt.savefig(save)\n print(f\"#saving to {save}\")\n if show:\n plt.show()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Compare two npz files\")\n parser.add_argument(\"filename\", type=str, default=\"datalog.csv\")\n parser.add_argument(\"--save\", type=str, default=None)\n parser.add_argument(\"--show\", action='store_const',\n const=True, default=False,)\n\n\n args = parser.parse_args()\n\n print(vars(args))\n\n if not plot_data(**vars(args)):\n exit(1)","sub_path":"MITAI/1MIT/AVS/avs-proj01/scripts/plot_evaluate.py","file_name":"plot_evaluate.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"168855060","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 25 11:46:31 2017\n\n@author: lansford\n\"\"\"\n\nfrom __future__ import division\nimport os\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.neural_network import MLPRegressor\nfrom ir_conv import IR_CONV\nfrom timeit import default_timer as timer\n\nstart = timer()\n\"\"\"loading Spectrum files\"\"\"\nSingleCOLoc = os.path.expanduser('~/Documents/VASP/Testing IR/Vibration_Lists/SingleCO.pkl')\ninfile = open(SingleCOLoc,'rb')\nSingleCO = pickle.load(infile, encoding='latin1') #encoding = 'latin1' if pyton3\ninfile.close()\n\"\"\"Generating Lists of values\"\"\"\nMinDistList = []\nfor i in SingleCO:\n if i[0].CNCO ==5 or i[0].CNCO==0:\n i[0].CNCO=4\n Ptatoms = [atom.index for atom in i[0] if atom.index not in [i[0].Catom,i[0].Oatom]]\n MinDistList.append(min(i[0].get_distances(i[0].Catom,Ptatoms,mic=True)))\nCNCOList = [i[0].CNCO for i in SingleCO]\nGCNList = [i[0].GCN for i in SingleCO]\nCNPtList = [i[0].CNPt for i in SingleCO]\nNumPtList = [i[0].NumPt for i in SingleCO]\n\"\"\"Generating Fingerprint to spread data over k-folds\"\"\" \nFingerprintGCN = np.array([int(str(x)+str(y)) for x,y in zip(list(np.array(np.round(GCNList),dtype='int')),CNCOList)])\nFingerprintCNCO = np.array(CNCOList)\nCNCOList = np.array(CNCOList)\nGCNList = np.array(GCNList)\n#GCNgroups = np.linspace(np.min(GCNList[CNCOList==1]),np.max(GCNList[CNCOList==1]),num=5,endpoint=True)\nGCNgroups = np.array([0,3,5,6.5,7.5])\nGCNlabel = np.zeros(len(GCNList))\nfor i in range(4):\n GCNlabel[(GCNList <= GCNgroups[i+1]) & (GCNList>=GCNgroups[i]) & (CNCOList==1)] = i+1\nFingerprintGCN = FingerprintGCN[GCNlabel>0]\nFingerprintCNCO = FingerprintCNCO[GCNlabel==0]\nskf = StratifiedKFold(n_splits=3,shuffle=True)\nXGCN = np.array([i[0].spectrum[1] for i in SingleCO])[GCNlabel>0]; yGCN = GCNlabel[GCNlabel>0]\nXCNCO = np.array([i[0].spectrum[1] for i in SingleCO])[GCNlabel==0]; yCNCO = CNCOList[GCNlabel==0]\nstart = timer()\nfor train_index, test_index in skf.split(XGCN, FingerprintGCN):\n X_trainGCN, X_testGCN = XGCN[train_index], XGCN[test_index]\n y_trainGCN, y_testGCN = 
yGCN[train_index], yGCN[test_index]\nfor train_index, test_index in skf.split(XCNCO, FingerprintCNCO):\n X_trainCNCO, X_testCNCO = XCNCO[train_index], XCNCO[test_index]\n y_trainCNCO, y_testCNCO = yCNCO[train_index], yCNCO[test_index]\n\n\"\"\"convoluting spectra randomly and normalizing\"\"\"\nGCNconv = IR_CONV(4)\nprobabilities = GCNconv.get_probabilities((201,11,6))\nXconv_trainGCN, yconv_trainGCN = GCNconv.get_xyconv(X_trainGCN,y_trainGCN,probabilities)\nprobabilities = GCNconv.get_probabilities((101,6,3))\nXconv_testGCN, yconv_testGCN = GCNconv.get_xyconv(X_testGCN,y_testGCN,probabilities)\n\nCNCOconv = IR_CONV(3)\nprobabilities = CNCOconv.get_probabilities((301,11))\nXconv_trainCNCO, yconv_trainCNCO = CNCOconv.get_xyconv(X_trainCNCO,y_trainCNCO,probabilities)\nXconv_testCNCO, yconv_testCNCO = CNCOconv.get_xyconv(X_testCNCO,y_testCNCO,probabilities)\n\n\nXconv_train = GCNconv.add_noise(Xconv_trainGCN,Xconv_trainCNCO,signal2noisemax=2)\nXconv_test = GCNconv.add_noise(Xconv_testGCN,Xconv_testCNCO,signal2noisemax=2)\nyconv_train = yconv_trainGCN; yconv_test = yconv_testGCN\n\nNN = MLPRegressor(hidden_layer_sizes=50, activation='relu', solver='adam', tol=10**-7,alpha=10**-12, verbose=False,batch_size=250, learning_rate='constant', learning_rate_init=0.001, power_t=0.5,early_stopping=True)\nNN.out_activation_ = 'softmax'\nNN.fit(Xconv_train,yconv_train)\n \n \nypredicted = NN.predict(Xconv_test)\nfor i in range(4):\n print((np.mean((ypredicted[:,i]-yconv_test[:,i])**2))**0.5)\n print(np.std(yconv_test[:,i]))\nTestScore = NN.score(Xconv_test,yconv_test)\nTrainScore = NN.score(Xconv_train,yconv_train)\nprint(NN.n_iter_)\nprint(TestScore)\nprint(TrainScore)","sub_path":"old_files/GCN_noise.py","file_name":"GCN_noise.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"375178760","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom components.combo_box import WmmComboBox\n\nclass ConfigWidget(QWidget):\n def __init__(self, controller, model):\n super().__init__()\n self.model = model\n self.controller = controller\n vbox = QHBoxLayout(self)\n vbox.addWidget(QLabel('port'))\n\n self.cb = WmmComboBox()\n self.cb.setModel(self.model)\n self.cb.new_signal.connect(self.handlePort)\n vbox.addWidget(self.cb)\n\n\n vbox.addWidget(QLabel('channel'))\n self.channel_selector = QComboBox()\n self.channel_selector.addItems(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15'])\n self.channel_selector.currentIndexChanged.connect(self.handleChannel)\n vbox.addWidget(self.channel_selector)\n\n self.setLayout(vbox)\n # self.setStyleSheet(\"background-color: lightblue\")\n\n def handlePort(self, previous_id, current_id):\n self.controller.changePort(current_id)\n print('changed port ', current_id)\n # midiout.open_port( id)\n\n def handleChannel(self, id):\n self.controller.updateChannel(id)\n","sub_path":"src/widgets/config_widget.py","file_name":"config_widget.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"405789784","text":"\"\"\"\nTest the functions that load libgmt.\n\"\"\"\nimport os\n\nimport pytest\nfrom pygmt.clib.loading import check_libgmt, clib_names, load_libgmt\nfrom pygmt.exceptions import GMTCLibError, GMTCLibNotFoundError, GMTOSError\n\n\ndef test_check_libgmt():\n \"\"\"\n Make sure check_libgmt fails when given a bogus library.\n \"\"\"\n with 
pytest.raises(GMTCLibError):\n check_libgmt(dict())\n\n\ndef test_load_libgmt():\n \"\"\"\n Test that loading libgmt works and doesn't crash.\n \"\"\"\n check_libgmt(load_libgmt())\n\n\ndef test_load_libgmt_fail():\n \"\"\"\n Test that loading fails when given a bad library path.\n \"\"\"\n # save the old value (if any) before setting a fake \"GMT_LIBRARY_PATH\"\n old_gmt_library_path = os.environ.get(\"GMT_LIBRARY_PATH\")\n\n os.environ[\"GMT_LIBRARY_PATH\"] = \"/not/a/real/path\"\n with pytest.raises(GMTCLibNotFoundError):\n load_libgmt()\n\n # revert back to the original status (if any)\n if old_gmt_library_path:\n os.environ[\"GMT_LIBRARY_PATH\"] = old_gmt_library_path\n else:\n del os.environ[\"GMT_LIBRARY_PATH\"]\n\n\ndef test_clib_names():\n \"\"\"\n Make sure we get the correct library name for different OS names.\n \"\"\"\n for linux in [\"linux\", \"linux2\", \"linux3\"]:\n assert clib_names(linux) == [\"libgmt.so\"]\n assert clib_names(\"darwin\") == [\"libgmt.dylib\"]\n assert clib_names(\"win32\") == [\"gmt.dll\", \"gmt_w64.dll\", \"gmt_w32.dll\"]\n with pytest.raises(GMTOSError):\n clib_names(\"meh\")\n","sub_path":"pygmt/tests/test_clib_loading.py","file_name":"test_clib_loading.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"79472346","text":"import os\nfrom os import listdir\nfrom os.path import isfile, join, basename\nimport re\nfrom bs4 import BeautifulSoup\nimport progressbar\nimport argparse\n\nTEMP_FILE = \"./raw/other/temp.txt\"\nARTICLE_FOLDER = \"./raw/articles\"\nTEXT_FOLDER = \"./text\"\nOUTPUT_FILE = \"./training_data/data.txt\"\nBREAK_CHARACTER = \"SPECIALCHARACTERNOTUSEDELSEWHERE\"\nIGNORED = [\".empty\", \".DS_Store\", \".gitignore\"]\n\ndef check_doesnt_exist(f):\n\tif os.path.exists(f):\n\t\traise Exception(f\"{f} should not exist\")\n\ndef get_files_in_dir(directory):\n\treturn [join(directory, f) for f in listdir(directory) if isfile(join(directory, f)) and (not f in IGNORED)]\n\ndef get_number_of_pages():\n\t# put div with number of pages in a file, then extract the number of pages\n\tcheck_doesnt_exist(TEMP_FILE)\n\tos.system(f\"curl -s https://www.montag.wtf/ | grep 'class=\\\"pages\\\"' > {TEMP_FILE}\")\n\twith open(TEMP_FILE) as f:\n\t\tsoup = BeautifulSoup(f.readline(), 'html.parser')\n\t\tans = int(soup.div.string.split()[-1])\n\tos.system(f\"rm {TEMP_FILE}\")\n\treturn ans\n\ndef list_of_page_urls():\n\tnum_pages = get_number_of_pages()\n\tret = list()\n\tret.append(\"https://www.montag.wtf/\")\n\tfor i in range(2, num_pages + 1):\n\t\tret.append(f\"https://www.montag.wtf/page/{i}/\")\n\treturn ret\n\ndef list_of_article_urls_and_titles():\n\tcheck_doesnt_exist(TEMP_FILE)\n\tret = list()\n\tfor page in list_of_page_urls():\n\t\tos.system(f\"curl -s {page} > {TEMP_FILE}\")\n\t\twith open(TEMP_FILE) as f:\n\t\t\tsoup = BeautifulSoup(f, 'html.parser')\n\t\t\twrapperdivs = soup.find_all(\"div\", class_=\"homepage-post-info\")\n\t\t\tfor wrapperdiv in wrapperdivs:\n\t\t\t\ta = wrapperdiv.contents[1]\n\t\t\t\tlink = f\"https://www.montag.wtf{a.get('href')}\"\n\t\t\t\ttitlediv = a.contents[1]\n\t\t\t\ttitle = clean_title(titlediv.string)\n\t\t\t\tret.append((link, title))\n\tos.system(f\"rm {TEMP_FILE}\")\n\treturn ret\n\ndef clean_title(title):\n\ttitle = title.strip().replace(\" \", \"_\")\n\treturn re.sub(r'\\W+', '', title)\n\ndef get_number_of_articles():\n\tcheck_doesnt_exist(TEMP_FILE)\n\tfor url in list_of_page_urls():\n\t\tos.system(f\"curl -s 
{url} | grep 'homepage-post-info-wrapper' >> {TEMP_FILE}\")\n\tnum_articles = 0\n\twith open(TEMP_FILE) as f:\n\t\tfor line in f:\n\t\t\tnum_articles += 1\n\tos.system(f\"rm {TEMP_FILE}\")\n\treturn num_articles\n\ndef save_all_articles():\n\tfor article in progressbar.progressbar(list_of_article_urls_and_titles()):\n\t\tos.system(f\"curl -s {article[0]} > {join(ARTICLE_FOLDER, article[1])}.html\")\n\ndef replace_in_all_files(directory, findtok, replacetok):\n\tfiles = get_files_in_dir(directory)\n\tfor fpath in files:\n\t\tfiledata = \"\"\n\t\twith open(fpath, \"r\") as f:\n\t\t\tfiledata = f.read()\n\t\tfiledata = filedata.replace(findtok, replacetok)\n\t\twith open(fpath, \"w\") as f:\n\t\t\tf.write(filedata)\n\ndef fix_article_spacing():\n\treplace_in_all_files(ARTICLE_FOLDER, \"<br><br>\", f\"<br><br>{BREAK_CHARACTER}\")\n\treplace_in_all_files(ARTICLE_FOLDER, \"<br>\", f\"<br>
{BREAK_CHARACTER}\")\n\treplace_in_all_files(ARTICLE_FOLDER, \"\", f\"{BREAK_CHARACTER}\")\n\ndef extract_text(break_replacement, remove_newlines):\n\trawfiles = get_files_in_dir(ARTICLE_FOLDER)\n\tfor rawfpath in rawfiles:\n\t\twith open(rawfpath) as rawf:\n\t\t\tsoup = BeautifulSoup(rawf, 'html.parser')\n\t\t\twrapperdiv = soup.find_all(\"div\", class_=\"content-text\")[0]\n\t\t\ttext = wrapperdiv.get_text()\n\t\t\tif remove_newlines:\n\t\t\t\ttext = text.replace(\"\\n\", \" \")\n\t\t\ttext = text.replace(BREAK_CHARACTER, break_replacement)\n\t\t\tfilename = basename(os.path.splitext(rawfpath)[0])\n\t\t\twith open(join(TEXT_FOLDER, filename) + \".txt\", \"a+\") as textf:\n\t\t\t\ttextf.write(text)\n\ndef compile_text():\n\ttextfolder = get_files_in_dir(TEXT_FOLDER)\n\twith open(OUTPUT_FILE, \"a+\") as dataf:\n\t\tfor textpath in textfolder:\n\t\t\twith open(textpath) as textf:\n\t\t\t\tdataf.write(f\"{re.sub(' +', ' ', textf.read()).strip()}\\n\\n\")\n\ndef delete_iteration():\n\tos.system(f\"rm {ARTICLE_FOLDER}/*\")\n\tos.system(f\"rm {TEXT_FOLDER}/*\")\n\tos.system(f\"rm {OUTPUT_FILE}\")\n\ndef run_iteration(compactness):\n\tprint(\"Saving articles...\")\n\tsave_all_articles()\n\tprint(\"Fixing spacing...\")\n\tfix_article_spacing()\n\tprint(\"Extracting text...\")\n\textract_text(\"\\n\" if compactness == \"loose\" else \" \", compactness == \"dense\")\n\tprint(\"Compiling text...\")\n\tcompile_text()\n\ndef main(compactness):\n\tdelete_iteration()\n\trun_iteration(compactness)\n\t\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-c', '--compactness', choices=['dense', 'medium', 'loose'])\n\targs = parser.parse_args()\n\tmain(args.compactness)\n","sub_path":"pyscripts/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"229282419","text":"## This Code implements simple single Perceptron\r\nimport numpy as np\r\nimport random\r\nOrData_x = np.array([(0,0),(0,1),(1,0),(1,1)])\r\nOrData_y = np.array([-1,1,1,1])\r\nAndData_x = np.array([(0,0),(0,1),(1,0),(1,1)])\r\nAndData_y = np.array([-1,-1,-1,1])\r\nmyOwn_Data_x=np.array([(0,0),(0,1),(1,0),(1,1)])\r\nmyOwn_Data_y=np.array([1,-1,-1,1])\r\nlearing_rate=0.4\r\n#Data와 parameter를 담을 class\r\nclass Par:\r\n def __init__(self,w=None,b=None):\r\n self.w=w\r\n self.b=b\r\nclass Data:\r\n def __init__(self,x=None,y=None):\r\n self.x=x\r\n self.y=y\r\n#activatin function\r\ndef activate_fn(input):\r\n if(input>=0):\r\n return 1\r\n else:\r\n return -1\r\ndef linear_Transformation(par,data,i): #linear transformation on ith data\r\n return np.matmul(par.w,data.x[i])+par.b\r\n#Error result를 보여주는 Data의 index를 ret에 저장하여 반환.\r\ndef retErrors(par,data):\r\n ret=np.array([],int)\r\n for i in range(len(data.x)):\r\n if(activate_fn(linear_Transformation(par,data,i))!=data.y[i]):\r\n ret=np.append(ret,int(i))\r\n return ret\r\n#각 에폭마다 나온 결과를 이용하여 각 에폭의 prediction 결과 출력.\r\ndef printResult(par,data,Y):\r\n print(\"Among \"+str(len(data.x))+\" data, there are \"+str(len(Y))+\" errors.\")\r\n print(\"The error rate is \" + str(float(len(Y)/len(data.x))))\r\n for i in range(len(data.y)):\r\n linRet=linear_Transformation(par,data,i)\r\n print('d(x)=' + str(par.w[0]) + '*'+str(data.x[i][0])+'+' + str(par.w[1]) + '*'+str(data.x[i][1])+'+' + str(par.b) +\"= \"+str(linRet)+\", real class : \"+str(data.y[i]), end=' ')\r\n if(activate_fn(linRet)!=data.y[i]):\r\n print(\"Error\")\r\n else:\r\n 
print('')\r\n print(\"The parameters were: \" + str(par.w)+\" , \"+str(par.b))\r\n return\r\n#Error Data를 바탕으로 Parameter Update\r\ndef update_par(par,data,Y,epoch,printProcess=False):\r\n errors=np.zeros(2)\r\n if(printProcess):\r\n print(str(epoch+1)+\"th Epoch\")\r\n print(\"d(x)=\"+str(par.w[0])+\"x1 + \"+str(par.w[1])+\"x2 + \"+str(par.b))\r\n print(\"Y={\",end='')\r\n print(Y[0],end='')\r\n for i in range(1,len(Y)):\r\n print(', '+str(Y[i]),end='')\r\n print('}')\r\n temp=Par(par.w,par.b)\r\n for i in Y:\r\n errors+=data.y[i]*data.x[i]\r\n par.w+=learing_rate*errors\r\n par.b+=learing_rate*np.sum(data.y[Y])\r\n if(printProcess):\r\n print(\"w(\"+str(epoch+1)+\") = w(\"+str(epoch)+\") + 0.4(\",end='')\r\n print('t'+str(Y[0])+'*x'+str(Y[0]),end='')\r\n for i in range(1,len(Y)):\r\n print(\"+t\"+str(Y[i])+\"*x\"+str(Y[i]),end='')\r\n print(\")=\"+str(temp.w)+\"0.4[\",end='')\r\n print(str(data.y[Y[0]])+\"*\"+str(data.x[Y[0]]),end='')\r\n for i in range(1,len(Y)):\r\n print(\"+\"+str(data.y[Y[i]])+\"*\"+str(data.x[Y[i]]),end='')\r\n print(\"]=\"+str(par.w))\r\n\r\n print(\"b(\" + str(epoch + 1) + \") = b(\" + str(epoch) + \") + 0.4(\", end='')\r\n print('t' + str(Y[0]) , end='')\r\n for i in range(1, len(Y)):\r\n print(\"+t\" + str(Y[i]), end='')\r\n print(\")=\" + str(temp.b) + \"+0.4*\", end='')\r\n retSum=0\r\n for i in range(len(Y)):\r\n retSum+=data.y[Y[i]]\r\n print(str(retSum)+\"=\"+str(par.b))\r\n return par\r\n#maxEpoch까지 Parameter Batch Update\r\ndef training_process(par,data,maxEpoch=10):\r\n if(np.all(data.y==myOwn_Data_y)):\r\n print(\"w(0)=\"+str(par.w)+\", b(0)=\"+str(par.b))\r\n for i in range(maxEpoch):\r\n Y=retErrors(par,data)\r\n print(\"===============================\")\r\n print(str(i+1)+\"th Epoch\")\r\n printResult(par,data,Y)\r\n if(len(Y)==0):\r\n return par\r\n if len(Y)!=0:\r\n par = update_par(par,data,Y,i,np.all(data.y==myOwn_Data_y))\r\n return par\r\n#Parameter 초기화.\r\ndef initiate_Par():\r\n par=Par(np.random.rand(2)*2-1,random.random()*2-1)\r\n return par\r\n#Input Data Table 형식으로 출력.\r\ndef showData(data):\r\n for i in range(len(data.x)):\r\n print(\"Index : \"+str(i)+\" | X: \"+str(data.x[i])+\", Y: \"+str(data.y[i]))\r\ndef main():\r\n data=np.array([Data(OrData_x,OrData_y),Data(AndData_x,AndData_y),Data(myOwn_Data_x,myOwn_Data_y)],Data)\r\n functionName=['OR','And','MyOwn']\r\n for i in range(len(functionName)):\r\n print(functionName[i]+\" function\")\r\n showData(data[i])\r\n par=initiate_Par()\r\n par=training_process(par,data[i])\r\n print(\"===============================\")\r\n print(\"final \"+functionName[i] +\" Parameter\")\r\n print(\"W: \"+ str(par.w)+\", b: \"+str(par.b))\r\n print(\"\\n\")\r\n return\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"SKKU/3_1/AI/HW/AI_HW1.py","file_name":"AI_HW1.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"255200446","text":"import yfinance as yf\r\nfrom pandas_datareader import data as pdr\r\nimport argparse\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow.compat.v1 as tf\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib import patches as mpatches\r\nimport datetime\r\nimport time\r\nimport os\r\nimport cv2\r\ndef plt_num_act(act, xl, yl, name):\r\n plt.figure()\r\n plt.title(name,y=1.05)\r\n plt.xlabel(xl)\r\n plt.ylabel(yl)\r\n legend=[\"r\",\"b\",\"g\"]\r\n label=[\"+1\",\"0\",\"-1\"]\r\n recs=[]\r\n for i in range(len(legend)):\r\n 
recs.append(mpatches.Rectangle((0,0),1,1,fc=legend[i]))\r\n colors=list()\r\n for i in range(len(act)):\r\n if act[i]==-1:\r\n colors.append(\"g\")\r\n elif act[i]==0:\r\n colors.append(\"b\")\r\n else:\r\n colors.append(\"r\")\r\n plt.scatter(x=np.asarray([i for i in range(len(act))]),y=act,s=00.1,color=colors)\r\n plt.legend(recs,label)\r\n plt.savefig(name)\r\n plt.close()\r\n # plt.show()\r\ndef plt_per(per,name,xl,yl):\r\n plt.figure()\r\n plt.title(name,y=1.05)\r\n plt.xlabel(xl)\r\n plt.ylabel(yl)\r\n plt.plot(per, color='black',linewidth=0.5)\r\n plt.savefig(name)\r\n plt.close()\r\n # plt.show()\r\ndef parse_args():\r\n parser = argparse.ArgumentParser(\"Hyper-parameters for training model implementation\")\r\n parser.add_argument(\"--feature_size\", type=int, default=50, help=\"feature size\")\r\n parser.add_argument(\"--lr\", type=float, default=0.0005, help=\"learning rate\")\r\n parser.add_argument(\"--batch_size\", type=int, default=4, help=\"number of batches\")\r\n parser.add_argument(\"--n_epochs\", type=int, default=10, help=\"number of epochs\")\r\n parser.add_argument(\"--memory_fraction\", type=float, default=0.32, help=\"per_process_gpu_memory_fraction\")\r\n parser.add_argument(\"--idx\", type=int, default=0.6, help=\"data split %\")\r\n parser.add_argument(\"--cost_rate\", type=int, default=0.001425, help=\"cost rate\")\r\n return parser.parse_args(args=[])\r\ndef get_input_data(code):\r\n yf.pdr_override()\r\n data_frame = pdr.get_data_yahoo(code)\r\n close_data = data_frame.Close\r\n print(close_data)\r\n z = [close_data[i + 1] - close_data[i] for i in range(len(close_data) - 1)]\r\n p = close_data[1:]\r\n index_bottom, index_top, f, p_t = 0, arglist.feature_size, [], []\r\n while index_top + arglist.feature_size <= len(z) - 1:\r\n f.append(z[index_bottom: index_top])\r\n p_t.append(p[index_bottom: index_top])\r\n index_top += 1\r\n index_bottom += 1\r\n z = np.asarray(z, dtype=np.float32)\r\n f = np.asarray(f, dtype=np.float32)\r\n p_t = np.asarray(p_t, dtype=np.float32)\r\n z_tp1 = f[1:,-1]\r\n z_tp1 = z_tp1[:,np.newaxis]\r\n p_t = p_t[:-1,-1]\r\n p_t = p_t[:, np.newaxis]\r\n f = f[:-1,:]\r\n def data_split(data, arglist):\r\n idx = int(len(data) * arglist.idx)\r\n train_data, test_data = data[:idx], data[idx:]\r\n return train_data, test_data\r\n train_f, test_f = data_split(f, arglist=arglist)\r\n train_z_tp1, test_z_tp1 = data_split(z_tp1, arglist=arglist)\r\n train_p_t, test_p_t = data_split(p_t, arglist=arglist)\r\n return train_f, test_f, train_z_tp1, test_z_tp1, z, train_p_t, test_p_t\r\ndef batch_gen(x,k, arglist):\r\n data = np.asarray(x[k:k+arglist.batch_size])\r\n return data\r\ndef r(delta, z_tp1, p_t, delta_tm1):\r\n profit_per = (delta * z_tp1 - arglist.cost_rate * p_t * abs(\r\n delta - delta_tm1)) / p_t\r\n return profit_per*100\r\ndef plt_save(y1, y2, xl, yl, ln1, ln2, name):\r\n plt.figure()\r\n plt.title(name,y=1.05)\r\n plt.yscale(\"linear\")\r\n plt.xlabel(xl)\r\n plt.ylabel(yl)\r\n l1, = plt.plot(y1, color='blue',linewidth=0.5)\r\n l2, = plt.plot(y2, color='red',linewidth=0.5)\r\n plt.legend(handles=[l1, l2], labels=[ln1, ln2], loc='best')\r\n plt.savefig(name)\r\n plt.close()\r\n # plt.show()\r\nclass model():\r\n def __init__(self,z, arglist):\r\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=arglist.memory_fraction)\r\n session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)\r\n self.sess = tf.Session(config=session_conf)\r\n self.delta_tm1 = 
tf.placeholder(dtype=tf.float32, shape=[arglist.batch_size, 1], name=\"delta_tm1\")\r\n self.f = tf.placeholder(dtype=tf.float32, shape=[arglist.batch_size, 50], name=\"FuzzyIn\")\r\n self.z = z\r\n self.z_tp1 = tf.placeholder(dtype=tf.float32, shape=[arglist.batch_size, 1], name=\"z_tp1\")\r\n self.p_t = tf.placeholder(dtype=tf.float32, shape=[arglist.batch_size, 1], name=\"p_t\")\r\n self.profit = None\r\n self.delta = None\r\n self.loss = None\r\n with tf.variable_scope(\"k-means\"):\r\n # setting for k-means\r\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 0.1)\r\n flags = cv2.KMEANS_RANDOM_CENTERS\r\n compactness, label_kmeans, centers = cv2.kmeans(\r\n data=self.z, K=3, bestLabels=None, criteria=criteria, attempts=10, flags=flags)\r\n List0 = []\r\n List1 = []\r\n List2 = []\r\n for ii in range(0, self.z.size):\r\n if 0 == label_kmeans[ii][0]:\r\n List0.append(self.z[ii])\r\n if 1 == label_kmeans[ii][0]:\r\n List1.append(self.z[ii])\r\n if 2 == label_kmeans[ii][0]:\r\n List2.append(self.z[ii])\r\n data0_tensor = tf.convert_to_tensor(value=List0, dtype=tf.float32)\r\n data1_tensor = tf.convert_to_tensor(value=List1, dtype=tf.float32)\r\n data2_tensor = tf.convert_to_tensor(value=List2, dtype=tf.float32)\r\n mean0, variance0 = tf.nn.moments(x=data0_tensor, axes=[0])\r\n mean1, variance1 = tf.nn.moments(x=data1_tensor, axes=[0])\r\n mean2, variance2 = tf.nn.moments(x=data2_tensor, axes=[0])\r\n with tf.variable_scope(\"fuzzy-layer\"):\r\n fuzzy0 = tf.exp(tf.negative(tf.nn.batch_normalization(x=self.f, mean=mean0,\r\n variance=variance0, offset=None, scale=None,\r\n variance_epsilon=0.001)))\r\n fuzzy1 = tf.exp(tf.negative(tf.nn.batch_normalization(x=self.f, mean=mean1,\r\n variance=variance1, offset=None, scale=None,\r\n variance_epsilon=0.001)))\r\n fuzzy2 = tf.exp(tf.negative(tf.nn.batch_normalization(x=self.f, mean=mean2,\r\n variance=variance2, offset=None, scale=None,\r\n variance_epsilon=0.001)))\r\n fuzzyOut = tf.concat(values=[fuzzy0, fuzzy1, fuzzy2], axis=0, name=\"FuzzyOut\")\r\n fuzzyOut = tf.reshape(tensor=fuzzyOut, shape=[arglist.batch_size, 1, 150])\r\n\r\n with tf.variable_scope(\"MLP-layer\"):\r\n dense1 = tf.layers.dense(inputs=fuzzyOut, units=128, activation=tf.nn.sigmoid)\r\n dense2 = tf.layers.dense(inputs=dense1, units=128, activation=tf.nn.sigmoid)\r\n dense3 = tf.layers.dense(inputs=dense2, units=20, activation=tf.nn.sigmoid)\r\n # decoder1 = tf.layers.dense(inputs=dense4, units=50, activation=tf.nn.sigmoid)\r\n # decoder2 = tf.layers.dense(inputs=decoder1, units=100, activation=tf.nn.sigmoid)\r\n # decoder3 = tf.layers.dense(inputs=decoder2, units=150, activation=tf.nn.sigmoid)\r\n # loss3 = tf.losses.mean_squared_error(labels=dense3, predictions=decoder1)\r\n # loss2 = tf.losses.mean_squared_error(labels=dense2, predictions=decoder2)\r\n # loss1 = tf.losses.mean_squared_error(labels=dense1, predictions=decoder3)\r\n # self.pretrain_loss = (loss1+loss2+loss3)\r\n # train_Autoencoder = tf.train.AdamOptimizer(0.002).minimize(loss1 + loss2 + loss3)\r\n # optimizer = tf.train.AdamOptimizer(0.002)\r\n # train_op = optimizer.minimize(self.pretrain_loss)\r\n\r\n with tf.variable_scope(\"RNN-layer\"):\r\n # vanilla_rnn_layer\r\n rnn_In = tf.reshape(tensor=dense3, shape=[1, arglist.batch_size, 20], name=\"reshape1\")\r\n rnn_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=1, activation=tf.tanh)\r\n initial_state = rnn_cell.zero_state(batch_size=1, dtype=tf.float32)\r\n self.delta, final_state = tf.nn.dynamic_rnn(rnn_cell, rnn_In, 
initial_state=initial_state,\r\n dtype=tf.float32, \\\r\n time_major=False)\r\n self.delta = tf.reshape(tensor=self.delta, shape=[arglist.batch_size, 1])\r\n with tf.variable_scope(\"create-loss\"):\r\n self.profit = self.delta * self.z_tp1 - arglist.cost_rate * self.p_t* tf.abs(\r\n self.delta - self.delta_tm1)\r\n self.loss = (-1) * tf.reduce_sum(self.profit)\r\n with tf.variable_scope(\"create-profit_per\"):\r\n self.profit_per = (self.delta * self.z_tp1 - arglist.cost_rate * self.p_t* tf.abs(\r\n self.delta - self.delta_tm1))/self.p_t\r\n with tf.variable_scope(\"train_p\"):\r\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\r\n optimizer = tf.train.AdamOptimizer(learning_rate=arglist.lr)\r\n self.train_op = optimizer.minimize(self.loss, global_step=self.global_step)\r\n\r\n self.timestamp = str(int(time.time()))\r\n self.out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", self.timestamp))\r\n print(\"Writing to {}\\n\".format(self.out_dir))\r\n self.loss_summary = tf.summary.scalar(\"loss\", self.loss)\r\n\r\n # Train Summaries\r\n self.train_summary_op = tf.summary.merge([self.loss_summary])\r\n self.train_summary_dir = os.path.join(self.out_dir, \"summaries\", \"train\")\r\n self.train_summary_writer = tf.summary.FileWriter(self.train_summary_dir, self.sess.graph)\r\n\r\n # Test summaries\r\n self.test_summary_op = tf.summary.merge([self.loss_summary])\r\n self.test_summary_dir = os.path.join(self.out_dir, \"summaries\", \"test\")\r\n self.test_summary_writer = tf.summary.FileWriter(self.test_summary_dir, self.sess.graph)\r\n\r\n # Checkpoint directory. TensorFlow assumes this directory already exists so we need to create it\r\n self.checkpoint_dir = os.path.abspath(os.path.join(self.out_dir, \"checkpoints\"))\r\n self.checkpoint_prefix = os.path.join(self.checkpoint_dir, \"model\")\r\n if not os.path.exists(self.checkpoint_dir):\r\n os.makedirs(self.checkpoint_dir)\r\n self.saver = tf.train.Saver(max_to_keep=30)\r\n def get_delta(self, p_t, z_tp1, delta_tm1, f):\r\n feed_dict = {self.p_t:p_t, self.z_tp1:z_tp1, self.delta_tm1:delta_tm1, self.f:f}\r\n delta = self.sess.run([self.delta],\r\n feed_dict=feed_dict)\r\n # print('q_acts', q_acts.tolist(),'num',num.tolist())\r\n delta = np.asarray(delta)\r\n print(delta)\r\n delta = delta.reshape((arglist.batch_size, 1))\r\n for i in range(len(delta)):\r\n if delta[i,0]>0.3:\r\n delta[i,0] = 1\r\n elif delta[i,0]<-0.3:\r\n delta[i,0] = -1\r\n else:\r\n delta[i,0] = 0\r\n return delta\r\n def get_loss(self, p_t, z_tp1, delta_tm1, f):\r\n feed_dict = {self.p_t:p_t, self.z_tp1:z_tp1, self.delta_tm1:delta_tm1, self.f:f}\r\n loss = self.sess.run([self.loss],\r\n feed_dict=feed_dict)\r\n # print('q_acts', q_acts.tolist(),'num',num.tolist())\r\n loss = np.asarray(loss)\r\n return loss\r\n def get_profit_per(self, p_t, z_tp1, delta_tm1, f):\r\n feed_dict = {self.p_t:p_t, self.z_tp1:z_tp1, self.delta_tm1:delta_tm1, self.f:f}\r\n profit_per = self.sess.run([self.profit_per],\r\n feed_dict=feed_dict)\r\n # print('q_acts', q_acts.tolist(),'num',num.tolist())\r\n profit_per = np.asarray(profit_per)\r\n return profit_per*100\r\n def train_step(self, p_t, f, z_tp1, delta_tm1):\r\n feed_dict = {self.p_t:p_t, self.z_tp1:z_tp1, self.f: f, self.delta_tm1: delta_tm1}\r\n _, step, loss, result = self.sess.run([self.train_op, self.global_step, self.loss, self.delta], \\\r\n feed_dict=feed_dict)\r\n time_str = datetime.datetime.now().isoformat()\r\n print(\"{}: step {}, loss {:g}\".format(time_str, step, loss))\r\n def 
test_step(self,p_t, f, z_tp1, delta_tm1, writer=None):\r\n        feed_dict = {self.p_t:p_t, self.z_tp1:z_tp1, self.f: f, self.delta_tm1: delta_tm1}\r\n        # fetch profit_per here: the graph defines no result_return tensor\r\n        step, summaries, loss, result, result_return = self.sess.run(\r\n            [self.global_step, self.test_summary_op, self.loss, self.delta, self.profit_per], \\\r\n            feed_dict=feed_dict)\r\n        time_str = datetime.datetime.now().isoformat()\r\n        print(\"{}: step {}, loss {:g}\".format(time_str, step, loss))\r\n        if writer:\r\n            writer.add_summary(summaries, step)\r\n        return result_return\r\nif __name__ == '__main__':\r\n    arglist = parse_args()\r\n    stock_list = [\"000300.SS\", \"0050.TW\", \"^GSPC\"]\r\n    #################################################################\r\n    for stock in stock_list:\r\n        train_f, test_f, train_z_tp1, test_z_tp1, z, train_p_t, test_p_t = get_input_data(stock)\r\n        if stock == \"000300.SS\":\r\n            model = model(arglist=arglist, z=z)\r\n            name = \"SS300\"\r\n        elif stock == \"0050.TW\":\r\n            name = \"0050\"\r\n        else:\r\n            name = stock\r\n        model.saver.restore(model.sess, \"FDRNN_init.ckpt\")\r\n        model.sess.run(tf.global_variables())\r\n        # model.sess.run(tf.global_variables_initializer())\r\n        # save_path = model.saver.save(model.sess, \"FDRNN_init.ckpt\")\r\n\r\n        loss_all = []\r\n        for epoch in range(arglist.n_epochs):\r\n            loss_each_epoch = 0\r\n            delta_tm1 = np.zeros((arglist.batch_size, 1))\r\n            delta_tm1 = np.asarray(delta_tm1, dtype=np.float32)\r\n            for k in range(len(train_f)-arglist.batch_size):\r\n                p_t, f, z_tp1 = batch_gen(train_p_t, k, arglist), batch_gen(train_f, k, arglist), batch_gen(train_z_tp1, k, arglist)\r\n                model.train_step(p_t=p_t, f=f, z_tp1=z_tp1, delta_tm1=delta_tm1)\r\n                loss= model.get_loss(p_t=p_t, f=f, z_tp1=z_tp1, delta_tm1=delta_tm1)\r\n                loss_each_epoch += loss\r\n                delta_tm1 = model.get_delta(p_t=p_t, f=f, z_tp1=z_tp1, delta_tm1=delta_tm1)\r\n                current_step = tf.train.global_step(model.sess, model.global_step)\r\n                if k == len(train_f)-arglist.batch_size-1:\r\n                    loss_all.append(loss_each_epoch)\r\n        plt_per(per=loss_all, name=\"FDRNN_train_loss_\" + name , xl=\"epochs\",\r\n                yl=\"loss\")\r\n\r\n        ##############################################################\r\n        accumulated_profit_t, accumulated_profit_t_all = 0, []\r\n        market_t, market_all = 0,[]\r\n        delta_list = []\r\n        delta_tm1 = np.zeros((arglist.batch_size, 1))\r\n        delta_tm1 = np.asarray(delta_tm1, dtype=np.float32)\r\n        for k in range(len(test_f) - arglist.batch_size):\r\n            p_t, f, z_tp1 = batch_gen(test_p_t, k, arglist), batch_gen(test_f, k, arglist), batch_gen(test_z_tp1, k,arglist)\r\n            # NB: calling train_step inside the test loop keeps adapting the model online\r\n            model.train_step(p_t=p_t, f=f, z_tp1=z_tp1, delta_tm1=delta_tm1)\r\n            delta_t = model.get_delta(p_t=p_t, f=f, z_tp1=z_tp1, delta_tm1=delta_tm1)\r\n            delta_list.append(delta_t[-1][-1])\r\n            profit_per = r(delta_t, z_tp1, p_t, delta_tm1)\r\n            delta_tm1 = delta_t\r\n            r_1 = r(np.ones((z_tp1.shape)), z_tp1, p_t, np.ones((z_tp1.shape)))\r\n            accumulated_profit_t += profit_per[-1][-1]\r\n            market_t += np.asarray(r_1)[-1][-1]\r\n            accumulated_profit_t_all.append(accumulated_profit_t.copy())\r\n            market_all.append(market_t.copy())\r\n            current_step = tf.train.global_step(model.sess, model.global_step)\r\n        plt_per(per = accumulated_profit_t_all, name= \"FDRNN_\" + name + \"_accumulated profit(%)\", xl = \"time\", yl = \"accumulated profit(%)\")\r\n        plt_num_act(delta_list, \"time\", \"delta\", name+\"_trading_strategy\")\r\n\r\n        plt_save(accumulated_profit_t_all, market_all, \"time\", \"accumulated profit(%)\",\"model\", \"market\", name)\r\n\r\n\r\n","sub_path":"Final 
_report/04_7108053117/code/FDRNN/FDRNN.py","file_name":"FDRNN.py","file_ext":"py","file_size_in_byte":16622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"266601968","text":"from System.WinGUI.WindowConsts import *\nfrom System.WinGUI import WinGUI_system as WGS\nfrom System.WinGUI.WindowComponent import WindowComponent\nfrom GameDecorLine import GameDecorLine\n\nclass GameSlider( WindowComponent ):\n\t\"\"\" GameSlider \"\"\"\n\n\tfactoryString = 'GameSlider'\n\t\n\tdef __init__( self, windowOwner = WGS.WINDOW_MANAGER, sliderType = SLIDER_HORIZONTAL, defPos = ( 0, 0 ), defSize = ( 200, 20 ), defAlign = None ):\n\t\tWindowComponent.__init__( self, defPos = defPos, defSize = defSize, defAlign = defAlign )\n\t\tself.sliderType = sliderType\n\t\tself.windowOwner = windowOwner\n\t\tself.mainFocus = windowOwner.mainFocus\n\t\tself.setColour( 255, 255, 255, 0 )\n\t\tif ( self.sliderType == SLIDER_HORIZONTAL ) : self.setSize( defSize[ 0 ], defSize[ 1 ] )\n\t\telif ( self.sliderType == SLIDER_VERTICAL ) : self.setSize( defSize[ 1 ], defSize[ 0 ] )\n\t\tself.value = 128\n\t\tself.valueMin = 0\n\t\tself.valueMax = 255\n\t\tif ( self.sliderType == SLIDER_NONE ) : self.component.visible = False\n\n\t\tif ( self.sliderType == SLIDER_HORIZONTAL ) : self.decorLine = GameDecorLine( windowOwner = self, defSize = ( 1, 3 ), defAlign = alignOptions( align = ALIGN_RANDOM, size = [ 1, 3 ], aligns = [ 1, 1, 0, 1 ], margins = [ 9, 10, 0, 10 ] ) )\n\t\telif ( self.sliderType == SLIDER_VERTICAL ) : self.decorLine = GameDecorLine( windowOwner = self, defSize = ( 3, 1 ), defAlign = alignOptions( align = ALIGN_RANDOM, size = [ 3, 1 ], aligns = [ 1, 1, 0, 1 ], margins = [ 10, 10, 0, 10 ] ) )\n\t\t\n\t\tself.addClearChild( name = 'slider', asDecorator = True )\n\t\tself.slider.loadAppearance( 'gui/GameGUI/button.xml/button_round_normal' )\n\t\tself.slider.availableToFocus( True )\n\t\tself.slider.setPosition( 1, 1 )\n\t\tself.slider.setSize( 20, 20 )\n\t\tself.slider.onStartDrag = self.onStartDragSlider\n\t\tself.slider.onProcessDrag = self.processDragSlider\n\n\t\twindowOwner.addChildComponent( self )\n\t\tself.setValue( self.value )\n\n\tdef onStartDragSlider( self, wevent ):\n\t\tself.dragPosition = self.slider.getPosition()\n\t\t\t\n\tdef processDragSlider( self, wevent ):\n\t\tdx, dy = wevent.args\n\t\tx = self.dragPosition[ 0 ] + dx\n\t\ty = self.dragPosition[ 1 ] + dy\n\t\txPos = yPos = 5\n\t\tif ( self.sliderType == SLIDER_HORIZONTAL ):\n\t\t\txPos = x\n\t\t\tbndLt = 0\n\t\t\tbndRt = self.getWidth() - 20\n\t\t\tbndDiff = ( bndRt - bndLt )\n\t\t\tif ( bndDiff == 0 ) : bndDiff = 1.0\n\t\t\tif ( xPos < bndLt ) : xPos = bndLt\n\t\t\tif ( xPos > bndRt ) : xPos = bndRt\n\t\t\tself.slider.setLeft( xPos )\n\t\t\tscale = ( xPos - bndLt) / bndDiff\n\t\t\tself.value = int( self.valueMin + ( self.valueMax - self.valueMin ) * scale )\n\t\telif ( self.sliderType == SLIDER_VERTICAL ):\n\t\t\tyPos = y\n\t\t\tbndUp = 0\n\t\t\tbndDn = self.getHeight() - 20\n\t\t\tbndDiff = ( bndDn - bndUp )\n\t\t\tif ( bndDiff == 0 ) : bndDiff = 1.0\n\t\t\tif ( yPos < bndUp ) : yPos = bndUp\n\t\t\tif ( yPos > bndDn ) : yPos = bndDn\n\t\t\tself.slider.setTop( yPos )\n\t\t\tscale = ( yPos - bndUp) / bndDiff\n\t\t\tself.value = int( self.valueMin + ( self.valueMax - self.valueMin ) * scale )\n\t\tself.event( 'onGameSliderChange' )\n\t\t\t\n\tdef setValue( self, value ) :\n\t\tself.value = value\n\t\tif ( self.value < self.valueMin ) : self.value = self.valueMin\n\t\tif ( self.value > 
self.valueMax ) : self.value = self.valueMax\n\t\tvalueDiff = ( self.valueMax - self.valueMin )\n\t\tif ( valueDiff == 0 ) : valueDiff = 1.0\n\t\tscale = float( self.value - self.valueMin ) / float( valueDiff )\n\t\tif ( self.sliderType == SLIDER_HORIZONTAL ):\n\t\t\tbndLt = 0\n\t\t\tbndRt = self.getWidth() - 20\n\t\t\tbndDiff = ( bndRt - bndLt )\n\t\t\tself.slider.setLeft( int ( bndLt + bndDiff * scale ) )\n\t\telif ( self.sliderType == SLIDER_VERTICAL ):\n\t\t\tbndUp = 0\n\t\t\tbndDn = self.getHeight() - 20\n\t\t\tbndDiff = ( bndDn - bndUp )\n\t\t\tself.slider.setTop( int ( bndUp + bndDiff * scale ) )\n\t\tself.event( 'onGameSliderChange' )\n\t\t\n# GameSlider.py","sub_path":"res/scripts/client/Game/GameGUI/GameSlider.py","file_name":"GameSlider.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"107407261","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import SeparableConvolution2D, Convolution2D, MaxPooling2D\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nfrom keras.utils.visualize_util import plot\n\nimport tensorflow as tf\n\nfrom sklearn.metrics import roc_auc_score\nfrom tensorflow.contrib.metrics import streaming_auc\n\nimport numpy as np\nimport glob\nimport os\nimport itertools\nimport argparse\nfrom tqdm import tqdm\n\n\ndef normalize(img):\n return img/np.max(img)\n\n\ndef reduce_time_res(img, time_warp):\n shape = img.shape\n return img.reshape(shape[0], shape[1] / time_warp, time_warp).sum(axis=2)\n\n\ndef log(img):\n return np.log10(img)\n\n\ndef get_lowres_data(d, trans):\n y_class = {'0': np.array([1, 0]), '1': np.array([0, 1])}\n files = glob.glob(os.path.join(d, 'lowres-*.npz'))\n for f in files:\n y = y_class[os.path.splitext(os.path.basename(f))[0].split('_')[-1]]\n raw_img = np.load(f)\n if np.max(raw_img['arr_0'] == 0):\n continue\n imgs = [trans(img) for _, img in raw_img.items()]\n yield (np.dstack(imgs), y)\n\n\ndef get_hires_data(d, trans):\n y_class = {'0': np.array([1, 0]), '1': np.array([0, 1])}\n files = glob.glob(os.path.join(d, '[0-9]*.npz'))\n for f in files:\n y = y_class[os.path.splitext(os.path.basename(f))[0].split('_')[-1]]\n raw_img = np.load(f)\n if np.max(raw_img['arr_0'] == 0):\n continue\n imgs = [trans(img) for _, img in raw_img.items()]\n yield (np.dstack(imgs), y)\n\n\ndef model_a(input_shape):\n model = Sequential()\n model.add(Convolution2D(32, 3, 9, border_mode='valid',\n input_shape=input_shape))\n # model.add(Convolution2D(32, 34, 3, border_mode='valid',\n # input_shape=(544, 200, 1)))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Convolution2D(64, 3, 3, border_mode='valid'))\n model.add(Activation('relu'))\n model.add(Convolution2D(64, 3, 3))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy',\n optimizer=sgd,\n metrics=['accuracy'])\n return model\n\n\ndef model_b(input_shape):\n model = Sequential()\n model.add(SeparableConvolution2D(32, 3, 9, border_mode='valid',\n input_shape=input_shape))\n 
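# the separable first conv factorises the 3x9 kernel into depthwise and pointwise steps, cutting parameters\n   \u0020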
model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Convolution2D(64, 3, 3, border_mode='valid'))\n model.add(Activation('relu'))\n model.add(Convolution2D(64, 3, 3))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy',\n optimizer=sgd,\n metrics=['accuracy'])\n return model\n\ndef model_c(input_shape):\n model = Sequential()\n model.add(SeparableConvolution2D(32, 6, 1, border_mode='same',\n input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 1, 6))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3, border_mode='valid'))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3, border_mode='valid'))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3, border_mode='valid'))\n model.add(Activation('relu'))\n # model.add(Convolution2D(32, 3, 3, border_mode='valid'))\n # model.add(Activation('relu'))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=\"RMSprop\",\n metrics=['accuracy'])\n return model\n\n\ndef model_d(input_shape):\n model = Sequential()\n model.add(SeparableConvolution2D(32, 136, 1, border_mode='same',\n input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 1, 6))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3, border_mode='same'))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3, border_mode='same'))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3, border_mode='same'))\n model.add(Activation('relu'))\n # model.add(Convolution2D(32, 3, 3, border_mode='valid'))\n # model.add(Activation('relu'))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=\"RMSprop\",\n metrics=['accuracy'])\n return model\n\n\ndef model_e(input_shape):\n model = Sequential()\n model.add(SeparableConvolution2D(32, 136, 6, border_mode='same',\n input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3))\n model.add(Activation('relu'))\n # model.add(Convolution2D(32, 3, 3, border_mode='valid'))\n # model.add(Activation('relu'))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=\"RMSprop\",\n metrics=['accuracy'])\n return model\n\n\ndef data_batch(n, img_generator):\n it = iter(img_generator)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n else:\n x_train, y_train = zip(*chunk)\n yield (np.array(x_train), np.array(y_train))\n\n\nif 
__name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('ddir', help='directory with npz files')\n parser.add_argument('--hires', action='store_true', help='use hires images.')\n parser.add_argument('-o', '--output', default='keras-classifier.h5',\n help='directory with npz files')\n parser.add_argument('--validation', default=0.3, type=float,\n help='fraction of data used for validation.')\n parser.add_argument('--epochs', default=1, type=int,\n help='number of epochs')\n args = parser.parse_args()\n\n print(\"Loading data...\")\n nb_classes = 2\n if args.hires:\n warp = 10\n img_iter = get_hires_data(args.ddir, lambda img:\n reduce_time_res(normalize(img), warp))\n shape = (136, 800/warp, 16)\n validation = None\n else:\n img_iter = get_lowres_data(args.ddir, normalize)\n shape = (34, 200, 16)\n x_test, y_test = zip(*get_lowres_data(os.path.join(args.ddir, 'test/')))\n validation = (np.array(x_test), np.array(y_test))\n print(\"...DONE\")\n # print \"Loading data...\"\n # x_train, y_train = zip(*img_iter)\n # # x_train, y_train = zip(*itertools.islice(img_iter, 100))\n # print \"done\"\n # print \"Data shape: \", x_train[0].shape\n\n print(\"Compiling model...\")\n model = model_e(shape)\n print(\"...DONE\")\n print(model.summary())\n plot(model, to_file=args.output + \".model.png\", show_shapes=True)\n\n\n model.fit_generator(data_batch(16, itertools.cycle(img_iter)), 1650, args.epochs,\n validation_data=validation)\n\n # model.fit(np.array(x_train), np.array(y_train), batch_size=16,\n # nb_epoch=1, validation_split=args.validation)\n\n model.save(args.output)\n\n if validation is not None:\n y_pred = model.predict_proba(validation[0])\n print(\"ROC: {0:.3f}\".format(roc_auc_score(validation[1], y_pred)))\n","sub_path":"cnn-test.py","file_name":"cnn-test.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"395879213","text":"# Requires tables: effects and items\n# Requires XML: \n#\tequipment_en.xml (From Online Handbook)\n\nimport MySQLdb\nimport xml.dom\nfrom xml.dom import minidom\nimport databaseConfig\n\nclass Effect:\n\tdef __init__(self, effect, attr1, attr2, amount, scaling):\n\t\tself.effect = effect\n\t\tself.attr1 = attr1\n\t\tself.attr2 = attr2\n\t\t\n\t\tself.amount = amount\n\t\tself.scaling = scaling\n\t\t\n\tdef __repr__(self):\n\t\treturn \"%s %s %s %s %s\" % (self.effect, self.attr1, self.attr2, self.amount, self.scaling)\n\nclass EffectItem:\n\tdef __init__(self, aoe, effects, levels, rarity):\n\t\tself.aoe = aoe\n\t\tself.effects = effects\n\t\t\n\t\tself.levels = levels\n\t\tself.rarity = rarity\n\t\t\n\tdef __repr__(self):\n\t\treturn \"%s %s %s %s\" % (self.aoe, self.effects, self.levels, self.rarity)\n\t\n\nxml = minidom.parse('equipment_en.xml')\nitems = xml.getElementsByTagName('Item')\n\n\ncompletedValues = []\n\nfor item in items:\n\tinfo = {}\n\teffects = []\n\tinfo['aoe'] = item.attributes['id'].value\n\tlevel = item.attributes['levels'].value\n\tlevels = level.split('|')\n\tinfo['rarity'] = item.attributes['rarity'].value\n\tfor children in item.childNodes:\n\t\tif (children.localName == 'MaximumVelocity' or \n\t\t\t\tchildren.localName == 'TargetSpeedBoostResist' or\n\t\t\t\tchildren.localName == 'TrainPoints' or\n\t\t\t\tchildren.localName == 'CostAll' or\n\t\t\t\tchildren.localName == 'Damage' or\n\t\t\t\tchildren.localName == 'BuildPoints' or\n\t\t\t\tchildren.localName == 'LOS' or\n\t\t\t\tchildren.localName == 'ConvertResist' 
or\n\t\t\t\tchildren.localName == 'Hitpoints' or\n\t\t\t\tchildren.localName == 'AreaDamageReduction' or\n\t\t\t\tchildren.localName == 'BuildingWorkRate' or\n\t\t\t\tchildren.localName == 'DamageBonusReduction'):\n\t\t\teffects.append(Effect(children.localName, 'NULL', 'NULL', children.attributes['amount'].value, children.attributes['scaling'].value))\n\t\t\n\t\tif children.localName == 'DamageBonus':\n\t\t\teffects.append(Effect(children.localName, children.attributes['unittype'].value, 'NULL', children.attributes['amount'].value, children.attributes['scaling'].value))\n\t\t\n\t\tif (children.localName == 'HitPercent' or\n\t\t\t\tchildren.localName == 'MaximumRange' or\n\t\t\t\tchildren.localName == 'TargetSpeedBoost'):\n\t\t\teffects.append(Effect(children.localName, children.attributes['action'].value, 'NULL', children.attributes['amount'].value, children.attributes['scaling'].value))\n\t\t\n\t\tif children.localName == 'Armor':\n\t\t\teffects.append(Effect(children.localName, children.attributes['damagetype'].value, 'NULL', children.attributes['amount'].value, children.attributes['scaling'].value))\n\t\tif children.localName == 'CarryCapacity':\n\t\t\teffects.append(Effect(children.localName, children.attributes['resource'].value, 'NULL', children.attributes['amount'].value, children.attributes['scaling'].value))\n\t\t\n\t\tif children.localName == 'WorkRate':\n\t\t\tif children.attributes['action'].value == 'Heal' or children.attributes['action'].value == 'Trade':\n\t\t\t\teffects.append(Effect(children.localName, children.attributes['action'].value, 'NULL', children.attributes['amount'].value, children.attributes['scaling'].value))\n\t\t\tif (children.attributes['action'].value == 'Convert' or\n\t\t\t\t\tchildren.attributes['action'].value == 'Gather' or\n\t\t\t\t\tchildren.attributes['action'].value == 'Build' or\n\t\t\t\t\tchildren.attributes['action'].value == 'Empower'):\n\t\t\t\teffects.append(Effect(children.localName, children.attributes['action'].value, children.attributes['unittype'].value, children.attributes['amount'].value, children.attributes['scaling'].value))\n\t\n\tif len(info) == 2 and len(effects) > 0:\n\t\tcompletedValues.append(EffectItem(info['aoe'], effects, levels, info['rarity']))\n\n#print completedValues\n\n# mysql\nconn = MySQLdb.connect (host = databaseConfig.host, user = databaseConfig.username, passwd = databaseConfig.password, db = databaseConfig.database)\ncursor = conn.cursor()\n\ncursor.execute(\"TRUNCATE TABLE effects_items;\")\n\nfor value in completedValues:\n\tquery = \"SELECT id FROM items WHERE aoe LIKE '%s';\" % (value.aoe)\n\tcursor.execute(query)\n\trows = cursor.fetchall()\n\tif len(rows) == 1:\n\t\titem_id = rows[0][0]\n\t\n\tfor effect in value.effects:\n\t\tif effect.attr1 == 'NULL':\n\t\t\tsub1 = 'is NULL'\n\t\telse:\n\t\t\tsub1 = \"= '%s'\" % (effect.attr1)\n\t\n\t\tif effect.attr2 == 'NULL':\n\t\t\tsub2 = 'is NULL'\n\t\telse:\n\t\t\tsub2 = \"= '%s'\" % (effect.attr2)\n\t\n\t\tquery = \"SELECT id FROM effects WHERE effect LIKE '%s' AND attribute1 %s AND attribute2 %s;\" % (effect.effect, sub1, sub2)\n\t\tcursor.execute(query)\n\t\trows = cursor.fetchall()\n\t\tif len(rows) == 1:\n\t\t\teffect_id = rows[0][0]\n\t\t\n\t\t# Calculate for each level\n\t\tfor level in value.levels:\n\t\t\tif int(level) > 3:\n\t\t\t\tamount = ((float(effect.amount) + (float(effect.scaling) * (int(level))) - 1) * 100)\n\t\t\t\tif effect.effect == 'WorkRate' and effect.attr1 == 'Empower' and effect.attr2 == 'Dropsite':\n\t\t\t\t\tamount = amount * 
11\n\t\t\t\t\t\n\t\t\t\tlevel = int(level) - 3 \n\t\t\t\tquery = \"INSERT INTO effects_items (effect_id, item_id, amount, level) VALUES (%s, %s, %s, %s);\" % (effect_id, item_id, amount, level)\n\t\t\t\tcursor.execute(query)\n\n\n#end mysql\ncursor.close()\nconn.commit()\nconn.close()\n\n\n","sub_path":"scripts/effects-items-sql.py","file_name":"effects-items-sql.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"353664478","text":"from django.db import models\nfrom django.conf import settings\nimport random\n\nfrom django.db.models.deletion import SET_NULL\n\nUser = settings.AUTH_USER_MODEL\n\nclass TweetLike(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n tweet = models.ForeignKey(\"Tweet\", on_delete=models.CASCADE)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n# Create your models here.\nclass Tweet(models.Model):\n # Maps to SQL Data\n # id = models.AutoField(primary_key=True)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n likes = models.ManyToManyField(User, related_name=\"tweet_user\", through=TweetLike,blank=True)\n content = models.TextField(blank=True, null=True)\n parent = models.ForeignKey(\"self\", null=True, on_delete=models.SET_NULL)\n image = models.FileField(upload_to='images/', blank=True, null=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n # def __str__(self):\n # return self.content\n\n class Meta:\n ordering = ['-id']\n\n @property\n def is_retweet(self):\n return self.parent != None\n\n # def serialize(self):\n # return {\n # \"id\": self.id,\n # \"content\": self.content,\n # \"likes\": random.randint(0, 400)\n # }","sub_path":"tweets/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"446958787","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Generic.get_metrics\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2016 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# Python modules\nimport time\nimport os\nfrom threading import Lock\nimport re\n# Third-party modules\nimport six\nimport ujson\n# NOC modules\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetmetrics import IGetMetrics\nfrom noc.core.mib import mib\nfrom noc.core.handler import get_handler\nfrom noc.core.matcher import match\n\nNS = 1000000000.0\n\nOID_GENERATOR_TYPE = {}\n\nrx_rule_var = re.compile(r\"{{\\s*([^}]+?)\\s*}}\")\n\n\nclass MetricConfig(object):\n __slots__ = (\n \"id\",\n \"metric\",\n \"path\",\n \"ifindex\",\n \"sla_type\"\n )\n\n def __init__(self, id, metric, path=None, ifindex=None,\n sla_type=None):\n self.id = id\n self.metric = metric\n self.path = path\n self.ifindex = ifindex\n self.sla_type = sla_type\n\n def __repr__(self):\n return \"\" % (self.id, self.metric)\n\n\nclass BatchConfig(object):\n __slots__ = (\"id\", \"metric\", \"path\", \"type\", \"scale\")\n\n def __init__(self, id, metric, path, type, scale):\n self.id = id\n self.metric = metric\n self.path = path\n self.type = type\n self.scale = scale\n\n\nclass OIDRuleBase(type):\n def __new__(mcs, name, bases, attrs):\n m = type.__new__(mcs, name, bases, attrs)\n OID_GENERATOR_TYPE[m.name] = m\n return 
m\n\n\n@six.add_metaclass(OIDRuleBase)\nclass OIDRule(object):\n \"\"\"\n SNMP OID generator for SNMP_OIDS\n \"\"\"\n name = \"oid\"\n default_type = \"gauge\"\n\n def __init__(self, oid, type=None, scale=1, path=None):\n self.oid = oid\n self.is_complex = not isinstance(oid, six.string_types)\n self.type = type or self.default_type\n if isinstance(scale, six.string_types):\n self.scale = get_handler(\n \"noc.core.script.metrics.%s\" % scale\n )\n else:\n self.scale = scale\n self.path = path or []\n\n def iter_oids(self, script, metric):\n \"\"\"\n Generator yielding oid, type, scale, path\n :param script:\n :param metric:\n :return:\n \"\"\"\n if self.is_complex:\n yield tuple(self.oid), self.type, self.scale, self.path\n else:\n yield self.oid, self.type, self.scale, self.path\n\n @classmethod\n def load(cls, data):\n \"\"\"\n Create from data structure\n :param data:\n :return:\n \"\"\"\n if type(data) != dict:\n raise ValueError(\"object required\")\n if \"$type\" not in data:\n raise ValueError(\"$type key is required\")\n t = data[\"$type\"]\n if t not in OID_GENERATOR_TYPE:\n raise ValueError(\"Invalid $type '%s'\" % t)\n return OID_GENERATOR_TYPE[t].from_json(data)\n\n @classmethod\n def from_json(cls, data):\n kwargs = {}\n for k in data:\n if not k.startswith(\"$\"):\n kwargs[k] = data[k]\n return cls(**kwargs)\n\n @classmethod\n def expand(cls, template, context):\n \"\"\"\n Expand {{ var }} expressions in template with given context\n :param template:\n :param context:\n :return:\n \"\"\"\n return rx_rule_var.sub(\n lambda x: str(context[x.group(1)]),\n template\n )\n\n def expand_oid(self, **kwargs):\n \"\"\"\n Apply kwargs to template and return resulting oid\n :param kwargs:\n :return:\n \"\"\"\n if self.is_complex:\n oids = tuple(mib[self.expand(o, kwargs)] for o in self.oid)\n if None in oids:\n return None\n else:\n return oids\n else:\n return mib[self.expand(self.oid, kwargs)]\n\n\nclass CounterRule(OIDRule):\n \"\"\"\n SNMP OID for SNMP counters\n \"\"\"\n name = \"counter\"\n default_type = \"counter\"\n\n\nclass BooleanRule(OIDRule):\n \"\"\"\n SNMP OID for booleans\n \"\"\"\n name = \"bool\"\n default_type = \"bool\"\n\n\n@six.add_metaclass(OIDRuleBase)\nclass OIDsRule(object):\n \"\"\"\n Multiple items for single metric\n \"\"\"\n name = \"oids\"\n\n def __init__(self, oids):\n self.oids = oids\n\n def iter_oids(self, script, metric):\n for rule in self.oids:\n for r in rule.iter_oids(script, metric):\n yield r\n\n @classmethod\n def from_json(cls, data):\n if \"oids\" not in data:\n raise ValueError(\"oids is required\")\n if type(data[\"oids\"]) != list:\n raise ValueError(\"oids must be list\")\n return OIDsRule(\n oids=[OIDRule.from_json(d) for d in data[\"oids\"]]\n )\n\n\n@six.add_metaclass(OIDRuleBase)\nclass MatcherRule(object):\n \"\"\"\n Multiple items for single metric\n \"\"\"\n name = \"match\"\n\n def __init__(self, oids, matchers):\n self.oids = oids\n self.matchers = matchers\n\n def iter_oids(self, script, metric):\n ctx = script.version\n for matcher, rule in self.oids:\n # match(ctx, []) always True, Priority in metrics matcher config matcher\n if (matcher is None or\n (match(ctx, self.matchers.get(matcher, [])) and matcher in self.matchers) or\n getattr(script, matcher, None)):\n for r in rule.iter_oids(script, metric):\n yield r\n else:\n # Only one match\n break\n\n @classmethod\n def from_json(cls, data):\n if \"$match\" not in data:\n raise ValueError(\"Matcher is required\")\n if type(data[\"$match\"]) != list:\n raise ValueError(\"$match 
must be list\")\n return MatcherRule(oids=[(d.get(\"$match\"),\n OIDRule.load(d)) for d in data[\"$match\"]],\n matchers=data.get(\"$matchers\", {}))\n\n\n@six.add_metaclass(OIDRuleBase)\nclass CapabilityRule(object):\n \"\"\"\n Capability-based selection\n\n oids is the list of (Capability, OIDRule)\n \"\"\"\n name = \"caps\"\n\n def __init__(self, oids):\n self.oids = oids\n\n def iter_oids(self, script, metric):\n for cap, oid in self.oids:\n if script.has_capability(cap):\n for r in oid.iter_oids(script, metric):\n yield r\n break\n\n @classmethod\n def from_json(cls, data):\n if \"oids\" not in data:\n raise ValueError(\"oids is required\")\n if type(data[\"oids\"]) != list:\n raise ValueError(\"oids must be list\")\n return CapabilityRule(\n oids=[OIDRule.load(d) for d in data[\"oids\"]]\n )\n\n\n@six.add_metaclass(OIDRuleBase)\nclass HiresRule(object):\n \"\"\"\n Select *hires* chain if SNMP | IF-MIB HC capability set,\n Select *normal* capability otherwise\n \"\"\"\n name = \"hires\"\n\n def __init__(self, hires, normal):\n self.hires = hires\n self.normal = normal\n\n def iter_oids(self, script, metric):\n if script.has_capability(\"SNMP | IF-MIB | HC\"):\n g = self.hires.iter_oids\n else:\n g = self.normal.iter_oids\n for r in g(script, metric):\n yield r\n\n @classmethod\n def from_json(cls, data):\n for v in (\"hires\", \"normal\"):\n if v not in data:\n raise ValueError(\"%s is required\" % v)\n return HiresRule(\n hires=OIDRule.load(data[\"hires\"]),\n normal=OIDRule.load(data[\"normal\"])\n )\n\n\nclass InterfaceRule(OIDRule):\n \"\"\"\n Expand {{ifIndex}}\n \"\"\"\n name = \"ifindex\"\n\n def iter_oids(self, script, cfg):\n if cfg.ifindex is not None:\n oid = self.expand_oid(ifIndex=cfg.ifindex)\n if oid:\n yield oid, self.type, self.scale, cfg.path\n\n\nclass CapabilityIndexRule(OIDRule):\n \"\"\"\n Expand {{index}} to range given in capability\n capability: Integer capability containing number of iterations\n start: starting index\n \"\"\"\n name = \"capindex\"\n\n def __init__(self, oid, type=None, scale=1, start=0, capability=None):\n super(CapabilityIndexRule, self).__init__(oid, type=type, scale=scale)\n self.start = start\n self.capability = capability\n\n def iter_oids(self, script, cfg):\n if self.capability and script.has_capability(self.capability):\n for i in range(\n self.start,\n script.capabilities[self.capability] + self.start\n ):\n oid = self.expand_oid(index=i)\n if oid:\n yield oid, self.type, self.scale, cfg.path\n\n\nclass CapabilityListRule(OIDRule):\n \"\"\"\n Expand {{item}} from capability\n capability: String capability, separated by *separator*\n separator: String separator, comma by default\n strip: Strip resulting item, remove spaces from both sides\n \"\"\"\n name = \"caplist\"\n\n def __init__(self, oid, type=None, scale=1, capability=None,\n separator=\",\", strip=True):\n super(CapabilityListRule, self).__init__(oid, type=type, scale=scale)\n self.capability = capability\n self.separator = separator\n self.strip = strip\n\n def iter_oids(self, script, cfg):\n if self.capability and script.has_capability(self.capability):\n for i in script.capabilities[self.capability].split(self.separator):\n if self.strip:\n i = i.strip()\n if not i:\n continue\n oid = self.expand_oid(item=i)\n if oid:\n yield oid, self.type, self.scale, cfg.path\n\n\nclass Script(BaseScript):\n \"\"\"\n Retrieve data for topology discovery\n \"\"\"\n name = \"Generic.get_metrics\"\n interface = IGetMetrics\n requires = []\n\n # Define counter types\n GAUGE = \"gauge\"\n 
COUNTER = \"counter\"\n #\n _SNMP_OID_RULES = {} # Profile -> metric type ->\n _SNMP_OID_LOCK = Lock()\n\n def __init__(self, *args, **kwargs):\n super(Script, self).__init__(*args, **kwargs)\n self.metrics = []\n self.ts = None\n\n def get_snmp_metrics_get_timeout(self):\n \"\"\"\n Timeout for snmp GET request\n :return:\n \"\"\"\n return self.profile.snmp_metrics_get_timeout\n\n def get_snmp_metrics_get_chunk(self):\n \"\"\"\n Aggregate up to *snmp_metrics_get_chunk* oids\n to one SNMP GET request\n :return:\n \"\"\"\n return self.profile.snmp_metrics_get_chunk\n\n def execute(self, metrics):\n \"\"\"\n Metrics is a list of:\n * id -- Opaque id, must be returned back\n * metric -- Metric type\n * path -- metric path\n * ifindex - optional ifindex\n * sla_test - optional sla test inventory\n \"\"\"\n metrics = [MetricConfig(**m) for m in metrics]\n self.collect_profile_metrics(metrics)\n if self.has_capability(\"SNMP\"):\n self.collect_snmp_metrics(metrics)\n #\n return self.get_metrics()\n\n def collect_profile_metrics(self, metrics):\n \"\"\"\n Profile extension\n \"\"\"\n pass\n\n def collect_snmp_metrics(self, metrics):\n \"\"\"\n Collect all collectible SNMP metrics\n \"\"\"\n batch = {}\n for m in metrics:\n # Calculate oids\n self.logger.debug(\"Make oid for metrics: %s\" % m.metric)\n snmp_rule = self.get_snmp_rule(m.metric)\n if snmp_rule:\n for oid, vtype, scale, path in snmp_rule.iter_oids(self, m):\n batch[oid] = BatchConfig(\n id=m.id,\n metric=m.metric,\n path=path,\n type=vtype,\n scale=scale\n )\n self.logger.debug(\"Batch: %s\" % batch)\n # Run snmp batch\n if not batch:\n self.logger.debug(\"Nothing to fetch via SNMP\")\n return\n # Optimize fetching, aggregating up to GET_CHUNK\n # in single request\n snmp_get_chunk = self.get_snmp_metrics_get_chunk()\n oids = set()\n for o in batch:\n if isinstance(o, six.string_types):\n oids.add(o)\n else:\n oids.update(o)\n oids = list(oids)\n results = {} # oid -> value\n self.snmp.set_timeout_limits(self.get_snmp_metrics_get_timeout())\n while oids:\n chunk, oids = oids[:snmp_get_chunk], oids[snmp_get_chunk:]\n chunk = dict((x, x) for x in chunk)\n try:\n results.update(\n self.snmp.get(chunk)\n )\n except self.snmp.TimeOutError as e:\n self.logger.error(\n \"Failed to get SNMP OIDs %s: %s\",\n oids, e\n )\n except self.snmp.FatalTimeoutError:\n self.logger.error(\n \"Fatal timeout error on: %s\", oids\n )\n break\n except self.snmp.SNMPError as e:\n self.logger.error(\n \"SNMP error code %s\", e.code\n )\n # Process results\n for oid in batch:\n ts = self.get_ts()\n if isinstance(oid, six.string_types):\n if oid in results:\n v = results[oid]\n if v is None:\n continue\n else:\n self.logger.error(\n \"Failed to get SNMP OID %s\",\n oid\n )\n continue\n elif callable(batch[oid].scale):\n # Multiple oids and calculated value\n v = []\n for o in oid:\n if o in results:\n vv = results[o]\n if vv is None:\n break\n else:\n v += [vv]\n else:\n self.logger.error(\n \"Failed to get SNMP OID %s\",\n o\n )\n break\n # Check result does not contain None\n if len(v) < len(oid):\n self.logger.error(\n \"Cannot calculate complex value for %s \"\n \"due to missed values: %s\",\n oid, v\n )\n continue\n else:\n self.logger.error(\n \"Cannot evaluate complex oid %s. 
\"\n \"Scale must be callable\",\n oid\n )\n continue\n bv = batch[oid]\n self.set_metric(\n id=bv.id,\n metric=bv.metric,\n value=v,\n ts=ts,\n path=bv.path,\n type=bv.type,\n scale=bv.scale\n )\n\n def get_ifindex(self, name):\n return self.ifindexes.get(name)\n\n def get_ts(self):\n \"\"\"\n Returns current timestamp in nanoseconds\n \"\"\"\n if not self.ts:\n self.ts = int(time.time() * NS)\n return self.ts\n\n def set_metric(self, id, metric, value, ts=None,\n path=None, type=\"gauge\", scale=1):\n \"\"\"\n Append metric to output\n \"\"\"\n if callable(scale):\n if not isinstance(value, list):\n value = [value]\n value = scale(*value)\n scale = 1\n self.metrics += [{\n \"id\": id,\n \"ts\": ts or self.get_ts(),\n \"metric\": metric,\n \"path\": path or [],\n \"value\": value,\n \"type\": type,\n \"scale\": scale\n }]\n\n def get_metrics(self):\n return self.metrics\n\n def get_snmp_rule(self, metric_type):\n profile = self.profile.name\n if profile not in self._SNMP_OID_RULES:\n self.load_snmp_rules(profile)\n self.logger.debug(\"Loading profile rules: %s\" % self._SNMP_OID_RULES[profile])\n return self._SNMP_OID_RULES[profile].get(metric_type)\n\n @classmethod\n def load_snmp_rules(cls, profile):\n \"\"\"\n Initialize SNMP rules from JSON\n :param profile:\n :return:\n \"\"\"\n with cls._SNMP_OID_LOCK:\n if profile in cls._SNMP_OID_RULES:\n # Load by concurrent thread\n return\n cls._SNMP_OID_RULES[profile] = {}\n v, p = profile.split(\".\")\n for path in [\n os.path.join(\"sa\", \"profiles\", \"Generic\", \"snmp_metrics\"),\n os.path.join(\"sa\", \"profiles\", v, p, \"snmp_metrics\"),\n os.path.join(\"custom\", \"sa\", \"profiles\", \"Generic\", \"snmp_metrics\"),\n os.path.join(\"custom\", \"sa\", v, p, \"snmp_metrics\")\n ]:\n cls.apply_rules_from_dir(\n cls._SNMP_OID_RULES[profile],\n path\n )\n\n @classmethod\n def apply_rules_from_dir(cls, rules, path):\n if not os.path.exists(path):\n return\n for root, dirs, files in os.walk(path):\n for f in files:\n if f.endswith(\".json\"):\n cls.apply_rules_from_json(rules,\n os.path.join(root, f))\n\n @classmethod\n def apply_rules_from_json(cls, rules, path):\n # @todo: Check read access\n with open(path) as f:\n data = f.read()\n try:\n data = ujson.loads(data)\n except ValueError as e:\n raise ValueError(\n \"Failed to parse file '%s': %s\" % (path, e)\n )\n if type(data) != dict:\n raise ValueError(\n \"Error in file '%s': Must be defined as object\" % path\n )\n if \"$metric\" not in data:\n raise ValueError(\"$metric key is required\")\n rules[data[\"$metric\"]] = OIDRule.load(data)\n","sub_path":"sa/profiles/Generic/get_metrics.py","file_name":"get_metrics.py","file_ext":"py","file_size_in_byte":18330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"627083447","text":"\r\nfrom flask import Flask, render_template\r\nfrom flask_socketio import SocketIO, emit\r\nimport logging\r\nimport random\r\nfrom argparse import ArgumentParser\r\nfrom itertools import chain\r\nfrom pprint import pformat\r\nimport warnings\r\nimport json\r\nfrom datetime import datetime\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\nfrom pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer\r\nimport threading\r\n\r\nf = open(\"asd.txt\", \"r\")\r\ncount = int (f.read(1))\r\n\r\ndef readfileloop():\r\n\tthreading.Timer(1.0, readfileloop).start()\r\n\tf = open(\"asd.txt\", \"r\")\r\n\tglobal count\r\n\tcount = int 
(f.read(1))\r\n\r\nreadfileloop()\r\n\r\n\r\n\r\ndef top_filtering(logits, top_k=0., top_p=0.9, threshold=-float('Inf'), filter_value=-float('Inf')):\r\n\r\n assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code\r\n top_k = min(top_k, logits.size(-1))\r\n if top_k > 0:\r\n # Remove all tokens with a probability less than the last token in the top-k tokens\r\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\r\n logits[indices_to_remove] = filter_value\r\n\r\n if top_p > 0.0:\r\n # Compute cumulative probabilities of sorted tokens\r\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\r\n cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\r\n\r\n # Remove tokens with cumulative probability above the threshold\r\n sorted_indices_to_remove = cumulative_probabilities > top_p\r\n # Shift the indices to the right to keep also the first token above the threshold\r\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\r\n sorted_indices_to_remove[..., 0] = 0\r\n\r\n # Back to unsorted indices and set them to -infinity\r\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\r\n logits[indices_to_remove] = filter_value\r\n\r\n indices_to_remove = logits < threshold\r\n logits[indices_to_remove] = filter_value\r\n\r\n return logits\r\n\r\n\r\ndef sample_sequence(personality, history, tokenizer, model, args, current_output=None):\r\n special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)\r\n if current_output is None:\r\n current_output = []\r\n\r\n for i in range(args.max_length):\r\n instance = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)\r\n\r\n input_ids = torch.tensor(instance[\"input_ids\"], device=args.device).unsqueeze(0)\r\n token_type_ids = torch.tensor(instance[\"token_type_ids\"], device=args.device).unsqueeze(0)\r\n\r\n logits = model(input_ids, token_type_ids=token_type_ids)\r\n if isinstance(logits, tuple): # for gpt2 and maybe others\r\n logits = logits[0]\r\n logits = logits[0, -1, :] / args.temperature\r\n logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)\r\n probs = F.softmax(logits, dim=-1)\r\n\r\n prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)\r\n if i < args.min_length and prev.item() in special_tokens_ids:\r\n while prev.item() in special_tokens_ids:\r\n if probs.max().item() == 1:\r\n warnings.warn(\"Warning: model generating special token with probability 1.\")\r\n break # avoid infinitely looping over special token\r\n prev = torch.multinomial(probs, num_samples=1)\r\n\r\n if prev.item() in special_tokens_ids:\r\n break\r\n current_output.append(prev.item())\r\n\r\n return current_output\r\ndef build_input_from_segments(persona, history, reply, tokenizer, lm_labels=False, with_eos=True):\r\n \"\"\" Build a sequence of input from 3 segments: persona, history and last reply. 
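Speaker tokens are prepended to each utterance so the model can tell the two sides of the dialog apart.\u0020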
\"\"\"\r\n bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])\r\n sequence = [[bos] + list(chain(*persona))] + history + [reply + ([eos] if with_eos else [])]\r\n sequence = [sequence[0]] + [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]\r\n instance = {}\r\n instance[\"input_ids\"] = list(chain(*sequence))\r\n instance[\"token_type_ids\"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s]\r\n instance[\"mc_token_ids\"] = len(instance[\"input_ids\"]) - 1\r\n instance[\"lm_labels\"] = [-1] * len(instance[\"input_ids\"])\r\n if lm_labels:\r\n instance[\"lm_labels\"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]\r\n return instance\r\n \r\nATTR_TO_SPECIAL_TOKEN = {'bos_token': '', 'eos_token': '', 'pad_token': '',\r\n 'additional_special_tokens': ('', '')}\r\nSPECIAL_TOKENS = [\"\", \"\", \"\", \"\", \"\"]\r\nMODEL_INPUTS = [\"input_ids\", \"mc_token_ids\", \"lm_labels\", \"mc_labels\", \"token_type_ids\"]\r\nPADDED_INPUTS = [\"input_ids\", \"lm_labels\", \"token_type_ids\"]\r\n\r\ndef addingSpecialTokens_(model, tokenizer):\r\n\r\n prevTokens = len(tokenizer.encoder)\r\n newTokensLen = tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN) \r\n # making sure new tokens are being are being added\r\n if newTokensLen > 0: \r\n model.resize_token_embeddings(new_num_tokens= newTokensLen + prevTokens)\r\n\r\ndef tokenize(obj):\r\n if isinstance(obj, str):\r\n return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))\r\n if isinstance(obj, dict):\r\n return dict((n, tokenize(o)) for n, o in obj.items())\r\n return list(tokenize(o) for o in obj)\r\n\r\n\r\n\r\n\r\n\r\n\r\nparser = ArgumentParser()\r\n\r\nparser.add_argument(\"--dataset_path\", type=str, default=\"\", help=\"Path or url of the dataset. 
If empty download from S3.\")\r\nparser.add_argument(\"--dataset_cache\", type=str, default='./dataset_cache', help=\"Path or url of the dataset cache\")\r\nparser.add_argument(\"--model\", type=str, default=\"openai-gpt\", help=\"Model type (openai-gpt or gpt2)\", choices=['openai-gpt', 'gpt2']) # anything besides gpt2 will load openai-gpt\r\nparser.add_argument(\"--model_checkpoint\", type=str, default=\"\", help=\"Path, url or short name of the model\")\r\nparser.add_argument(\"--max_history\", type=int, default=2, help=\"Number of previous utterances to keep in history\")\r\nparser.add_argument(\"--device\", type=str, default=\"cuda\" if torch.cuda.is_available() else \"cpu\", help=\"Device (cuda or cpu)\")\r\n\r\nparser.add_argument(\"--no_sample\", action='store_true', help=\"Set to use greedy decoding instead of sampling\")\r\nparser.add_argument(\"--max_length\", type=int, default=20, help=\"Maximum length of the output utterances\")\r\nparser.add_argument(\"--min_length\", type=int, default=1, help=\"Minimum length of the output utterances\")\r\nparser.add_argument(\"--seed\", type=int, default=0, help=\"Seed\")\r\nparser.add_argument(\"--temperature\", type=float, default=0.7, help=\"Sampling softmax temperature\")\r\nparser.add_argument(\"--top_k\", type=int, default=0, help=\"Filter top-k tokens before sampling (<=0: no filtering)\")\r\nparser.add_argument(\"--top_p\", type=float, default=0.9, help=\"Nucleus filtering (top-p) before sampling (<=0.0: no filtering)\")\r\nargs = parser.parse_args(args=[])\r\n\r\nif args.model_checkpoint == \"\":\r\n    if args.model == 'gpt2':\r\n        raise ValueError(\"Interacting with GPT2 requires passing a finetuned model_checkpoint\")\r\n    else: \r\n        args.model_checkpoint = \"modell\"\r\n\r\nif args.seed != 0:\r\n    random.seed(args.seed)\r\n    torch.random.manual_seed(args.seed)\r\n    torch.cuda.manual_seed(args.seed)\r\n\r\n\r\ntokenizer_class, model_class = (GPT2Tokenizer, GPT2LMHeadModel) if args.model == 'gpt2' else (OpenAIGPTTokenizer, OpenAIGPTLMHeadModel)\r\ntokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)\r\nmodel = model_class.from_pretrained(args.model_checkpoint)\r\nmodel.to(args.device)\r\naddingSpecialTokens_(model, tokenizer)\r\n\r\nwith open('dataset.json', \"r\", encoding=\"utf-8\") as f:\r\n    dataset = json.loads(f.read())\r\ndataset_cache=args.dataset_cache\r\ndataset_cache = dataset_cache + '_' + type(tokenizer).__name__\r\ndataset = tokenize(dataset)\r\ntorch.save(dataset, dataset_cache)\r\n\r\n\r\n\r\npersonalities = [dialog[\"personality\"] for dataset in dataset.values() for dialog in dataset]\r\nprint(\"no of personalities are \", len(personalities))\r\npersonality = random.choice(personalities)\r\nprint (personality)\r\n\r\n\r\n\r\nhistory = []\r\n\r\ndef generateResponse(raw_text):\r\n\r\n    global history\r\n    history.append(tokenizer.encode(raw_text))\r\n    with torch.no_grad():\r\n        out_ids = sample_sequence(personalities[count], history, tokenizer, model, args)\r\n    history.append(out_ids)\r\n    history = history[-(2):]\r\n    out_text = tokenizer.decode(out_ids, skip_special_tokens=True)\r\n    print (count)\r\n    return out_text\r\n\r\n\r\napp = Flask(__name__)\r\n\r\napp.config[ 'SECRET_KEY' ] = 'jsbcfsbfjefebw237u3gdbdc'\r\nsocketio = SocketIO( app )\r\n\r\n@app.route( '/' )\r\ndef hello():\r\n    return render_template( './ChatApp.html' )\r\n\r\ndef messageRecived():\r\n    print( 'message was received!!!' 
)\r\n\r\n\r\n\r\n@socketio.on( 'my eventes' )\r\ndef handle_my_custom_event1( json1 ):\r\n message = json1['message']\r\n answer= generateResponse(message)\r\n json1['answer'] = answer\r\n json1['bot']='NEO'\r\n print( 'recived my event: ' + str(json1 ))\r\n socketio.emit( 'my response', json1, callback=messageRecived )\r\n\r\nif __name__ == '__main__':\r\n socketio.run( app, debug = True )\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"359676009","text":"#!/usr/bin/env python.\n\n\"\"\"\n\n This is the persons module of the Duke of Silvershire\n \n\"\"\"\n\nimport random\nimport unittest\nfrom standardNames import StandardNames\nfrom person import Person\nfrom races import Race\n\n__author__ = \"Sven Eggert\"\n__copyright__ = \"Copyright 2017, Egertiko Designs\"\n__credits__ = []\n\n__license__ = \"GPL\"\n__version__ = \"0.0.9\"\n__maintainer__ = \"Sven Eggert\"\n__email__ = \"sven674@web.de\"\n__status__ = \"Development\"\n\n\"\"\"\n The Person is an individual in the Duke of Silvershire and has a name\n\"\"\"\nclass Persons(unittest.TestCase):\n titleNames = ['Lord', 'Bishop', 'Wizard', 'Sorcerer', 'Duke', 'Earl', 'Barron', 'Grand Duke', 'King' , 'Sir', 'Prince', 'Principality', 'Chancellor', 'Monarch', \\\n 'Abbot', 'Aldermen', 'Archduke', 'Archbishop']\n additionalEvilTitleNames = ['Dead', 'Black']\n MIN_RIDERS = 100\n MAX_RIDERS = 500\n MIN_WARRIORS = 500\n MAX_WARRIORS = 5000\n\n \"\"\"\n Create a name by an element and a birds name\n \"\"\"\n def _createPersonNameByElementBird(self):\n titleName = random.choice(StandardNames.elementNames)[0]\n birdName = random.choice(StandardNames.birdNamesFirstPart)\n titleName += \"-\" + birdName.title()\n\n return titleName\n\n \"\"\"\n Create a name by an birds name\n \"\"\"\n\n\n def _createPersonNameByBird(self):\n titleName = random.choice(StandardNames.birdNamesFirstPart)\n birdName = random.choice(StandardNames.birdNamesSecondPart)\n titleName += \"-\" + birdName.title()\n\n return titleName\n\n \"\"\"\n Create one person\n \"\"\"\n def createPerson(self, race, loyality):\n person = Person()\n person.race = race\n\n if race == Race.DEADLORD:\n person.is_evil = True\n person.loyality = 0.0\n else:\n person.loyality = loyality\n \n if race in [Race.HUMAN, Race.DEADLORD]:\n person.riders = random.randrange(self.MIN_RIDERS, self.MAX_RIDERS, 100)\n person.warriors = random.randrange(self.MIN_WARRIORS, self.MAX_WARRIORS, 500)\n else:\n person.riders = -1 # cannot have any riders\n person.warriors = random.randrange(self.MIN_WARRIORS, self.MAX_WARRIORS, 500)\n\n person.name = self.createPersonName (person.is_evil)\n\n return person\n \n \"\"\"\n Main routine to create all persons. 
In this case all\n    positive persons\n\n    quantity  Number of persons to create\n    \"\"\"\n    def createPersons(self, quantity): \n        x = quantity\n        while x > 0:\n            person = self.createPersonName (False)\n            \n            x = x - 1 \n\n    \"\"\"\n    Main routine to create a person's name\n\n    deadLord True if the person is a dead lord\n    \"\"\"\n    def createPersonName(self, deadLord):\n        titleName = random.choice(self.titleNames)\n\n        if deadLord:\n            titleName = random.choice(self.additionalEvilTitleNames) + \" \" + titleName\n\n            whatName = random.random()\n            \n            if whatName < 0.7:\n                titleName += \" of \"\n                titleName += random.choice(StandardNames.negativeNames)\n            else:\n                titleName += \" the \"\n                titleName += random.choice(StandardNames.specialNegativeNames)\n        else:\n            whatName = random.randint(1,3)\n            if whatName == 1:\n                titleName += \" \" + self._createPersonNameByBird()\n            elif whatName == 2:\n                titleName += \" the \" + random.choice(StandardNames.specialPositiveNames)\n            else:\n                titleName += \" \" + self._createPersonNameByElementBird() \n\n        return titleName\n    \n    \"\"\"\n    Test creating persons\n    \"\"\"\n    def test_createPersons(self):\n        x = 100\n        while x > 0:\n            race = random.randint(Race.FIRST_RACE, Race.LAST_RACE)\n            if race == Race.DEADLORD:\n                loyality = 0.0\n            else:\n                loyality = random.random()\n            \n            person = self.createPerson (race, loyality)\n            print (person.name, person.race, person.loyality, person.riders, person.warriors, person.loyality) \n            \n            x = x - 1 \n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"persons.py","file_name":"persons.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"380120962","text":"my_list = [1, 2, 3]\nmy_set = set()\n\nprint(type(my_list))\nprint(type(my_set))\n\n\nclass Dog():\n    def __init__(self, breed, name, spots):\n        \n        # Attributes\n        # We take in the argument\n        # Assign it using self.attribute_name\n\n        self.breed = breed\n        self.name = name\n        # Expect boolean True / False\n        self.spots = spots\n\n\nmy_dog = Dog(breed=\"Lab\", name=\"Sammy\", spots=False)\nprint(my_dog.breed)\nprint(my_dog.name)\nprint(my_dog.spots)\n\n\n","sub_path":"08 - Object Oriented Programming/061 - Object Oriented Programming - Attributes and Class Keyword.py","file_name":"061 - Object Oriented Programming - Attributes and Class Keyword.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"240995512","text":"from django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import get_user_model\nfrom .models import User\nfrom django import forms\nfrom django.forms.widgets import SelectDateWidget\n\n\nclass CustomUserCreationForm(UserCreationForm):\n    user_phone = forms.CharField(\n        label = '핸드폰 번호:',\n        widget = forms.TextInput(\n            attrs = {\n                'class' : 'my-user-phone form-control',\n                'placeholder' : '핸드폰번호를 입력해주세요', \n            }\n        )\n    )\n    user_info = forms.CharField(\n        label = '소개 한줄:',\n        widget = forms.Textarea(\n            attrs = {\n                'class' : 'my-user-info form-control',\n                'placeholder' : '자기소개를 살짝 적어주세요',\n                'rows' : 3,\n                'cols' : 5,\n            }\n        )\n    )\n\n\n    \n    # Normally this would be forms.ModelForm\n    # The form has to know which user model it is pointing at\n    # AUTH_USER_MODEL\n    class Meta:\n        model = get_user_model()\n        fields = ('username', 'password1', 'password2', 'user_phone', 'user_profile_image')\n\n# 
'%Y-%m-%d'","sub_path":"12_Python_Lecture/Django_Practice/Project/foother/accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"36172875","text":"from flask_restful import reqparse, abort, Resource\nimport json\nimport ast\nfrom appverte.back.tables import db, User, Actions\nfrom appverte.back.alchemy_encoder import AlchemyEncoder\nfrom flask_jwt_extended import jwt_required\n \n# insert new likes or dislikes --- curl http://127.0.0.1:5000/api/likes -d \"user_id=xxxxx&action_id=xxxx&likes=1/-1\"\nclass Likes(Resource): \n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('user_id', type = str, required = True,\n help = 'No user_id title provided')\n self.reqparse.add_argument('action_id', type = str, required = True,\n help = 'No action_id title provided')\n self.reqparse.add_argument('likes', type = str, required = True,\n help = 'No likes 1/-1 title provided')\n self.reqparse.add_argument('type', type = str, required = True,\n help = 'No type action/comment/answer provided')\n self.reqparse.add_argument('comment_id', type = str, required = False,\n help = 'No comment_id provided')\n self.reqparse.add_argument('answer_id', type = str, required = False,\n help = 'No answer_id provided')\n\n @jwt_required()\n def post(self): \n args = self.reqparse.parse_args()\n \n if args['type'] == 'action':\n # add the like or dislike to the user \n user = User.query.filter_by(user_id=args['user_id']).first()\n \n if int(args['likes']) == 1: \n likes = ast.literal_eval(user.likes)\n likes.append(args['action_id'])\n user.likes = '{}'.format(likes)\n db.session.commit()\n else: \n dislikes = ast.literal_eval(user.dislikes)\n dislikes.append(args['action_id'])\n user.dislikes = '{}'.format(dislikes) \n db.session.commit()\n\n\n #modify the notation of the action\n action = json.dumps(Actions.query.filter_by(action_id=args['action_id']).first(), cls=AlchemyEncoder)\n notation_action = int(json.loads(action)[\"rating\"])\n notation_action += int(args['likes'])\n\n change2 = Actions.query.filter_by(action_id=args['action_id']).first()\n change2.rating = notation_action\n \n # add the user as dislikes or likes \n if int(args['likes']) == 1: \n likes = ast.literal_eval(change2.liked_by)\n likes.append(args['user_id'])\n change2.liked_by = '{}'.format(likes)\n \n else: \n dislikes = ast.literal_eval(change2.disliked_by)\n dislikes.append(args['user_id'])\n change2.disliked_by = '{}'.format(dislikes)\n \n db.session.commit()\n \n if args['type'] == 'comment':\n action = Actions.query.filter_by(action_id=args['action_id']).first()\n comments = action.comments \n comments = ast.literal_eval(comments)\n comment = comments[args['comment_id']]\n\n #drop the comment in the comments \n comments.pop(args['comment_id'])\n\n if int(args['likes']) == 1: \n comment['likes_count'] += 1\n else:\n comment['dislikes_count'] += 1\n\n comment = {args['comment_id']: comment}\n comments.update(comment)\n \n # update the action\n action.comments = '{}'.format(comments) \n db.session.commit()\n\n \n if args['type'] == 'answer':\n action = Actions.query.filter_by(action_id=args['action_id']).first()\n comments = action.comments \n comments = ast.literal_eval(comments)\n comment = comments[args['comment_id']]\n\n #drop the comment in the comments \n comments.pop(args['comment_id'])\n\n if int(args['likes']) == 1: \n 
comment['answers'][args['answer_id']]['likes_count'] += 1\n else:\n comment['answers'][args['answer_id']]['dislikes_count'] += 1\n\n\n\n\n comment = {args['comment_id']: comment}\n comments.update(comment)\n \n # update the action\n action.comments = '{}'.format(comments) \n db.session.commit()\n\n return 'done'\n","sub_path":"backend/appverte/back/ressources/Likes.py","file_name":"Likes.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"586122968","text":"from django.shortcuts import get_object_or_404\r\nfrom rest_framework.decorators import action\r\nfrom rest_framework import permissions, viewsets\r\nfrom rest_framework.response import Response\r\nfrom rest_framework_extensions.mixins import NestedViewSetMixin\r\n\r\nfrom django.conf import settings\r\nfrom datetime import datetime\r\n\r\nfrom contest.models import (\r\n Event,\r\n Bout,\r\n Fighter,\r\n Selection,\r\n Entry,\r\n Game\r\n)\r\nfrom contest.serializers import (\r\n UserSerializer,\r\n GroupSerializer,\r\n EventSerializer,\r\n BoutSerializer,\r\n FighterSerializer,\r\n SelectionSerializer,\r\n EntrySerializer,\r\n GameSerializer\r\n)\r\n\r\nfrom contest.commons import load_my_games, load_own_games, build_games\r\n\r\nimport pdb\r\n\r\nclass GameViewSet(NestedViewSetMixin, viewsets.ModelViewSet):\r\n \"\"\"\r\n API endpoint that allows entries to be viewed or edited.\r\n \"\"\"\r\n queryset = Game.objects.all()\r\n serializer_class = GameSerializer\r\n permission_classes = [permissions.AllowAny]\r\n # permission_classes = [permissions.IsAuthenticated]\r\n\r\n @action(methods=['get'], detail=False)\r\n def load_games(self, request, **kwarg):\r\n games = []\r\n latest_event = {}\r\n upcoming_events = []\r\n bouts = []\r\n status = 200\r\n try:\r\n event = Event.objects.latest_event()\r\n latest_event = EventSerializer(event).data\r\n bouts = BoutSerializer(Bout.objects.filter(event__id=latest_event['id']).order_by('order'), many=True).data\r\n for bout in bouts:\r\n bout['fighter1'] = FighterSerializer(Fighter.objects.get(id=bout['fighter1'])).data\r\n bout['fighter2'] = FighterSerializer(Fighter.objects.get(id=bout['fighter2'])).data\r\n upcoming_events = EventSerializer(Event.objects.filter(status='upcoming').order_by('-date'), many=True).data\r\n games = load_my_games(event, request.user.id)\r\n except Exception as err:\r\n print(err)\r\n status = 500\r\n\r\n return Response(dict(games=games, latest_event=latest_event, upcoming_events=upcoming_events, bouts=bouts), status)\r\n\r\n @action(methods=['get'], detail=False)\r\n def load_own_games(self, request, **kwarg):\r\n my_own_games = []\r\n status = 200\r\n message = 'Successfully done.'\r\n try:\r\n if not request.user:\r\n raise Exception()\r\n my_own_games = load_own_games(request.user.id)\r\n except Exception as err:\r\n print(err)\r\n message = 'Something went wrong.'\r\n status = 500\r\n\r\n return Response(dict(my_own_games=my_own_games), status)\r\n\r\n @action(methods=['post'], detail=False)\r\n def delete_game(self, request, **kwarg):\r\n status = 200\r\n message = 'Successfully done'\r\n try:\r\n if not request.user or not request.data['game_id']:\r\n raise Exception()\r\n Game.objects.get(id=request.data['game_id']).delete()\r\n except Exception as err:\r\n print(err)\r\n status = 500\r\n message = 'Something went wrong.'\r\n\r\n return Response(dict(message=message), status)\r\n\r\n @action(methods=['post'], detail=False)\r\n def create_game(self, request, 
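Likes.py persists Python lists in text columns via '{}'.format(...) and parses them back with ast.literal_eval. A quick round-trip demo of that scheme; json.dumps/json.loads would be the more portable equivalent, but literal_eval matches the repr-style strings this code writes.

```python
import ast

likes = ['action-1', 'action-2']
stored = '{}'.format(likes)          # what the resource writes to the column
print(stored)                        # ['action-1', 'action-2']

restored = ast.literal_eval(stored)  # safe: parses literals only, no code
restored.append('action-3')
assert restored == ['action-1', 'action-2', 'action-3']
```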
**kwarg):\r\n status = 200\r\n message = 'Successfully created'\r\n games = [{}]\r\n try:\r\n if not request.user:\r\n raise Exception()\r\n data = request.data\r\n data['owner'] = request.user.id\r\n game_serializer = GameSerializer(data=data)\r\n if game_serializer.is_valid():\r\n game = game_serializer.save()\r\n build_games(games, [game], None, request.user.id)\r\n else:\r\n raise Exception()\r\n except Exception as err:\r\n print(err)\r\n status = 500\r\n message = 'Something went wrong.'\r\n\r\n return Response(dict(message=message, game=games[-1]), status)\r\n\r\n @action(methods=['post'], detail=False)\r\n def update_game(self, request, **kwarg):\r\n status = 200\r\n message = 'Successfully updated'\r\n try:\r\n if not request.user:\r\n raise Exception()\r\n data = request.data\r\n data['owner'] = request.user.id\r\n data['joined_users'] = [_['id'] for _ in data['joined_users']]\r\n data['entrants'] = [_['id'] for _ in data['entrants']]\r\n game = Game.objects.get(id=data['id'])\r\n if game:\r\n game_serializer = GameSerializer(game, data=data)\r\n if game_serializer.is_valid():\r\n game = game_serializer.save()\r\n else:\r\n raise Exception()\r\n else:\r\n raise Exception()\r\n except Exception as err:\r\n print(err)\r\n status = 500\r\n message = 'Something went wrong.'\r\n\r\n return Response(dict(message=message), status)\r\n\r\n @action(methods=['post'], detail=False)\r\n def join_game(self, request, **kwarg):\r\n status = 200\r\n message = 'Successfully Joined'\r\n try:\r\n if not request.user:\r\n raise Exception()\r\n game = Game.objects.get(pk=request.data['game_id'])\r\n if game.genre != 'free':\r\n if request.user.coins < game.buyin:\r\n return Response(dict(message=\"You don't have enough coins.\"), 400)\r\n \r\n # deduct user's coins by buyin\r\n request.user.coins -= game.buyin\r\n request.user.save()\r\n\r\n # create new entry with the new entry_number if possible\r\n entries = Entry.objects.filter(game=game.id).filter(user=request.user.id).order_by('entry_number')\r\n entry_number = 0\r\n if not entries:\r\n entry_number = 1\r\n if entries.count() < game.multientry:\r\n for x, entry in enumerate(entries):\r\n if entry.entry_number != x+1:\r\n # Just in case the entry with the middle number was eliminated in My Teams page\r\n entry_number = x + 1\r\n break\r\n # last position + 1\r\n if not entry_number:\r\n entry_number = entries.count() + 1\r\n\r\n if entry_number and entries.count() < game.entry_limit:\r\n new_entry = Entry(event=game.event, game=game, user=request.user, entry_number=entry_number)\r\n new_entry.save()\r\n\r\n # add user to joined_users list in the game\r\n # if game.genre != 'free':\r\n game.joined_users.add(request.user)\r\n game.save()\r\n\r\n except Exception as err:\r\n print(err)\r\n status = 500\r\n message = 'Something went wrong.'\r\n\r\n return Response(dict(message=message, status=status, entry_number=entry_number), status)\r\n\r\n","sub_path":"fighter/contest/views/game_views.py","file_name":"game_views.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"634853838","text":"from google import google\r\nimport csv\r\nimport re\r\nimport time\r\nimport random\r\n\r\n# Old Code that searched to Google with keywords + university names and tried to get the best results.\r\n# Lots of unusable pages found, so was ditched. 
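join_game above fills the first entry-number gap left by an eliminated entry before appending at the end. The same rule as a standalone function; the name next_entry_number is mine, not the project's.

```python
def next_entry_number(taken, multientry):
    """taken: this user's existing entry numbers, sorted ascending."""
    if not taken:
        return 1
    if len(taken) >= multientry:
        return 0                      # multientry limit reached
    for i, n in enumerate(taken):
        if n != i + 1:                # a middle entry was eliminated
            return i + 1
    return len(taken) + 1             # no gaps: last position + 1

assert next_entry_number([], 3) == 1
assert next_entry_number([1, 3], 3) == 2   # fills the gap left by #2
assert next_entry_number([1, 2], 3) == 3
assert next_entry_number([1, 2, 3], 3) == 0
```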
Similar to conferencefinder.py\r\ncsvv = open('test3.csv','w')\r\n \r\nsearch_key = ['Cybersecurity Course Syllabus',\r\n 'Computer Security Course Syllabus',\r\n 'Network Security Course Syllabus',\r\n 'Information Security Course Syllabus',\r\n 'Cybersecurity Law Course Syllabus']\r\n\r\n\r\nwith open('domains2.csv', 'r') as csvfile:\r\n num_page = 1\r\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n for row in spamreader:\r\n row = str(row[0]) \r\n row = row.replace(\"https://www.\",\"site:\")\r\n row = row.replace(\"http://www.\",\"site:\")\r\n row = row.replace('/','')\r\n for key in search_key:\r\n add = []\r\n print(row+' '+ key)\r\n search_results = google.search(row+' '+ key,num_page)\r\n print(search_results)\r\n for res in search_results:\r\n \r\n i = str(res.link)\r\n print(res.description)\r\n \r\n while '&sa' in i:\r\n a = i.find('&sa')\r\n i = i[:a]\r\n \r\n\r\n if '.edu' not in i:\r\n continue\r\n if i in add:\r\n continue\r\n csvv.write(str(i))\r\n csvv.write('\\n')\r\n print(i)\r\n #break\r\n \r\n \r\n \r\n \r\n '''\r\n timenow = random.randrange(15,30)\r\n print(timenow)\r\n time.sleep(timenow)\r\n '''\r\n #break\r\n \r\ncsvv.close()\r\n\r\n","sub_path":"BRUCE-Cybersecurity-Search-Engine/DataCollection/googlesearch.py","file_name":"googlesearch.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"436529968","text":"import json\n\n#Part 1\n# station code for Seattle is GHCND:US1WAKG0038\n#creating a new dictionary with the Seattle date using the station code\n\nSeattle = []\nwith open('precipitation.json', 'r') as file:\n station_dictionary = json.load(file)\n for line in station_dictionary:\n if line['station']=='GHCND:US1WAKG0038':\n Seattle.append(line)\n \n# Clean the Dictionary from the datatype and station\nfor line in Seattle:\n del[line['datatype']],\n del[line['station']]\n\n\n#get just months and values in the Seattle dictionary\n\nfor line in Seattle:\n line['year'], line['month'], line['day'] = line['date'].split(\"-\")\n del[line['year']],\n del[line['date']],\n del[line['day']]\n# create a new list that is going to have values averages and months as indexes\nfinal_list = [0,0,0,0,0,0,0,0,0,0,0,0]\nfor line in Seattle:\n month = int(line['month']) \n final_list[month-1] = line['value'] + final_list[month-1]\nprint(final_list)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ass.py","file_name":"ass.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"6030989","text":"from flask import Blueprint, jsonify\n\nfrom backend.auth import requires_auth\nfrom command.network import *\n\nnetwork_endpoints = Blueprint(\"network_api\", __name__)\n\n\n@network_endpoints.route(\"/api/network/checkVPN\", methods = [\"GET\"])\n@requires_auth\ndef check_vpn_endpoint():\n \"\"\"Delivers a summary of the VPN connectivity of the system.\"\"\"\n check_vpn_message, check_vpn_status = check_vpn()\n\n return jsonify(\n check_vpn_status = check_vpn_status,\n check_vpn_message = check_vpn_message\n )\n\n\n@network_endpoints.route(\"/api/network/checkInternet\", methods = [\"GET\"])\n@requires_auth\ndef check_internet_endpoint():\n \"\"\"Delivers a summary of the internet connectivity of the system.\"\"\"\n check_internet_message, check_internet_status = check_internet()\n\n return jsonify(\n check_internet_status = check_internet_status,\n check_internet_message = check_internet_message\n 
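The precipitation script above accumulates per-month values into a fixed 12-slot list (despite the "averages" wording, these are sums). The same roll-up with a defaultdict, on toy records shaped like the cleaned Seattle entries:

```python
from collections import defaultdict

records = [{'month': '01', 'value': 5},
           {'month': '01', 'value': 3},
           {'month': '02', 'value': 7}]

totals = defaultdict(int)
for line in records:
    totals[int(line['month'])] += line['value']

print(dict(totals))  # {1: 8, 2: 7} -- divide by per-month counts for averages
```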
)\n\n\n@network_endpoints.route(\"/api/network/restartModem\", methods = [\"GET\"])\n@requires_auth\ndef restart_modem_endpoint():\n \"\"\"Restarts the modem network interface.\"\"\"\n restart_modem_message = restart_modem()\n check_internet_message, check_internet_status = check_internet()\n\n return jsonify(\n check_internet_status = check_internet_status,\n restart_modem_message = restart_modem_message,\n check_internet_message = check_internet_message\n )\n\n\n@network_endpoints.route(\"/api/network/restartVPN\", methods = [\"GET\"])\n@requires_auth\ndef restart_vpn_endpoint():\n \"\"\"Restarts the system's VPN daemon.\"\"\"\n restart_vpn_message = restart_vpn()\n check_vpn_message, check_vpn_status = check_vpn()\n\n return jsonify(\n check_vpn_status = check_vpn_status,\n restart_vpn_message = restart_vpn_message,\n check_vpn_message = check_vpn_message\n )\n","sub_path":"backend/api/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"435561637","text":"\"\"\"Utils.\"\"\"\n\nfrom collections import namedtuple\nfrom datetime import datetime, timedelta\nfrom typing import Optional\nfrom base64 import b64encode\n\nimport numpy as np # type: ignore\nimport pandas as pd # type: ignore\n\n# from .parameters import Parameters\n\n\n# (0.02, 7) is 2%, 7 days\n# be sure to multiply by 100 when using as a default to the pct widgets!\nRateLos = namedtuple(\"RateLos\", (\"rate\", \"length_of_stay\"))\n\n\ndef add_date_column(\n df: pd.DataFrame, drop_day_column: bool = False, date_format: Optional[str] = None,\n) -> pd.DataFrame:\n \"\"\"Copies the input data frame and converts its \"day\" column to a \"date\" column\n\n Assumes that day=0 is today and allocates dates for each integer day.\n The day values need not be contiguous.\n Columns will be organized as in the original frame, with the difference that the date\n columns come first.\n\n Arguments:\n df: The data frame to convert.\n drop_day_column: If true, the returned data frame will not have a day column.\n date_format: If given, converts the datetime objects to the string format specified.\n\n Raises:\n KeyError: if \"day\" column not in df\n ValueError: if \"day\" column is not of type int\n \"\"\"\n if not \"day\" in df:\n raise KeyError(\"Input data frame for converting dates has no 'day' column.\")\n if not pd.api.types.is_integer_dtype(df.day):\n raise ValueError(\"Column 'day' of the data frame to convert is not of integer type.\")\n\n df = df.copy()\n # Prepare columns for sorting\n non_date_columns = [col for col in df.columns if not col == \"day\"]\n\n # Allocate a continuous (day) range for dates\n n_days = int(df.day.max())\n start = datetime.now()\n end = start + timedelta(days=n_days + 1)\n # And pick the dates present in the frame\n dates = pd.date_range(start=start, end=end, freq=\"D\")[df.day.tolist()]\n\n if date_format is not None:\n dates = dates.strftime(date_format)\n\n df[\"date\"] = dates\n\n if drop_day_column:\n df.pop(\"day\")\n date_columns = [\"date\"]\n else:\n date_columns = [\"day\", \"date\"]\n\n # sort columns\n df = df[date_columns + non_date_columns]\n\n return df\n\ndef dataframe_to_base64(df: pd.DataFrame) -> str:\n \"\"\"Converts a dataframe to a base64-encoded CSV representation of that data.\n\n This is useful for building data URIs used to download the data in the browser.\n\n Arguments:\n df: The dataframe to convert\n \"\"\"\n csv = df.to_csv(index=False)\n b64 = b64encode(csv.encode()).decode()\n return 
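The endpoints in network.py above only become reachable once the blueprint is registered on an app. A self-contained stand-in for that step; the demo blueprint and route here are hypothetical, the real object being network_endpoints.

```python
from flask import Flask, Blueprint, jsonify

demo = Blueprint('demo_api', __name__)

@demo.route('/api/network/ping')
def ping():
    return jsonify(ok=True)

app = Flask(__name__)
app.register_blueprint(demo)  # same call the project would make for network_endpoints

with app.test_client() as client:
    print(client.get('/api/network/ping').get_json())  # {'ok': True}
```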
b64\n","sub_path":"src/penn_chime/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"95738789","text":"from django.urls import path\nfrom django.conf.urls import url\nfrom .views import listRuleSet, updateRuleSet, searchRuleset, detailRuleSet, reorderRuleset\n\napp_name = 'ruleset'\nurlpatterns = [\n url(r'^ruleset/$', listRuleSet),\n url(r'^ruleset/(?P[0-9]+)/detail/$', detailRuleSet),\n url(r'^ruleset/(?P[0-9]+)/$', updateRuleSet),\n url(r'^ruleset/search/$', searchRuleset),\n url(r'^ruleset/(?P[0-9]+)/reorder/$', reorderRuleset),\n]","sub_path":"ruleset/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"285447583","text":"import numpy as np\nfrom typing import Optional, Callable\nimport pandas as pd\nfrom statsmodels.stats.weightstats import DescrStatsW\n\nfrom ...utils.statistics import bootstrap_stats\nfrom .eval_metric import EvalMetric\nfrom ..data.fold_yielder import FoldYielder\n\n__all__ = ['RegPull', 'RegAsProxyPull']\n\n\nclass RegPull(EvalMetric):\n r'''\n Compute mean or standard deviation of delta or pull of some feature which is being directly regressed to.\n Optionally, use bootstrap resampling on validation data.\n\n Arguments:\n return_mean: whether to return the mean or the standard deviation\n use_bootstrap: whether to bootstrap resamples validation fold when computing statisitic\n use_weights: whether to actually use weights if wgt_name is set\n use_pull: whether to return the pull (differences / targets) or delta (differences)\n targ_name: name of group in fold file containing regression targets\n wgt_name: name of group in fold file containing datapoint weights\n\n Examples::\n >>> mean_pull = RegPull(return_mean=True, use_bootstrap=True,\n ... use_pull=True)\n >>>\n >>> std_delta = RegPull(return_mean=False, use_bootstrap=True,\n ... use_pull=False)\n >>>\n >>> mean_pull = RegPull(return_mean=True, use_bootstrap=False,\n ... 
use_pull=True, wgt_name='weights')\n '''\n\n # TODO: Check how this handles multi-target regression, may need to adjust averaging axis & DescrStatsW may not handle multi-dimensional data well.\n # TODO: Remove use_weights and rely on whether wgt_name is None\n\n def __init__(self, return_mean:bool, use_bootstrap:bool=False, use_weights:bool=True, use_pull:bool=True, targ_name:str='targets',\n wgt_name:Optional[str]=None):\n super().__init__(targ_name=targ_name, wgt_name=wgt_name)\n self.use_bootstrap,self.use_weights,self.return_mean,self.use_pull = use_bootstrap,use_weights,return_mean,use_pull\n\n def _compute(self, df:pd.DataFrame) -> float:\n df['diff'] = df['pred']-df['gen_target']\n if self.use_pull: df['diff'] /= df['gen_target']\n if self.use_weights and 'gen_weight' in df.columns:\n weights = df['gen_weight'].values.astype('float64')/df['gen_weight'].values.astype('float64').sum()\n else:\n weights = None\n \n if self.use_bootstrap:\n bs_args = {'data': df['diff'], 'mean': self.return_mean, 'std': True, 'n':100}\n if self.use_weights and 'gen_weight' in df.columns: bs_args['weights'] = weights\n bs = bootstrap_stats(bs_args)\n return np.mean(bs['_mean']) if self.return_mean else np.mean(bs['_std'])\n else:\n if self.return_mean:\n return np.average(df['diff'], weights=weights)\n else:\n return DescrStatsW(df['diff'].values, ddof=1, weights=weights*len(weights) if weights is not None else None).std\n \n def evaluate(self, fy:FoldYielder, idx:int, y_pred:np.ndarray) -> float:\n r'''\n Compute the statistic on a fold using provided predictions.\n\n Arguments:\n fy: :class:`~lumin.nn.data.fold_yielder.FoldYielder` interfacing to data\n idx: fold index corresponding to fold for which y_pred was computed\n y_pred: predictions for fold\n\n Returns:\n Statistic set in initialisation computed on the chosen fold\n\n Examples::\n >>> mean = mean_pull.evaluate(train_fy, val_id, val_preds)\n '''\n\n return self._compute(self.get_df(fy, idx, y_pred))\n\n\nclass RegAsProxyPull(RegPull):\n r'''\n Compute the mean or standard deviation of the delta or pull of some feature which is being indirectly regressed to via a proxy function.\n Optionally, use bootstrap resampling on validation data.\n\n Arguments:\n proxy_func: function which acts on regression predictions and adds pred and gen_target columns to the Pandas DataFrame it is passed which contains\n prediction columns pred_{i}\n return_mean: whether to return the mean or the standard deviation\n use_bootstrap: whether to bootstrap-resample the validation fold when computing the statistic\n use_weights: whether to actually use weights if wgt_name is set\n use_pull: whether to return the pull (differences / targets) or delta (differences)\n targ_name: name of group in fold file containing regression targets\n wgt_name: name of group in fold file containing datapoint weights\n\n Examples::\n >>> def reg_proxy_func(df):\n >>> df['pred'] = calc_pair_mass(df, (1.77682, 1.77682),\n ... {targ[targ.find('_t')+3:]:\n ... f'pred_{i}' for i, targ\n ... in enumerate(targ_feats)})\n >>> df['gen_target'] = 125\n >>> \n >>> std_delta = RegAsProxyPull(proxy_func=reg_proxy_func,\n ... 
return_mean=False, use_pull=False)\n '''\n\n def __init__(self, proxy_func:Callable[[pd.DataFrame],None], return_mean:bool, use_bootstrap:bool=False, use_weights:bool=True, \n use_pull:bool=True, targ_name:str='targets', wgt_name:Optional[str]=None):\n super().__init__(use_bootstrap=use_bootstrap, use_weights=use_weights, return_mean=return_mean, use_pull=use_pull, targ_name=targ_name,\n wgt_name=wgt_name)\n self.proxy_func = proxy_func\n \n def evaluate(self, fy:FoldYielder, idx:int, y_pred:np.ndarray) -> float:\n r'''\n Compute the statistic on a fold using provided predictions.\n\n Arguments:\n fy: :class:`~lumin.nn.data.fold_yielder.FoldYielder` interfacing to data\n idx: fold index corresponding to fold for which y_pred was computed\n y_pred: predictions for fold\n\n Returns:\n Statistic set in initialisation computed on the chosen fold\n\n Examples::\n >>> mean = mean_pull.evaluate(train_fy, val_id, val_preds)\n '''\n\n df = self.get_df(fy, idx, y_pred)\n self.proxy_func(df)\n return self._compute(df)\n","sub_path":"lumin/nn/metrics/reg_eval.py","file_name":"reg_eval.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"454657864","text":"import unittest\n\nfrom Stores.Hole import Hole\nfrom Stores.Patch import Patch\nfrom Stores.PatchWriter import FinalPatchStore\n\n\nclass TestPatchWriter(unittest.TestCase):\n \"\"\"\n Tests the PatchWriter.py script in the Stores.PatchWriter\n \"\"\"\n\n def test_patch(self):\n \"\"\"\n Test adding multiple patches to the final patch store in the patch writer.\n \"\"\"\n hole1 = Hole(1, 100, [\"a == b\"], \"ARI\", '==')\n hole2 = Hole(2, 100, [\"c and d\"], \"LOG\", 'and')\n hole3 = Hole(3, 100, [\"e + f\"], \"ARI\", '+')\n hole4 = Hole(4, 100, [\"g + h\"], \"ARI\", '+')\n hole5 = Hole(5, 100, [\"i - j\"], \"ARI\", '-')\n\n # Sketch 1\n # Patch 1\n patch1 = Patch(\"sketchName1\", \"test1\")\n patch1.add_hole(hole1)\n # Patch 2\n patch2 = Patch(\"sketchName1\", \"test1\")\n patch2.add_hole(hole2)\n patch2.add_hole(hole3)\n valid_patches1 = [patch1, patch2]\n\n # Add the patches to the PatchWriter's FinalPatchStore.\n FinalPatchStore.collect_patches(valid_patches1)\n self.assertEqual(FinalPatchStore.number_of_total_patches, 2)\n self.assertEqual(FinalPatchStore.number_of_duplicate_patches, 0)\n # Check the counter\n for collection in FinalPatchStore.patch_collection:\n patch = collection[0]\n counter = collection[1]\n if patch.is_equal(patch1):\n self.assertEqual(counter, 1)\n if patch.is_equal(patch2):\n self.assertEqual(counter, 1)\n FinalPatchStore.collect_patches(valid_patches1)\n self.assertEqual(FinalPatchStore.number_of_total_patches, 2)\n self.assertEqual(FinalPatchStore.number_of_duplicate_patches, 2)\n # Check the counter\n for collection in FinalPatchStore.patch_collection:\n patch = collection[0]\n counter = collection[1]\n if patch.is_equal(patch1):\n print(\"Patch1\")\n print(counter)\n self.assertEqual(counter, 2)\n if patch.is_equal(patch2):\n print(\"Patch2\")\n print(counter)\n self.assertEqual(counter, 2)\n\n # Sketch 2\n # Patch 1 has hole 2 equal to patch 2.\n patch3 = Patch(\"sketchName2\", \"test2\")\n patch3.add_hole(hole2)\n # Patch 1 has one hole equal to patch 1\n patch4 = Patch(\"sketchName2\", \"test2\")\n patch4.add_hole(hole1)\n patch4.add_hole(hole4)\n patch4.add_hole(hole5)\n valid_patches2 = [patch3, patch4]\n FinalPatchStore.collect_patches(valid_patches2)\n self.assertEqual(FinalPatchStore.number_of_total_patches, 4)\n 
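RegPull._compute's non-bootstrap branch hands normalised weights to statsmodels, rescaled by the sample size so the ddof=1 correction behaves sensibly. The same computation on toy numbers:

```python
import numpy as np
from statsmodels.stats.weightstats import DescrStatsW

diffs = np.array([0.10, -0.20, 0.05, 0.15])
weights = np.array([1.0, 2.0, 1.0, 1.0])
weights = weights / weights.sum()            # normalised, as in _compute

# _compute passes weights * len(weights) so the weights sum to the
# sample size before the weighted, ddof=1 standard deviation is taken.
stats = DescrStatsW(diffs, weights=weights * len(weights), ddof=1)
print(stats.mean, stats.std)
```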
self.assertEqual(FinalPatchStore.number_of_duplicate_patches, 2)\n # Check the counter\n for collection in FinalPatchStore.patch_collection:\n patch = collection[0]\n counter = collection[1]\n if patch.is_equal(patch1):\n print(\"Patch1\")\n print(counter)\n self.assertEqual(counter, 2)\n if patch.is_equal(patch2):\n print(\"Patch1\")\n print(counter)\n self.assertEqual(counter, 2)\n if patch.is_equal(patch3):\n print(\"Patch3\")\n print(counter)\n self.assertEqual(counter, 1)\n if patch.is_equal(patch4):\n print(\"Patch4\")\n print(counter)\n self.assertEqual(counter, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"PySketchFix/Tests/Stores/test_PatchWriter.py","file_name":"test_PatchWriter.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"346231451","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport csv\n\nwith open('lis.csv', newline='') as fileImport:\n reader = csv.reader(fileImport)\n data = list(reader)\n\nbCandidat = []\nbCandidat.append(data[0])\nfor candidat in data:\n bCandidat.append(candidat)\n\n\nwith open('export/candidates.csv', 'w') as fileExport:\n writer = csv.writer(fileExport)\n writer.writerows(bCandidat)\n","sub_path":"run_csv.py","file_name":"run_csv.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"521057229","text":"#직사각형 네 개의 합집합의 면적 구하기\n\nimport sys\nsys.stdin = open('input.txt', 'r')\n\nmatrix = [list(map(int, input().split())) for i in range(4)]\n#inMap = 범위 110으로 하면 거의 모든 범위 커버할 수 있음.\n\n#최대 x값\nlst = [] #두번째 x값 리스트\nfor i in range(4):\n lst.append(matrix[i][2])\nmax_x_range = max(lst)\n#최대 y값\nlst_a = [] #두번째 y값 리스트\nfor i in range(4):\n lst_a.append(matrix[i][3])\nmax_y_range = max(lst_a)\n#최소 x값\nlst_b = [] #첫번째 x값 리스트\nfor i in range(4):\n lst_b.append(matrix[i][0])\nmin_x_range = min(lst_b)\n#최소 y값\nlst_c=[] #첫번째 y값 리스트\nfor i in range(4):\n lst_c.append(matrix[i][1])\nmin_y_range = min(lst_c)\n\n# for b in range(max_x_range):\n# for c in range(max_y_range):\n\nmatrix_b = []\ncount = 0\nfor i in range(max_y_range - min_y_range): # 0으로 채워진 8X7 매트릭스\n matrix_b.append([0 for j in range(max_x_range - min_x_range)])\n# 1번 사각형을 보고 여기에 좌표가 속하면 matrix_b에서의 그 좌표값을 1로\n# 1갯수\n\nfor idy in range(min_y_range,max_y_range):\n for idx in range(min_x_range,max_x_range):\n for i in range(4):\n if (matrix[i][0] <= idx < matrix[i][2]) and (matrix[i][1] <= idy < matrix[i][3]):\n #(첫번째 사각형 x 좌표 중 작은거 < idx < 첫번째 사각형 x 좌표 중 큰거) and (첫번째 사각형 y 좌표 중 작은거 < idy < 첫번째 사각형 y 좌표 중 큰거):\n matrix_b[idy-min_y_range][idx-min_x_range] = 1 #1로 채워줌\nfor i in range(max_y_range - min_y_range):\n for j in range(max_x_range - min_x_range):\n if matrix_b[i][j] == 1:\n count +=1\nprint(matrix_b)\nprint(count)\n\n# for i in range(4):\n# matrix[0][i]\n# if min_y_rangematrix[i][3]:\n# matrix_b[i][1] = 1\n# if matrix_b[i][1] = 1: #이미 1이 채워져있어도 1 넣기\n\n\n\n \n\n","sub_path":"IM/BJ2669직사각형면적구하기_mj.py","file_name":"BJ2669직사각형면적구하기_mj.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"135556625","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/dunfield/snappy/build/lib.macosx-10.6-intel-2.7/snappy/numericOutputChecker.py\n# Compiled at: 2019-07-15 
23:56:54\nfrom builtins import range\n\nclass NumericExample:\n \"\"\"\n The actual result (3.141592653589793) differs from the given result,\n but the difference is less than 1e-6, so it still passes::\n\n >>> 1.5707963267948966 * 2 # doctest: +NUMERIC6\n 3.14159285\n\n The text pieces between the numbers are also compared, performing\n white-space normalization::\n\n >>> ['a', 4.5, 6.9] # doctest: +NUMERIC6\n ['a', 4.5, 6.9000000001]\n\n Intervals in the notation emitted by sage are allows::\n\n >>> print(\"4.5?e-1\") # doctest: +NUMERIC6\n 4.50000000001?e-1\n\n \"\"\"\n pass\n\n\nimport doctest, re, decimal\nNUMERIC_LIST = []\nNUMERIC_DICT = {}\nALL_NUMERIC = 0\n\ndef init_precisions(precisions):\n \"\"\"\n Register flags for given precisions with doctest module.\n\n Unfortunately, doctest doesn't seem to support a more generic mechanism\n such as \"# doctest: +NUMERIC: 6\" to specify the precision and we need to\n unroll each precision we want to its own flag.\n \"\"\"\n global ALL_NUMERIC\n global NUMERIC_DICT\n global NUMERIC_LIST\n for precision in precisions:\n if precision not in NUMERIC_DICT:\n flag = doctest.register_optionflag('NUMERIC%d' % precision)\n NUMERIC_LIST.append((precision, flag))\n NUMERIC_DICT[precision] = flag\n ALL_NUMERIC |= flag\n\n\ninit_precisions(range(0, 33, 3))\n\ndef get_precision(optionflags):\n \"\"\"\n Get precision from optionflags\n \"\"\"\n for precision, flag in NUMERIC_LIST:\n if optionflags & flag:\n return precision\n\n\nmantissa_pat = '([0-9]+(?:\\\\.[0-9]+)?)'\ninterval_pat = '(\\\\?)?'\nexponent_pat = '(?:\\\\ ?([eE][+-]?[0-9]+))?'\nnumber_re = re.compile('(' + mantissa_pat + interval_pat + exponent_pat + ')')\nnumber_group_count = 4\nnumber_split_stride = number_group_count + 1\nNUMERIC_DEFAULT_OPTIONFLAGS = doctest.NORMALIZE_WHITESPACE\n\ndef to_decimal(groups):\n number, mantissa, interval, exponent = groups\n if exponent:\n n = mantissa + exponent\n else:\n n = mantissa\n return decimal.Decimal(n)\n\n\nclass NumericOutputChecker(doctest.OutputChecker):\n r\"\"\"\n Implements doctest's OutputChecker, see documentation of\n NumericExample for examples.\n\n >>> N = NumericOutputChecker()\n\n >>> a = \"[3.499e-8, 4.5?e-8]\"\n >>> b = \"[3.499e-8, 4.5?e-8]\"\n\n >>> N.check_output(a, b, NUMERIC_DICT[12])\n True\n\n >>> b = \"[3.499999e-8, 3.2?e-8]\"\n >>> N.check_output(a, b, NUMERIC_DICT[6])\n True\n >>> N.check_output(a, b, NUMERIC_DICT[9])\n False\n >>> N.formatted_compare_numeric(a, b, NUMERIC_DICT[9])\n 'Numbers differed by 1.3E-8\\n\\nExpected : 3.499e-8\\nGot : 3.499999e-8\\nDifference : 9.99E-12\\n\\nExpected : 4.5?e-8\\nGot : 3.2?e-8\\nDifference (FAILURE): 1.3E-8\\n'\n >>> N.compare_numeric(a, b, NUMERIC_DICT[12])\n ('NUMERIC', ([('3.499e-8', '3.499999e-8', True, Decimal('9.99E-12')), ('4.5?e-8', '3.2?e-8', True, Decimal('1.3E-8'))], Decimal('1.3E-8')))\n\n >>> b = \"[3.4999e-8, 4.5e-8]\"\n >>> N.formatted_compare_numeric(a, b, NUMERIC_DICT[6])\n 'Expected interval, but got 4.5e-8.'\n\n >>> b = \"[3.4999?e-8, 4.5e-8]\"\n >>> N.formatted_compare_numeric(a, b, NUMERIC_DICT[6])\n 'Expected number, but got 3.4999?e-8.'\n\n >>> b = \"a = [3.4999e-8, 4.5?e-8]\"\n >>> N.formatted_compare_numeric(a, b, NUMERIC_DICT[6])\n 'Text between numbers differs'\n\n >>> b = \"[3.4999e-8, 4.5?e-8, 5.63]\"\n >>> N.formatted_compare_numeric(a, b, NUMERIC_DICT[6])\n 'Expected 2 numbers but got 3 numbers.'\n\n >>> a = \"[4.5, 6.7e1, 2e+3]\"\n >>> b = \"[4.5000001, 67.00000001, 2.0000000000000000001e+3]\"\n >>> N.compare_numeric(a, b, NUMERIC_DICT[6])\n 
('OK', None)\n >>> N.compare_numeric(a, b, NUMERIC_DICT[12]) \n ('NUMERIC', ([('4.5', '4.5000001', True, Decimal('1E-7')), ('6.7e1', '67.00000001', True, Decimal('1E-8')), ('2e+3', '2.0000000000000000001e+3', False, Decimal('1E-16'))], Decimal('1E-7')))\n\n Account for pari adding a space before the E::\n\n >>> a = \"4.5e-9\"\n >>> b = \"4.5 E-9\"\n >>> N.compare_numeric(a, b, NUMERIC_DICT[12])\n ('OK', None)\n\n \"\"\"\n\n def compare_numeric(self, want, got, optionflags):\n \"\"\"\n Compares want and got by scanning for numbers. The numbers are\n compared using an epsilon extracted from optionflags. The text\n pieces between the numbers are compared falling back to the\n default implementation of OutputChecker.\n\n Returns a pair (status, data) where status is 'OK' if the\n comparison passed or indicates how it failed with data containing\n information that can be used to format the text explaining the\n differences.\n \"\"\"\n split_want = re.split(number_re, want)\n split_got = re.split(number_re, got)\n if len(split_want) != len(split_got):\n return ('COUNT',\n (\n len(split_want) // number_split_stride,\n len(split_got) // number_split_stride))\n else:\n flags = optionflags | NUMERIC_DEFAULT_OPTIONFLAGS\n for i in range(0, len(split_want), number_split_stride):\n if not doctest.OutputChecker.check_output(self, split_want[i], split_got[i], flags):\n return ('TEXT', None)\n\n epsilon = decimal.Decimal(0.1) ** get_precision(optionflags)\n rows = []\n max_diff = 0\n for i in range(1, len(split_want), number_split_stride):\n number_want = split_want[i]\n number_got = split_got[i]\n is_interval_want = bool(split_want[(i + 2)])\n is_interval_got = bool(split_got[(i + 2)])\n if is_interval_want != is_interval_got:\n return ('TYPE', (is_interval_want, number_got))\n decimal_want = to_decimal(split_want[i:i + number_group_count])\n decimal_got = to_decimal(split_got[i:i + number_group_count])\n diff = abs(decimal_want - decimal_got)\n failed = diff > epsilon\n max_diff = max(max_diff, diff)\n rows.append((number_want, number_got, failed, diff))\n\n if max_diff > epsilon:\n return (\n 'NUMERIC', (rows, max_diff))\n return ('OK', None)\n\n def format_compare_numeric_result(self, status, data):\n \"\"\"\n Formats a nice text from the result of compare_numeric.\n \"\"\"\n if status == 'COUNT':\n return 'Expected %d numbers but got %d numbers.' % data\n if status == 'TEXT':\n return 'Text between numbers differs'\n if status == 'TYPE':\n is_interval_want, number_got = data\n if is_interval_want:\n k = 'interval'\n else:\n k = 'number'\n return 'Expected %s, but got %s.' 
% (k, number_got)\n if status == 'NUMERIC':\n rows, max_diff = data\n result = 'Numbers differed by %s\\n' % max_diff\n for number_want, number_got, failed, diff in rows:\n if result:\n result += '\\n'\n result += 'Expected : %s\\n' % number_want\n result += 'Got : %s\\n' % number_got\n if failed:\n result += 'Difference (FAILURE): %s\\n' % diff\n else:\n result += 'Difference : %s\\n' % diff\n\n return result\n raise Exception('Internal error in OutputChecker.')\n\n def formatted_compare_numeric(self, want, got, optionflags):\n \"\"\"\n Performs comparison of compare_numeric and returns formatted\n text.\n\n Only supposed to be used if comparison failed.\n \"\"\"\n status, data = self.compare_numeric(want, got, optionflags)\n return self.format_compare_numeric_result(status, data)\n\n def check_output(self, want, got, optionflags):\n \"\"\"\n Implementation of OutputChecker method.\n \"\"\"\n if want == got:\n return True\n else:\n if optionflags & ALL_NUMERIC:\n status, data = self.compare_numeric(want, got, optionflags)\n return status == 'OK'\n return doctest.OutputChecker.check_output(self, want, got, optionflags)\n\n def output_difference(self, example, got, optionflags):\n \"\"\"\n Implementation of OutputChecker method.\n \"\"\"\n if not optionflags & ALL_NUMERIC or example.exc_msg:\n return doctest.OutputChecker.output_difference(self, example, got, optionflags)\n else:\n flags = optionflags | NUMERIC_DEFAULT_OPTIONFLAGS\n base_result = doctest.OutputChecker.output_difference(self, example, got, flags)\n compare_result = self.formatted_compare_numeric(example.want, got, optionflags)\n return base_result + '\\nReason for failure: ' + compare_result + '\\n'\n\n\ndef run_doctests(verbose=False):\n failed, attempted = (0, 0)\n finder = doctest.DocTestFinder()\n runner = doctest.DocTestRunner(verbose=verbose)\n for test in finder.find(NumericOutputChecker):\n runner.run(test)\n\n result = runner.summarize()\n failed += result.failed\n attempted += result.attempted\n runner = doctest.DocTestRunner(checker=NumericOutputChecker(), verbose=verbose)\n for test in finder.find(NumericExample):\n runner.run(test)\n result = runner.summarize()\n failed += result.failed\n attempted += result.attempted\n\n return doctest.TestResults(failed, attempted)\n\n\nrun_doctests.__name__ = 'NumericOutputChecker'","sub_path":"pycfiles/snappy-2.7-cp27-cp27m-macosx_10_6_intel/numericOutputChecker.py","file_name":"numericOutputChecker.py","file_ext":"py","file_size_in_byte":9956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"300149499","text":"import sys\nsys.path.append('.\\\\Controlleurs')\nfrom regle import *\nfrom listeregle import *\nfrom renommage import *\nfrom tkinter import *\nfrom tkinter import messagebox, filedialog\nfrom tkinter.ttk import Combobox\n\n\nclass MaFenetre(Tk):\n def __init__(self):\n super(MaFenetre, self).__init__()\n self.geom_fen(1050, 450)\n self.resizable(width=False, height=False)\n self.menu()\n self.main_window()\n self.mainloop()\n\n def geom_fen(self, larg, haut):\n \"\"\"\n Permet de dimensionner la fenetre en fonction de la résolution, et de la centrer\n :param larg: Largeur de la fenetre générée\n :param haut: Hauteur de la fenetre générée\n :return:\n \"\"\"\n if self.winfo_screenwidth() < 1920:\n larg *= 0.8\n haut *= 0.8\n self.geometry(\n \"%dx%d%+d%+d\" % (larg, haut, (self.winfo_screenwidth() - larg) // 2,\n (self.winfo_screenheight() - haut) // 2))\n\n def menu(self):\n \"\"\"\n Menu de la fenetre, 
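compare_numeric above relies on a re.split property: when the pattern contains capturing groups, the result interleaves the text between matches with every captured group, which is why the checker walks it in strides of group count + 1. In miniature, with a cut-down version of number_re:

```python
import re

# One outer group plus mantissa and interval-flag groups.
pat = re.compile(r'(([0-9]+(?:\.[0-9]+)?)(\?)?)')
parts = re.split(pat, 'x = 3.14, y = 2.5?')
print(parts)
# ['x = ', '3.14', '3.14', None, ', y = ', '2.5?', '2.5', '?', '']
# Text pieces sit at indices 0, 4, 8, ... (stride = 3 groups + 1).
```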
contenant Regle>Lister>Creer et ?>A propos\n \"\"\"\n main_menu = Menu(self)\n roll_menu = Menu(main_menu, tearoff=0) # Menu déroulant\n roll_menu.add_command(label=\"Lister\", command=self.window_liste) # Option lister, renvoie sur une fenêtre listant les règles\n roll_menu.add_command(label=\"Créer\", command=self.window_creer) # Option créer, renvoie sur une fenêtre de creation\n\n help_menu = Menu(main_menu, tearoff=0)\n help_menu.add_command(label=\"À propos\", command=self.help)\n\n main_menu.add_cascade(label=\"Règles\", menu=roll_menu)\n main_menu.add_cascade(label=\"?\", menu=help_menu)\n\n self.config(menu=main_menu)\n\n def main_window(self):\n \"\"\"\n Fenetre principale permettant de renommer des fichiers d'un dossier en fonction des parametre mit dans les champs\n \"\"\"\n self.title(\"Renommage de fichier\")\n ### Label ###\n title_lab = Label(self, text=\"Renommer en lot\").grid(row=0, column=2)\n Label(self, text=\"Nom de répertoire\").grid(row=1, column=1)\n Label(self, text=\"Amorce\").grid(row=3, column=0, pady=10)\n Label(self, text=\"Prefixe\").grid(row=3, column=1)\n Label(self, text=\"Nom du fichier\").grid(row=3, column=2)\n Label(self, text=\"Postfixe\").grid(row=3, column=3)\n Label(self, text=\"Extension concernée\").grid(row=3, column=4)\n Label(self, text=\"A partir de : \").grid(row=5, column=0, pady=10)\n\n ### Picture ###\n picture = PhotoImage(file=\"./Modeles/Rename.gif\")\n label1 = Label(self, image=picture)\n label1.image = picture\n label1.grid(row=1, column=5)\n\n ### Entry ###\n self.prefix = StringVar()\n Entry(self, textvariable=self.prefix).grid(row=4, column=1, padx=15)\n\n self.nom_fich = StringVar()\n Entry(self, textvariable=self.nom_fich).grid(row=4, column=2, padx=15)\n\n self.apartirde = StringVar()\n Entry(self, textvariable=self.apartirde).grid(row=6, column=0, padx=15)\n\n self.postfix = StringVar()\n Entry(self, textvariable=self.postfix).grid(row=4, column=3, padx=15)\n\n self.ext = StringVar()\n Entry(self, textvariable=self.ext).grid(row=4, column=4, padx=15)\n\n ### Combobox ###\n self.amorce_select = StringVar()\n amorce_choice = ('Aucune', 'Lettres', 'Chiffres')\n Combobox(self, textvariable=self.amorce_select,\n values=amorce_choice, state='readonly').grid(row=4, column=0, padx=15)\n self.amorce_select.set(amorce_choice[0])\n\n ### Button ###\n Button(self, text=\"Browse\", command= self.browse).grid(row=1, column=2)\n Button(self, text=\"Renommer\", command=self.renommer_fichiers).grid(row=6, column=4)\n\n def renommer_fichiers(self):\n \"\"\"\n Methode permettant de renommer avec la regle passee\n \"\"\"\n nomrep = self.file_path\n amorce = self.amorce_select.get()\n a_partir_de = self.apartirde.get()\n prefixe = self.prefix.get()\n nom_fichier = self.nom_fich.get()\n postfixe = self.postfix.get()\n extension = self.ext.get()\n ma_regle = Regle(\" \", amorce, a_partir_de, prefixe, nom_fichier, postfixe, extension)\n rename = Renommage(ma_regle, nomrep)\n rename.renommer(rename)\n\n def window_creer(self):\n \"\"\"\n Fenetre associee a la creation de regle\n \"\"\"\n win_creer = Toplevel(self)\n win_creer.geometry(\n \"%dx%d%+d%+d\" % (1050, 450, (self.winfo_screenwidth() - 1050) // 2,\n (self.winfo_screenheight() - 450) // 2))\n win_creer.title(\"Création nouvelle règle\")\n Label(win_creer, text=\"Nom de règle\").grid(row=1, column=1)\n Label(win_creer, text=\"Amorce\").grid(row=3, column=0, pady=10)\n Label(win_creer, text=\"Prefixe\").grid(row=3, column=1)\n Label(win_creer, text=\"Nom du fichier\").grid(row=3, column=2)\n 
Label(win_creer, text=\"Postfixe\").grid(row=3, column=3)\n Label(win_creer, text=\"Extension concernée\").grid(row=3, column=4)\n Label(win_creer, text=\"A partir de : \").grid(row=5, column=0, pady=10)\n\n ## Picture ####\n # picture = PhotoImage(file=\"./Modeles/New-Rule.gif\")\n # label1 = Label(self.frame, image=picture)\n # label1.image = picture\n # label1.grid(row=1, column=5)\n\n ### Entry ###\n self.nomregle = StringVar()\n Entry(win_creer, textvariable=self.nomregle).grid(row=1, column=2)\n self.prefix = StringVar()\n Entry(win_creer, textvariable=self.prefix).grid(row=4, column=1, padx=15)\n self.apartirde = StringVar()\n Entry(win_creer, textvariable=self.apartirde).grid(row=6, column=0, padx=15)\n self.postfix = StringVar()\n Entry(win_creer, textvariable=self.postfix).grid(row=4, column=3, padx=15)\n self.nom_fich = StringVar()\n Entry(win_creer, textvariable=self.nom_fich).grid(row=4, column=2, padx=15)\n self.ext = StringVar()\n Entry(win_creer, textvariable=self.ext).grid(row=4, column=4, padx=15)\n\n ### Combobox ###\n self.amorce_select = StringVar()\n amorce_choice = ('Aucune', 'Lettres', 'Chiffres')\n Combobox(win_creer, textvariable=self.amorce_select,\n values=amorce_choice, state='readonly').grid(row=4, column=0, padx=15)\n self.amorce_select.set(amorce_choice[0])\n\n ### Button ###\n Button(win_creer, text=\"Créer\", command=self.creer_nouvelle_regle).grid(row=6, column=4)\n\n def creer_nouvelle_regle(self):\n \"\"\"\n Methode créant une nouvelle regle via methode sauvegarder de ListeRegle en recuperant les données des champs de texte\n \"\"\"\n nomrep = self.nomregle.get()\n amorce = self.amorce_select.get()\n apartirde = self.apartirde.get()\n prefixe = self.prefix.get()\n nomfich = self.nom_fich.get()\n postfixe = self.postfix.get()\n ext = self.ext.get()\n ma_regle = Regle(nomrep, amorce, apartirde, prefixe, nomfich, postfixe, ext)\n ma_liste = ListeRegle()\n ma_liste.sauvegarder(ma_regle)\n messagebox.showinfo(\"Ok\",\"Règle \" + nomrep + \" créée\")\n\n def window_liste(self):\n \"\"\"\n Fenetre associee a la selection de regles existantes\n \"\"\"\n win_liste = Toplevel(self)\n win_liste.geometry(\n \"%dx%d%+d%+d\" % (1050, 450, (self.winfo_screenwidth() - 1050) // 2,\n (self.winfo_screenheight() - 450) // 2))\n win_liste.title(\"Liste des règles\")\n liste_regle = ListeRegle()\n liste_regle.charger()\n regles = liste_regle.get_regle()\n ### Combobox ###\n self.amorce_select = StringVar()\n Combobox(win_liste, textvariable=self.amorce_select,\n values=regles, state='readonly').grid(row=0, column=0)\n self.amorce_select.set(regles[0])\n ### Button ###\n Button(win_liste, text=\"valider\", command=self.previsu_selection).grid(row=1, column=0)\n\n def previsu_selection(self):\n \"\"\"\n Fenetre associee à la previsualisation d'une regle selectionnee\n \"\"\"\n win_liste_select = Toplevel(self)\n win_liste_select.geometry(\n \"%dx%d%+d%+d\" % (1050, 450, (self.winfo_screenwidth() - 1050) // 2,\n (self.winfo_screenheight() - 450) // 2))\n am = self.amorce_select.get()\n nom_regle = am.split(\" \")[0]\n amorce = am.split(\" \")[1]\n apartirde = am.split(\" \")[2]\n prefixe = am.split(\" \")[3]\n nom_fichier = am.split(\" \")[4]\n postfixe = am.split(\" \")[5]\n ext = am.split(\" \")[6]\n\n ### Label ###\n Label(win_liste_select, text=\"Nom de règle\").grid(row=1, column=1)\n Label(win_liste_select, text=\"Amorce\").grid(row=3, column=0, pady=10)\n Label(win_liste_select, text=\"Prefixe\").grid(row=3, column=1)\n Label(win_liste_select, text=\"Nom du 
fichier\").grid(row=3, column=2)\n Label(win_liste_select, text=\"Postfixe\").grid(row=3, column=3)\n Label(win_liste_select, text=\"Extension concernée\").grid(row=3, column=4)\n Label(win_liste_select, text=\"A partir de : \").grid(row=5, column=0, pady=10)\n\n ## Picture ####\n # picture = PhotoImage(file=\"./Modeles/New-Rule.gif\")\n # label1 = Label(self.frame, image=picture)\n # label1.image = picture\n # label1.grid(row=1, column=5)\n\n ## Entry ###\n self.nomR = Entry(win_liste_select)\n self.nomR.grid(row=1, column=2)\n self.nomR.insert(0, nom_regle)\n\n self.amorce = Entry(win_liste_select)\n self.amorce.grid(row=4, column=0)\n self.amorce.insert(0, amorce)\n\n self.apart = Entry(win_liste_select)\n self.apart.grid(row=6, column=0, padx=15)\n self.apart.insert(0, apartirde)\n\n self.prefixe = Entry(win_liste_select)\n self.prefixe.grid(row=4, column=1, padx=15)\n self.prefixe.insert(0, prefixe)\n\n self.nomFichier = Entry(win_liste_select)\n self.nomFichier.grid(row=4, column=2, padx=15)\n self.nomFichier.insert(0, nom_fichier)\n\n self.postfixe = Entry(win_liste_select)\n self.postfixe.grid(row=4, column=3)\n self.postfixe.insert(0, postfixe)\n\n self.extens = Entry(win_liste_select)\n self.extens.grid(row=4, column=4)\n self.extens.insert(0, ext)\n Button(win_liste_select, text=\"choisir\", command=self.regle_choisie).grid(row=6, column=4)\n\n def regle_choisie(self):\n \"\"\"\n Fenetre associee a la regle choisie dans la liste afin de renommer en fonction de celle-ci\n \"\"\"\n win_regle_choisie = Toplevel(self)\n win_regle_choisie.geometry(\n \"%dx%d%+d%+d\" % (1050, 450, (self.winfo_screenwidth() - 1050) // 2,\n (self.winfo_screenheight() - 450) // 2))\n win_regle_choisie.title(\"Renommage de fichier\")\n # Label\n Label(win_regle_choisie, text=\"Renommer en lot\").grid(row=0, column=2)\n Label(win_regle_choisie, text=\"Nom de répertoire\").grid(row=1, column=1)\n Label(win_regle_choisie, text=\"Amorce\").grid(row=3, column=0, pady=10)\n Label(win_regle_choisie, text=\"Prefixe\").grid(row=3, column=1)\n Label(win_regle_choisie, text=\"Nom du fichier\").grid(row=3, column=2)\n Label(win_regle_choisie, text=\"Postfixe\").grid(row=3, column=3)\n Label(win_regle_choisie, text=\"Extension concernée\").grid(row=3, column=4)\n Label(win_regle_choisie, text=\"A partir de : \").grid(row=5, column=0, pady=10)\n\n # Picture\n picture = PhotoImage(file=\"./Modeles/Rename.gif\")\n label1 = Label(win_regle_choisie, image=picture)\n label1.image = picture\n label1.grid(row=1, column=5)\n\n # Entry\n self.nom_dossier = StringVar()\n Entry(win_regle_choisie, textvariable=self.nom_dossier).grid(row=1, column=2)\n\n amorce = Entry(win_regle_choisie)\n amorce.grid(row=4, column=0, padx=15)\n amorce.insert(0,self.amorce.get())\n\n prefixe = Entry(win_regle_choisie)\n prefixe.grid(row=4, column=1, padx=15)\n prefixe.insert(0, self.prefixe.get())\n\n nom_fich = Entry(win_regle_choisie)\n nom_fich.grid(row=4, column=2, padx=15)\n nom_fich.insert(0, self.nomFichier.get())\n\n apartirde = Entry(win_regle_choisie)\n apartirde.grid(row=6, column=0, padx=15)\n apartirde.insert(0, self.apart.get())\n\n postfixe = Entry(win_regle_choisie)\n postfixe.grid(row=4, column=3, padx=15)\n postfixe.insert(0, self.postfixe.get())\n\n extension = Entry(win_regle_choisie)\n extension.grid(row=4, column=4, padx=15)\n extension.insert(0, self.extens.get())\n\n ### Button ###\n Button(win_regle_choisie, text=\"Renommer\", command=self.renommer_fichers_regle_choisie).grid(row=6, column=4)\n\n def 
renommer_fichers_regle_choisie(self):\n \"\"\"\n Methode permettant de renommer des fichiers après la selection d'une regle sauvegardee et selectionnee\n \"\"\"\n nomrep = self.nom_dossier.get()\n amorce = self.amorce.get()\n a_partir_de = self.apart.get()\n prefixe = self.prefixe.get()\n nom_fichier = self.nomFichier.get()\n postfixe = self.postfixe.get()\n extension = self.extens.get()\n ma_regle2 = Regle(self.nomR.get(), amorce, a_partir_de, prefixe, nom_fichier, postfixe, extension)\n rename = Renommage(ma_regle2, nomrep)\n rename.renommer(rename)\n\n def browse(self):\n \"\"\"\n Methode permettant de choisir quel dossier selectionner pour le rename\n \"\"\"\n self.file_path = filedialog.askdirectory()\n\n @staticmethod\n def help():\n \"\"\"\n Popup a propos :\n \"\"\"\n messagebox.showinfo(\"À propos\", \"Créé par Quentin Nicolas\\nVersion 1.0\")\n","sub_path":"Vues/fenetre.py","file_name":"fenetre.py","file_ext":"py","file_size_in_byte":14099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"214355777","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# code adapted from \n# https://github.com/SSchwoebel/BalancingControl/blob/master/agent.py\n# (commit f1d86ba)\n\"\"\"This module contains the class that defines the interaction between\ndifferent modules that govern agent's behavior.\n\"\"\"\nimport numpy as np\nfrom perception import HierarchicalPerception\nfrom misc import ln, softmax\nimport scipy.special as scs\n\n \nclass BayesianPlanner(object):\n \n def __init__(self, perception, action_selection, policies,\n prior_states = None, prior_policies = None, \n prior_context = None,\n learn_habit = False,\n trials = 1, T = 10, number_of_states = 6, \n number_of_rewards = 2,\n number_of_policies = 10):\n \n #set the modules of the agent\n self.perception = perception\n self.action_selection = action_selection\n \n #set parameters of the agent\n self.nh = number_of_states #number of states\n self.npi = number_of_policies #number of policies\n self.nr = number_of_rewards\n \n self.T = T\n self.trials = trials\n \n if policies is not None:\n self.policies = policies\n else:\n #make action sequences for each policy\n self.policies = np.eye(self.npi, dtype = int)\n \n self.possible_polcies = self.policies.copy()\n \n self.actions = np.unique(self.policies)\n self.na = len(self.actions)\n \n if prior_states is not None:\n self.prior_states = prior_states\n else:\n self.prior_states = np.ones(self.nh)\n self.prior_states /= self.prior_states.sum()\n \n if prior_context is not None:\n self.prior_context = prior_context\n self.nc = prior_context.shape[0]\n else:\n self.prior_context = np.ones(1)\n self.nc = 1\n \n if prior_policies is not None:\n self.prior_policies = np.tile(prior_policies, (1,self.nc)).T\n else:\n self.prior_policies = np.ones((self.npi,self.nc))/self.npi\n \n self.learn_habit = learn_habit\n \n #set various data structures\n self.actions = np.zeros((trials, T), dtype = int)\n self.posterior_states = np.zeros((trials, T, self.nh, T, self.npi, self.nc))\n self.posterior_policies = np.zeros((trials, T, self.npi, self.nc))\n self.posterior_dirichlet_pol = np.zeros((trials, self.npi, self.nc))\n self.posterior_dirichlet_rew = np.zeros((trials, T, self.nr, self.nh, self.nc))\n self.observations = np.zeros((trials, T), dtype = int)\n self.rewards = np.zeros((trials, T), dtype = int)\n self.posterior_context = np.ones((trials, T, self.nc))\n self.posterior_context[:,:,:] = self.prior_context[np.newaxis,np.newaxis,:]\n 
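geom_fen in fenetre.py centres a window by building a Tk geometry string of the form WxH+X+Y. The arithmetic as a pure function, testable without opening a window:

```python
def centered_geometry(win_w, win_h, screen_w, screen_h):
    # Offset each axis by half the leftover space, as geom_fen does.
    x = (screen_w - win_w) // 2
    y = (screen_h - win_h) // 2
    return "%dx%d%+d%+d" % (win_w, win_h, x, y)

print(centered_geometry(1050, 450, 1920, 1080))  # 1050x450+435+315
```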
self.likelihood = np.zeros((trials, T, self.npi, self.nc))\n self.prior_policies_all = np.zeros((trials, self.npi, self.nc))\n self.prior_policies_all[0] = prior_policies\n \n\n def reset_beliefs(self, actions):\n self.actions[:,:] = actions \n self.posterior_states[:,:,:] = 0.\n self.posterior_policies[:,:,:] = 0.\n \n self.perception.reset_beliefs()\n self.planning.reset_beliefs()\n self.action_selection.reset_beliefs()\n \n \n def update_beliefs(self, tau, t, observation, reward, response):\n self.observations[tau,t] = observation\n self.rewards[tau,t] = reward\n \n if t == 0:\n self.possible_polcies = np.arange(0,self.npi,1).astype(np.int32)\n else:\n possible_policies = np.where(self.policies[:,t-1]==response)[0]\n self.possible_polcies = np.intersect1d(self.possible_polcies, possible_policies)\n \n self.posterior_states[tau, t] = self.perception.update_beliefs_states(\n tau, t,\n observation,\n reward,\n self.policies,\n self.possible_polcies)\n \n #update beliefs about policies\n self.posterior_policies[tau, t], self.likelihood[tau,t] = self.perception.update_beliefs_policies(tau, t)\n \n if t == self.T-1 and self.learn_habit:\n if tau < self.trials - 1:\n self.posterior_dirichlet_pol[tau], self.prior_policies_all[tau+1] = self.perception.update_beliefs_dirichlet_pol_params(tau, t, \\\n self.posterior_policies[tau,t], \\\n self.posterior_context[tau,t])\n else:\n self.posterior_dirichlet_pol[tau] = self.perception.update_beliefs_dirichlet_pol_params(tau, t, \\\n self.posterior_policies[tau,t], \\\n self.posterior_context[tau,t])[0]\n \n def generate_response(self, tau, t):\n \n #get response probability\n posterior_states = self.posterior_states[tau, t]\n posterior_policies = np.dot(self.posterior_policies[tau, t], self.posterior_context[tau, t])\n #print(self.posterior_context[tau, t])\n posterior_policies /= posterior_policies.sum()\n non_zero = posterior_policies > 0\n controls = self.policies[:, t][non_zero]\n posterior_policies = posterior_policies[non_zero]\n actions = np.unique(controls)\n\n self.actions[tau, t] = self.action_selection.select_desired_action(tau, \n t, posterior_policies, controls)\n \n \n return self.actions[tau, t]\n \n\n","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"170652734","text":"import os\nimport numpy as np\nfrom skimage.transform import resize\nfrom PIL import Image\nimport tensorflow as tf\nfrom numpy import asarray\n\n'''Splits MNIST dataset into distinct subfolders according to their classes.'''\n\n\ndef scale_images(images, new_shape):\n images_list = list()\n for image in images:\n # resize with nearest neighbor interpolation\n new_image = resize(image, new_shape, 0)\n # store\n images_list.append(new_image)\n return asarray(images_list)\n\n\ndef MNIST_split():\n simulation_dir = 'MNIST_Simulations'\n if not os.path.exists(simulation_dir):\n os.mkdir(simulation_dir)\n\n all_directory = 'MNIST_all_data'\n if not os.path.exists(all_directory):\n os.mkdir(all_directory)\n directory = 'MNIST_data'\n if not os.path.exists(directory):\n os.mkdir(directory)\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n print('Scaling images... 
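agent.py's update_beliefs prunes the policy set each step: only policies whose action at t-1 matches the observed response stay possible. The numpy mechanics in isolation:

```python
import numpy as np

policies = np.array([[0, 1],    # one row per policy, one column per time step
                     [1, 0],
                     [1, 1]])
possible = np.arange(len(policies))          # at t == 0: every policy

t, response = 1, 1                           # action 1 was taken at t-1
consistent = np.where(policies[:, t - 1] == response)[0]
possible = np.intersect1d(possible, consistent)
print(possible)                              # [1 2]
```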
This may take a moment...')\n x_train = 255 * scale_images(x_train, (32, 32))\n x_train = x_train.astype(np.uint8)\n x_test = 255 * scale_images(x_test, (32, 32))\n x_test = x_test.astype(np.uint8)\n print('Images are scaled.')\n for all in range(len(x_train)):\n image_name = all_directory + '/Label_' + str(all) + '_.png'\n im = Image.fromarray(x_train[all, :, :])\n im.save(image_name)\n for i in range(np.max(y_train)+1):\n sub_direc = directory + '/Class_' + str(i)\n if not os.path.exists(sub_direc):\n os.mkdir(sub_direc)\n\n train_ind = y_train[:] == i\n train_index = np.arange(len(train_ind), dtype=int)[train_ind]\n sub_direc_train = sub_direc + '/Train'\n if not os.path.exists(sub_direc_train):\n os.mkdir(sub_direc_train)\n for j in train_index:\n image_name = sub_direc_train + '/Label_' + str(i) + '_Train_' + str(j) + '_.png'\n im = Image.fromarray(x_train[j, :, :])\n im.save(image_name)\n print(str(j) + 'th training image in '+str(i)+'th class is saved.')\n\n test_ind = y_test[:] == i\n test_index = np.arange(len(test_ind), dtype=int)[test_ind]\n sub_direc_test = sub_direc + '/Test'\n if not os.path.exists(sub_direc_test):\n os.mkdir(sub_direc_test)\n for j in test_index:\n image_name = sub_direc_test + '/Label_' + str(i) + '_Test_' + str(j) + '_.png'\n im = Image.fromarray(x_test[j, :, :])\n im.save(image_name)\n print(str(j) + 'th testing image in '+str(i)+'th class is saved.')\n\n\nif __name__ == '__main__':\n MNIST_split()\n","sub_path":"Converter.py","file_name":"Converter.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"503295549","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#tfc.sqliteにアクセスして、在庫表・検討表用に入荷予定表を作ります\n#南濃取り込み基準日を決めて、それ以降のdelivery納期のpoデータを\n#作成します。\n\nimport sqlite3\nimport csv\nfrom index_tool import get_xindex, get_yindex\n\nDB_FILE = 'tfc.sqlite'\n\nclass MakeBalance:\n def __init__(self):\n con = sqlite3.connect(DB_FILE)\n cur = con.cursor()\n\n begin_day = input(\"南濃着日でいつより後の予定を出しますか。(例)20190601:\")\n begin_day = begin_day[:4] + \"-\" + begin_day[4:6] + \"-\" + begin_day[6:8]\n #begin_day ='2020-01-06'\n\n\n #インボイス残データ用\n #コードの在庫区分が1のインボイス行で、南濃取り込み日が\n #bigin_day より新しいものを抽出\n cur.execute(\"select i.delivery, i.etd, i.invn, c.hcode, v.qty from ((invline v inner join inv i on v.inv_id = i.id) inner join tfc_code c on c.id = v.code_id) where i.delivery > ? and (c.zaiko=1 or c.kento =1)\", (begin_day,))\n\n invlines = cur.fetchall()\n #self.invlines = invlines\n\n #インボイスの最新のdeliveryを求めます。\n #(インボイスデータは在庫区分1)\n #POデータはmaxdeli以降\n\n #cur.execute(\"select max(i.delivery) from (((invline v inner join poline o on v.poline_id = o.id) inner join po p on p.id = o.po_id) inner join inv i on v.inv_id = i.id) where p.comment = 'To Hukla Japan/Nanno ' and o.om=''\")\n cur.execute(\"select max(i.delivery) from ((invline v inner join inv i on v.inv_id = i.id) inner join tfc_code c on v.code_id = c.id ) where c.zaiko=1 or c.kento=1\")\n maxdeli = cur.fetchone()[0]\n\n #po内容情報の取得/フクラ南濃向けバイオーダー除く\n #id, 着日, etd, PO No. コード 残数\n cur.execute(\"select o.id, p.delivery, p.etd, p.pon, c.hcode, o.balance from ((poline o inner join po p on o.po_id = p.id) inner join tfc_code c on o.code_id = c.id) where p.delivery > ? 
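# Sketch of the boolean-mask index selection used in the MNIST split above:
# a True/False mask over the labels is turned into integer indices.
# np.flatnonzero is an equivalent shortcut. Values are illustrative.
import numpy as np

y = np.array([0, 1, 0, 2, 1, 0])
mask = (y == 0)
idx = np.arange(len(mask), dtype=int)[mask]
print(idx)  # [0 2 5]
assert (idx == np.flatnonzero(mask)).all()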
and p.comment like '%Nanno%' and o.om = ''\", (maxdeli, ))\n\n polines = cur.fetchall()\n #polines.sort()\n\n self.zan_hyo =[] #PO情報にinv情報を加えた表を作成\n bal_hyo =[] #残が0より大きいデータのみ格納\n\n for row in polines: #PO情報をtupleからlistに変換\n #polines情報をzan_hyoに格納\n self.zan_hyo.append(list(row))\n\n for zan in self.zan_hyo:\n if zan[5] > 0 and not zan[4] =='' : #hcodeがないものは飛ばす\n #残数量がゼロより大きい時、着日, etd, PO No. コード 残数\n bal_hyo.append([zan[1], zan[2], zan[3], zan[4], zan[5]])\n\n invlist = [] #インボイス情報をtupleからlistに変換\n for row in invlines:\n invlist.append(list(row))\n\n self.invlist = invlist\n #PO残情報とinv情報を合わせる\n self.totallist = self.sum_list(invlist + bal_hyo)\n #self.totallist.sort()\n\n cur.close()\n con.close()\n\n\n def make_nolist(self):\n #インボイスナンバー、POナンバー,delivery, etdの重複しないリスト\n nos = set()\n for row in self.totallist:\n nos.add((row[0], row[1], row[2]))\n\n nolist=list(nos)\n nolist.sort()\n #print('nolist:', nolist)\n return nolist\n\n def make_codelist(self):\n #コードのの重複しないリスト\n codes = set()\n for row in self.totallist:\n codes.add(row[3])\n\n codelist=list(codes)\n codelist.sort()\n return codelist\n\n def make_yotei(self):\n nolist = self.make_nolist()\n codelist = self.make_codelist()\n #print('nolist:', nolist)\n #print('codelist:', codelist)\n\n #予定表用の2次元配列を初期化してデータを代入する\n yotei_hyo = [['' for i in range(len(nolist)+1)] for j in range(len(codelist)+1)]\n\n #codelist.insert(0, \"\") #先頭行はタイトル行なので空けておく\n #1列目にコードを代入\n for i, code in enumerate(codelist):\n yotei_hyo[i+1][0] = code\n\n #1行目にINV no. PO no. を代入\n for i, num in enumerate(nolist):\n yotei_hyo[0][i+1] = num[2]\n\n #print('self.totallist', self.totallist)\n\n for row in self.totallist: #着日, etd, PO No. コード 残数\n yotei_hyo[get_yindex(yotei_hyo, row[3])][get_xindex(yotei_hyo, row[2])] = row[4]\n\n return yotei_hyo\n\n\n def write_balance(self, hyo):\n with open('balance.csv', 'w', encoding='CP932') as f:\n writer = csv.writer(f)\n writer.writerows(hyo)\n\n #ETD, delivery, PO/inv, code, qty\n def sum_list(self, data):\n #コード,PO/INVが同じデータの入荷予定数を加算して一つにまとめる。\n matome ={} #tuple をキーの辞書\n c_data = [] #まとめたデータ保管用変数\n\n for row in data:\n matome.setdefault((row[0], row[1], row[2], row[3]), 0 )\n matome[(row[0], row[1], row[2], row[3])] += row[4]\n\n #辞書からリストに戻す\n for k, v in matome.items():\n c_data.append(list(k) + [v])\n\n return c_data\n\n\n\n#mb = MakeBalance()\n#mb.write_balance( mb.make_yotei())\n#mb.write_balance(mb.totallist)\n\n","sub_path":"make_balance.py","file_name":"make_balance.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"604350614","text":"\n\"\"\"A demo script showing how to DIARIZATION ON WAV USING UIS-RNN.\"\"\"\n\nimport numpy as np\nimport soundfile as sf\nimport uisrnn\nimport librosa\nimport sys\nsys.path.append('ghostvlad')\nsys.path.append('visualization')\nimport toolkits\nimport model as spkModel\nimport os\nfrom viewer import PlotDiar\nimport pickle\nfrom collections import defaultdict\n \n# ===========================================\n# Parse the argument\n# ===========================================\nimport argparse\nparser = argparse.ArgumentParser()\n# set up training configuration.\nparser.add_argument('--gpu', default='', type=str)\nparser.add_argument('--resume', default=r'ghostvlad/pretrained/weights.h5', type=str)\nparser.add_argument('--data_path', default='4persons', type=str)\n# set up network configuration.\nparser.add_argument('--net', default='resnet34s', choices=['resnet34s', 'resnet34l'], 
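# A self-contained sketch of the "latest row per uid" SQL pattern used in
# make_balance.py (MAX(reg_date) per group, then re-join on the pair), with
# a made-up in-memory table for illustration:
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE bankacc (uid INT, reg_date TEXT, accno TEXT)")
con.executemany("INSERT INTO bankacc VALUES (?, ?, ?)",
                [(1, "2020-01-01", "old"), (1, "2020-06-01", "new")])
rows = con.execute(
    "SELECT uid, accno FROM "
    "(SELECT uid, MAX(reg_date) AS reg_date FROM bankacc GROUP BY uid) AS latest "
    "JOIN bankacc USING (uid, reg_date)").fetchall()
print(rows)  # [(1, 'new')]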
type=str)\nparser.add_argument('--ghost_cluster', default=2, type=int)\nparser.add_argument('--vlad_cluster', default=8, type=int)\nparser.add_argument('--bottleneck_dim', default=512, type=int)\nparser.add_argument('--aggregation_mode', default='gvlad', choices=['avg', 'vlad', 'gvlad'], type=str)\n# set up learning rate, training loss and optimizer.\nparser.add_argument('--loss', default='softmax', choices=['softmax', 'amsoftmax'], type=str)\nparser.add_argument('--test_type', default='normal', choices=['normal', 'hard', 'extend'], type=str)\n\nparser.add_argument('--save_wavpath', type=str)\nparser.add_argument('--save_pklpath', type=str)\n\n\nglobal args\nargs_all = parser.parse_args()\n\nargs = argparse.Namespace(**{k: v for k, v in args_all._get_kwargs()\n if not k.startswith(\"save_\")})\n\n#print(args,args_all)\n\n#SAVED_MODEL_NAME = 'pretrained/saved_model.uisrnn_benchmark'\n#SAVED_MODEL_NAME = 'pretrained/saved_model_vctk_muda_updated.uisrnn_benchmark'\nSAVED_MODEL_NAME = '/scratch/hh2263/VCTK/saved_model_vctk_muda_updated.uisrnn_benchmark'\n\ndef append2dict(speakerSlice, spk_period):\n key = list(spk_period.keys())[0]\n value = list(spk_period.values())[0]\n timeDict = {}\n timeDict['start'] = int(value[0]+0.5)\n timeDict['stop'] = int(value[1]+0.5)\n if(key in speakerSlice):\n speakerSlice[key].append(timeDict)\n else:\n speakerSlice[key] = [timeDict]\n\n return speakerSlice\n\ndef arrangeResult(labels, time_spec_rate): # {'1': [{'start':10, 'stop':20}, {'start':30, 'stop':40}], '2': [{'start':90, 'stop':100}]}\n lastLabel = labels[0]\n speakerSlice = {}\n j = 0\n for i,label in enumerate(labels):\n if(label==lastLabel):\n continue\n speakerSlice = append2dict(speakerSlice, {lastLabel: (time_spec_rate*j,time_spec_rate*i)})\n j = i\n lastLabel = label\n speakerSlice = append2dict(speakerSlice, {lastLabel: (time_spec_rate*j,time_spec_rate*(len(labels)))})\n return speakerSlice\n\ndef genMap(intervals): # interval slices to maptable\n slicelen = [sliced[1]-sliced[0] for sliced in intervals.tolist()]\n mapTable = {} # vad erased time to origin time, only split points\n idx = 0\n for i, sliced in enumerate(intervals.tolist()):\n mapTable[idx] = sliced[0]\n idx += slicelen[i]\n mapTable[sum(slicelen)] = intervals[-1,-1]\n\n keys = [k for k,_ in mapTable.items()]\n keys.sort()\n return mapTable, keys\n\ndef fmtTime(timeInMillisecond):\n millisecond = timeInMillisecond%1000\n minute = timeInMillisecond//1000//60\n second = (timeInMillisecond-minute*60*1000)//1000\n time = '{}:{:02d}.{}'.format(minute, second, millisecond)\n return time\n\ndef load_wav(in_file,file_length, current_time,block_sec, sr): #in_file as a soundfile object, block_sec is chunks in seconds\n if current_time + block_sec >= file_length: #in seconds, preview if reading 2 min is too much\n wav = np.frombuffer(in_file.buffer_read(-1, dtype='float32'),dtype=\"float32\")\n current_time = file_length\n else:\n wav = np.frombuffer(in_file.buffer_read(sr*block_sec, dtype='float32'),dtype=\"float32\") #load 2 minutes of audio at a time\n current_time += block_sec\n #wav, _ = librosa.load(in_file, sr=sr)\n intervals = librosa.effects.split(wav, top_db=20)\n wav_output = []\n for sliced in intervals:\n wav_output.extend(wav[sliced[0]:sliced[1]])\n return np.array(wav_output), (intervals/sr*1000).astype(int),current_time\n\ndef lin_spectogram_from_wav(wav, hop_length, win_length, n_fft=1024):\n linear = librosa.stft(wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length) # linear spectrogram\n return linear.T\n\n\n# 0s 
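# Minimal re-implementation sketch of the label run-length grouping that
# arrangeResult performs above: collapse a per-window label sequence into
# (label, start_ms, stop_ms) segments, scaled by the per-window duration.
def runs(labels, ms_per_window):
    segs, start = [], 0
    for i in range(1, len(labels) + 1):
        if i == len(labels) or labels[i] != labels[start]:
            segs.append((labels[start], start * ms_per_window, i * ms_per_window))
            start = i
    return segs

print(runs([0, 0, 1, 1, 1, 0], 500))
# [(0, 0, 1000), (1, 1000, 2500), (0, 2500, 3000)]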
1s 2s 4s 6s\n# |-------------------|-------------------|-------------------|\n# |-------------------|\n# |-------------------|\n# |-------------------|\n# |-------------------|\ndef load_data(in_file, file_length, current_time, win_length=400, sr=16000, hop_length=160, n_fft=512, embedding_per_second=0.5, overlap_rate=0.5,block_sec=180):\n #get current block's audio, and update file's read pointer\n wav, intervals,current_time = load_wav(in_file, file_length, current_time,block_sec, sr=sr) \n linear_spect = lin_spectogram_from_wav(wav, hop_length, win_length, n_fft)\n mag, _ = librosa.magphase(linear_spect) # magnitude\n mag_T = mag.T\n freq, time = mag_T.shape\n spec_mag = mag_T\n\n spec_len = sr/hop_length/embedding_per_second\n spec_hop_len = spec_len*(1-overlap_rate)\n\n cur_slide = 0.0\n utterances_spec = []\n\n while(True): # slide window.\n if(cur_slide + spec_len > time):\n break\n spec_mag = mag_T[:, int(cur_slide+0.5) : int(cur_slide+spec_len+0.5)]\n \n # preprocessing, subtract mean, divided by time-wise var\n mu = np.mean(spec_mag, 0, keepdims=True)\n std = np.std(spec_mag, 0, keepdims=True)\n spec_mag = (spec_mag - mu) / (std + 1e-5)\n utterances_spec.append(spec_mag)\n\n cur_slide += spec_hop_len\n\n return utterances_spec, intervals, current_time\n\ndef main(wav_path,saved_file_pkl, embedding_per_second=1.0, overlap_rate=0.5):\n\n # gpu configuration\n toolkits.initialize_GPU(args)\n\n\n params = {'dim': (257, None, 1), #included\n 'nfft': 512,\n 'spec_len': 250, #included\n 'win_length': 400,\n 'hop_length': 160,\n 'n_classes': 5994, #included\n 'sampling_rate': 16000,\n 'normalize': True,\n }\n \n #loading the pretrained embeddings model (a resnet) \n network_eval = spkModel.vggvox_resnet2d_icassp(input_dim=params['dim'],\n num_class=params['n_classes'],\n mode='eval', args=args)\n \n network_eval.load_weights(args.resume, by_name=True)\n\n\n model_args, _, inference_args = uisrnn.parse_arguments()\n #inference_args.look_ahead = 3\n #print(inference_args)\n\n model_args.observation_dim = 512\n uisrnnModel = uisrnn.UISRNN(model_args)\n\n #load the retrained uisrnn speaker diarization model\n uisrnnModel.load(SAVED_MODEL_NAME,'cpu')\n\n #open the input file\n in_file = sf.SoundFile(wav_path) \n file_length = sf.info(wav_path).duration\n #only output dictionary of speakers and their start/stop time (speakerSlice)\n out_file=open(saved_file_pkl, 'wb')\n current_time = 0\n\n \n feats = []\n all_intervals = None\n num_loop = 0 \n while True:\n start_time = current_time\n print(current_time,file_length)\n #load data, get the most updated current time\n specs, intervals,current_time = load_data(in_file, file_length, current_time, \n win_length=params['win_length'],hop_length=params['hop_length'], \n n_fft=params['nfft'], embedding_per_second=embedding_per_second, \n overlap_rate=overlap_rate)\n\n \n if num_loop == 0:\n all_intervals = intervals\n else:\n all_intervals = np.vstack((all_intervals,intervals+start_time*1000))\n #obtain features of these data, predicted via resnet model - have to find streaming option here as well\n #print(intervals+start_time*1000)\n for spec in specs:\n spec = np.expand_dims(np.expand_dims(spec, 0), -1)\n #print(spec.shape)\n v = network_eval.predict(spec)\n feats += [v]\n\n \n #print(len(feats))\n #print(intervals.shape,all_intervals.shape)\n if current_time >= file_length: #break when there's nothing else to read from the file\n break\n num_loop += 1\n\n mapTable, keys = genMap(all_intervals)\n feats = np.array(feats)[:,0,:].astype(float) # 
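# Sketch of the sliding-window arithmetic in load_data: the window length in
# spectrogram frames comes from the embedding rate, the hop from the overlap
# rate. The numbers mirror the defaults above but the total is illustrative.
sr, hop_length = 16000, 160
embedding_per_second, overlap_rate = 0.5, 0.5
spec_len = sr / hop_length / embedding_per_second   # 200 frames per window
spec_hop = spec_len * (1 - overlap_rate)            # 100 frames per step
windows, cur, total_frames = [], 0.0, 450
while cur + spec_len <= total_frames:
    windows.append((int(cur + 0.5), int(cur + spec_len + 0.5)))
    cur += spec_hop
print(windows)  # [(0, 200), (100, 300), (200, 400)]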
[splits, embedding dim] \n #print(feats.shape)\n #predict on accumulated features\n predicted_label,predicted_score,avg_predicted_score = uisrnnModel.predict(feats, inference_args)\n all_scores = {'score':predicted_score,'score_normalized':avg_predicted_score}\n #print(predicted_label)\n time_spec_rate = 1000*(1.0/embedding_per_second)*(1.0-overlap_rate) # speaker embedding every ?ms\n center_duration = int(1000*(1.0/embedding_per_second)//2)\n speakerSlice = arrangeResult(predicted_label, time_spec_rate)\n #print(speakerSlice)\n \n #the mapable and keys fucked things up here\n for spk,timeDicts in speakerSlice.items(): # time map to orgin wav(contains mute)\n for tid,timeDict in enumerate(timeDicts):\n s = 0\n e = 0\n for i,key in enumerate(keys):\n if(s!=0 and e!=0):\n break\n if(s==0 and key>timeDict['start']):\n offset = timeDict['start'] - keys[i-1]\n s = mapTable[keys[i-1]] + offset \n if(e==0 and key>timeDict['stop']):\n offset = timeDict['stop'] - keys[i-1]\n e = mapTable[keys[i-1]] + offset\n\n speakerSlice[spk][tid]['start'] = s \n speakerSlice[spk][tid]['stop'] = e \n\n \n #keep writing to the dictionary\n #print(speakerSlice)\n \n\n \n print(speakerSlice)\n pickle.dump([speakerSlice,all_scores], out_file, pickle.HIGHEST_PROTOCOL)\n in_file.close()\n out_file.close()\n\nif __name__ == '__main__':\n #main('./wavs/atwood_trimmed2_enhanced.wav', 'atwood_trimmed2_pretrain_0.8emb_0.1over.pkl',embedding_per_second=0.5, overlap_rate=0.5)\n main(args_all.save_wavpath,args_all.save_pklpath,embedding_per_second=0.8, overlap_rate=0.2)\n\n","sub_path":"speakerDiarization_longfiles_ver2.py","file_name":"speakerDiarization_longfiles_ver2.py","file_ext":"py","file_size_in_byte":10517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"248818540","text":"from tkinter import *\nfrom tkinter.ttk import *\nimport feedparser\n\nclass currConv:\n def __init__(self,mast):\n str=\"(▀̿Ĺ̯▀̿ ̿)\"\n #element init\n note=Notebook(mast)\n master=Frame(note)\n tab2=Notebook(note)\n note.add(master,text=\"Currency convert\")\n note.add(tab2,text=\"News Feed\")\n link = \"https://www.reddit.com/r/CURRENCY/.rss\" # rss link\n d = feedparser.parse(link)\n text = Text(tab2,font=(\"Georgia\", \"10\"))\n text.insert(INSERT,'NEWS\\n\\n')\n text.pack()\n for post in d.entries:\n to_print = post.title + \" \" + post.link\n text.insert('end',to_print + \"\\n\\n\",)\n text.pack()\n text.tag_add(\"here\", \"1.0\",'end')\n #setting the text color in tab2\n text.tag_config(\"here\", foreground=\"blue\") \n head=Label(master,text=\"Currency Convertor\",font=(\"palatino\", 30))\n inputField=Entry(master,width=30,font=(\"garamond\",20))\n options = [\"US $\",\"UK £\",\"EUR €\",\"IND ₹\",\"JAP ¥\"]\n var1=StringVar(master)\n var1.set(\"From\")\n list1=OptionMenu(master,var1,*options)\n list1.config(width=10)\n var2=StringVar(master)\n var2.set(\"To\")\n list2=OptionMenu(master,var2,*options)\n list2.config(width=10)\n but=Button(master,text=\"convert\")\n result=Label(master,text=str,font=(\"garamond\",20))\n #grid declarations\n note.grid()\n head.grid(row=0,columnspan=2,padx=15,pady=15)\n inputField.grid(row=1,columnspan=2,sticky=N+S+E+W,padx=5,pady=5)\n list1.grid(row=2,columnspan=1,sticky=N+S+W,padx=70,pady=10)\n list2.grid(row=2,column=1,columnspan=1,sticky=N+S+E,padx=70,pady=10)\n but.grid(row=3,columnspan=2,padx=20,pady=20)\n result.grid(row=4,columnspan=2,padx=20,pady=20)\n\nroot 
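# Sketch of the timestamp re-mapping idea used above: mapTable stores, for a
# few compacted-time keys, the matching original-time offsets; a compacted
# time t maps back by finding its segment start and adding the local offset.
# The table values are illustrative.
import bisect

map_table = {0: 0, 1000: 2500, 2000: 6000}   # compacted ms -> original ms
keys = sorted(map_table)

def to_original(t):
    i = bisect.bisect_right(keys, t)          # first key strictly greater than t
    base = keys[i - 1]
    return map_table[base] + (t - base)

print(to_original(1500))  # 3000: 500 ms into the segment that starts at 2500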
=Tk()\n#root.columnconfigure(0,weight=1)\n#root.rowconfigure(0,weight=1)\nroot.resizable(0,0)\nx = currConv(root)\nroot.mainloop()\n","sub_path":"Currency-Convertor-master/Currency-Convertor-master/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"446041117","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2013 - Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nSAMPLE_TASK_SPEC = {\n 'action': 'MyRest:create-vm',\n 'name': 'create-vms',\n 'on-success': [\"format-volumes\", {'task1': 'expression'}, {'task2': ''}],\n 'on-finish': \"attach-volumes\",\n 'on-error': [\"task1\", \"task2\"]\n}\n\nfrom mistral.tests import base\nfrom mistral.workbook import tasks\n\n\nclass GetOnStateTest(base.BaseTest):\n def setUp(self):\n super(GetOnStateTest, self).setUp()\n self.task = tasks.TaskSpec(SAMPLE_TASK_SPEC)\n\n def test_state_finish(self):\n on_finish = self.task.get_on_finish()\n self.assertIsInstance(on_finish, dict)\n self.assertIn(\"attach-volumes\", on_finish)\n\n def test_state_error(self):\n on_error = self.task.get_on_error()\n self.assertIsInstance(on_error, dict)\n self.assertEqual(len(on_error), 2)\n self.assertIn(\"task1\", on_error)\n\n def test_state_success(self):\n on_success = self.task.get_on_success()\n self.assertIsInstance(on_success, dict)\n self.assertEqual(len(on_success), 3)\n self.assertIn(\"task1\", on_success)\n self.assertIsNotNone(on_success[\"task1\"])\n","sub_path":"mistral/tests/unit/workbook/test_get_on_state.py","file_name":"test_get_on_state.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"485534631","text":"# coding: utf-8\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
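# Hedged sketch of the normalisation the TaskSpec getters appear to perform,
# judging by the assertions above: a mixed list of task names and
# {name: expression} dicts becomes one {name: expression-or-None} mapping.
# This is not mistral's actual implementation, only an illustration.
def normalize(spec):
    out = {}
    for item in spec if isinstance(spec, list) else [spec]:
        if isinstance(item, dict):
            out.update({k: (v or None) for k, v in item.items()})
        else:
            out[item] = None
    return out

print(normalize(["format-volumes", {"task1": "expression"}, {"task2": ""}]))
# {'format-volumes': None, 'task1': 'expression', 'task2': None}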
See License.txt in the project root for\n# license information.\n# -------------------------------------------------------------------------\nfrom testcase import PurviewScanningPowerShellPreparer\nfrom testcase_async import PurviewScanningTestAsync\nfrom azure.purview.scanning.rest import data_sources\n\nclass PurviewScanningSmokeTestAsync(PurviewScanningTestAsync):\n\n @PurviewScanningPowerShellPreparer()\n async def test_basic_smoke_test(self, purviewscanning_endpoint):\n request = data_sources.build_list_all_request()\n\n client = self.create_async_client(endpoint=purviewscanning_endpoint)\n response = await client.send_request(request)\n response.raise_for_status()\n assert response.status_code == 200\n json_response = response.json()\n assert set(json_response.keys()) == set(['value', 'count'])\n assert len(json_response['value']) == json_response['count']\n","sub_path":"sdk/purview/azure-purview-scanning/tests/test_smoke_async.py","file_name":"test_smoke_async.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"634941527","text":"import os\nfrom flask import Flask, render_template, request, redirect, url_for, flash, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import create_engine\nfrom database_setup import Base, Company, Applicant, Job, MatchScore\nfrom sqlalchemy.orm import sessionmaker\nimport dbOperations, magic\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\napp.secret_key = os.environ['tesis_secret_key']\n\n#app.config.from_object(os.environ['APP_SETTINGS'])\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nUPLOAD_FOLDER = '/static/cv'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndb_user = os.environ['db_user']\ndb_pass = os.environ['db_pass']\ndb_host = os.environ['db_host']\ndb_port = os.environ['db_port']\n\ndb = create_engine(\"postgresql+psycopg2://{}:{}@{}:{}/{}?sslmode=require\".format(db_user, db_pass, db_host, db_port, 'postgres'))\nBase.metadata.bind = db\n\nDBSession = sessionmaker(bind=db)\nsession = DBSession()\n\n@app.route('/', methods=['GET', 'POST'])\ndef showMain():\n if request.method == 'POST':\n mail = request.form['email']\n password = request.form['password']\n client_type = request.form['radiosTipodecliente']\n action = \"\"\n try:\n action = request.form['login']\n except:\n pass\n try:\n action = request.form['signup']\n except:\n pass\n if action == 'login':\n if client_type == \"applicant\":\n if dbOperations.validateApplicant(mail, password):\n return redirect(url_for('showApplicant', applicant_id=dbOperations.getApplicantID(mail)))\n else:\n flash(\"El correo o contraseña son incorrectos. Por favor intenta de nuevo\")\n else:\n if dbOperations.validateCompany(mail, password):\n return redirect(url_for('showCompany', company_id=dbOperations.getCompanyID(mail)))\n else:\n flash(\"El correo o contraseña son incorrectos. 
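# The login flow above delegates credential checks to dbOperations; if those
# helpers compare stored plaintext passwords, a safer drop-in is werkzeug's
# hashing utilities (Werkzeug is already a Flask dependency). Sketch only,
# not the app's actual code:
from werkzeug.security import generate_password_hash, check_password_hash

stored = generate_password_hash("s3cret")        # salted hash, safe to store
print(check_password_hash(stored, "s3cret"))     # True
print(check_password_hash(stored, "wrong"))      # False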
Por favor intenta de nuevo\")\n else:\n if client_type == \"applicant\":\n if dbOperations.validateMail(mail):\n flash(\"Ya existe un aplicante registrado con ese correo\")\n else:\n return redirect(url_for('newApplicant', app_mail=mail))\n else:\n if dbOperations.validateMail(mail):\n flash(\"Ya existe una compañía registrado con ese correo\")\n else:\n return redirect(url_for('newCompany', mail = mail))\n return render_template(\"main.html\")\n\n\n@app.route('/applicant/new', methods=['GET', 'POST'])\n@app.route('/applicant/new/', methods=['GET', 'POST'])\ndef newApplicant(app_mail=\"\"):\n try:\n if request.method == 'POST':\n mail = request.form['email']\n password = request.form['password']\n name = request.form['name']\n dbOperations.createApplicant(name, mail, password)\n applicantID = session.query(Applicant).filter(Applicant.mail == mail).one().id\n cv = request.files['file']\n\n #Guarda su CV en el folder static/cv con el nombre CV + el id del usuario\n filename = \"CV\" + str(applicantID) + \".pdf\"\n cv.save(secure_filename(filename))\n\n return redirect(url_for('demoTestApplicant', applicant_id = applicantID))\n return render_template('signUpApplicant.html', mail = app_mail)\n except Exception as e:\n print(e)\n print(\"El error ocurrió en la función newApplicant de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/applicant/new/demotest/', methods=['GET', 'POST'])\ndef demoTestApplicant(applicant_id):\n try:\n if request.method == 'POST':\n birthdate = request.form['date']\n zipcode = request.form['zipCode']\n gender = request.form['gender']\n civil = request.form['civil']\n dependientes = request.form['dependientes']\n estudios = request.form['estudios']\n dbOperations.addDemo(birthdate, zipcode, gender, civil, dependientes, estudios, applicant_id)\n return redirect(url_for('personalityTestApplicant', applicant_id = applicant_id))\n return render_template(\"demoApplicants.html\", applicant_id = applicant_id)\n except Exception as e:\n print(e)\n print(\"El error ocurrió en la función demoTestApplicant de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/applicant/new/perstest/', methods=['GET', 'POST'])\ndef personalityTestApplicant(applicant_id):\n try:\n if request.method == 'POST':\n aux = request.form.to_dict()\n dbOperations.addPersonality(aux, applicant_id)\n return redirect(url_for('mathTestApplicant', applicant_id = applicant_id))\n return render_template(\"personalityApplicants.html\", applicant_id = applicant_id)\n except Exception as e:\n print(e)\n print(\"El error ocurrió en la función personalityTestApplicant de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/applicant/new/mathtest/', methods=['GET', 'POST'])\ndef mathTestApplicant(applicant_id):\n try:\n if request.method == 'POST':\n aux = request.form.to_dict()\n dbOperations.addMath(aux, applicant_id)\n return redirect(url_for('showApplicant', applicant_id = applicant_id))\n return render_template(\"mathApplicants.html\", applicant_id = applicant_id)\n except Exception as e:\n print(e)\n print(\"El error ocurrió en la función mathTestApplicant de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/company/new', methods=['GET', 'POST'])\n@app.route('/company/new/', methods=['GET', 'POST'])\ndef 
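# The CV save above passes only a bare filename to save(), so the file lands
# in the process working directory rather than app.config['UPLOAD_FOLDER'].
# A sketch of the usual pattern (names and paths illustrative):
import os
from werkzeug.utils import secure_filename

def save_upload(file_storage, upload_folder, applicant_id):
    filename = secure_filename("CV{}.pdf".format(applicant_id))
    path = os.path.join(upload_folder, filename)
    file_storage.save(path)    # file_storage is the request.files[...] object
    return path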
newCompany(mail=\"\", password=\"\"):\n try:\n if request.method == 'POST':\n mail = request.form['email']\n password = request.form['up1']\n name = request.form['name']\n description = request.form['description']\n dbOperations.createCompany(name, mail, password, description)\n companyID = session.query(Company).filter(Company.mail == mail).one().id\n return redirect(url_for('showCompany', company_id = companyID))\n return render_template('signUpCompany.html', mail = mail)\n except Exception as e:\n print(e)\n print(\"El error ocurrión en la función newCompany de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/job//new', methods=['GET', 'POST'])\ndef newJob(company_id):\n try:\n if request.method == 'POST':\n title = request.form['title']\n description = request.form['description']\n openings = request.form['openings']\n salary = request.form['salary']\n activa = request.form['radiosactiva']\n status = False\n zipcode = request.form['zipcode']\n if activa == 'activa':\n status = True\n dbOperations.createJob(title, salary, description, company_id, openings, status, zipcode)\n job_id = session.query(Job).filter(Job.company_id == company_id, Job.title == title).one().id\n return redirect(url_for('demoTestJob', company_id = company_id, job_id = job_id))\n else:\n return render_template('createJob.html', company_id = company_id)\n except Exception as e:\n print(e)\n print(\"El error ocurrión en la función newJob de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/job/new/demotest//', methods=['GET', 'POST'])\ndef demoTestJob(company_id, job_id):\n try:\n if request.method == 'POST':\n birthdate = request.form['date']\n zipcode = request.form['zipCode']\n gender = request.form['gender']\n civil = request.form['civil']\n dependientes = request.form['dependientes']\n estudios = request.form['estudios']\n dbOperations.addDemoJob(birthdate, zipcode, gender, civil, dependientes, estudios, company_id, job_id)\n return redirect(url_for('personalityTestJob', company_id = company_id, job_id = job_id))\n return render_template(\"demoJob.html\", company_id = company_id, job_id = job_id)\n except Exception as e:\n print(e)\n print(\"El error ocurrió en la función demoTestJob de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/job/new/perstest//', methods=['GET', 'POST'])\ndef personalityTestJob(company_id, job_id):\n try:\n if request.method == 'POST':\n aux = request.form.to_dict()\n dbOperations.addPersonalityJob(aux, company_id, job_id)\n return redirect(url_for('mathTestJob', company_id = company_id, job_id = job_id))\n return render_template(\"personalityJob.html\", company_id = company_id, job_id = job_id)\n except Exception as e:\n print(e)\n print(\"El error ocurrió en la función personalityTestJob de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/job/new/mathtest//', methods=['GET', 'POST'])\ndef mathTestJob(company_id, job_id):\n try:\n if request.method == 'POST':\n aux = request.form.to_dict()\n dbOperations.addMathJob(aux, job_id)\n return redirect(url_for('showCompany', company_id = company_id))\n return render_template(\"mathJob.html\", company_id = company_id, job_id = job_id)\n except Exception as e:\n print(e)\n print(\"El error ocurrió en la función mathTestJob de 
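# The route bodies above repeat the same try/except/flash/render block. One
# way to factor that out is a small decorator; a sketch with invented names,
# keeping the app's Spanish user-facing message:
from functools import wraps
from flask import flash, render_template

def with_error_page(view):
    @wraps(view)
    def wrapper(*args, **kwargs):
        try:
            return view(*args, **kwargs)
        except Exception as e:
            print(e)
            flash("Ocurrió un error, por favor intentalo de nuevo")
            return render_template("main.html")
    return wrapper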
main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/company//feed')\ndef showCompany(company_id):\n try:\n jobs = session.query(Job).filter(Job.company_id == company_id).all()\n company = session.query(Company).filter(Company.id == company_id).one()\n return render_template(\"showCompany.html\", jobs = jobs, company = company)\n except Exception as e:\n print(e)\n print(\"El error ocurrió en showCompany de main.py\")\n flash(\"Ocurrió un error, por favor vuelve a intentarlo\")\n return render_template(\"main.html\")\n\n\n@app.route('/applicant//feed')\ndef showApplicant(applicant_id):\n try:\n matches = magic.getListOfMatchesForApplicant(applicant_id)\n applicant = session.query(Applicant).filter(Applicant.id == applicant_id).one()\n return render_template(\"showApplicant.html\", matches = matches, applicant = applicant)\n except Exception as e:\n print(e)\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\") \n\n\n@app.route('/job//feed')\ndef showJob(job_id):\n try:\n matches = magic.getListOfMatchesForJob(job_id)\n job = session.query(Job).filter(Job.id == job_id).one()\n company = session.query(Company).filter(Company.id == job.company_id).one()\n return render_template(\"showJob.html\", matches = matches, job = job, company = company)\n except Exception as e:\n print(e)\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n \n\n@app.route('/applicant//edit')\ndef editApplicant(applicant_id):\n return \"This should show the option to edit an applicant's information\"\n\n\n@app.route('/applicant//delete')\ndef deleteApplicant(applicant_id):\n return \"This should show the option to delete an applicant's information\"\n\n\n@app.route('/company//edit')\ndef editCompany(company_id):\n return \"This should show the option to edit a company's information\"\n\n\n@app.route('/company//delete')\ndef deleteCompany(company_id):\n return \"This should show the option to delete a company's information\"\n\n\n@app.route('/job//edit')\ndef editJob(job_id):\n return \"This should show the option to edit a job's information\"\n\n\n@app.route('/job//delete')\ndef deleteJob(job_id):\n return \"This should show the option to delete a job's information\"\n\n\n@app.route('/job///interest')\ndef showInterestCompany(job_id, applicant_id):\n try:\n offer = session.query(MatchScore).filter(MatchScore.job_id == job_id, MatchScore.applicant_id == applicant_id).one()\n offer.interest_job = True\n session.commit()\n applicant = session.query(Applicant).filter(Applicant.id == applicant_id).one()\n job = session.query(Job).filter(Job.id == job_id).one()\n companyId = job.company_id\n if dbOperations.sendInfoToCompany(applicant_id, job_id):\n return render_template('showInterestJob.html', company_id = companyId, applicant = applicant , job = job)\n else:\n print(\"El error ocurrión en la función showInterestJob de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n except Exception as e:\n print(e)\n print(\"El error ocurrión en la función showInterestJob de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/applicant///interest')\ndef showInterestApplicant(job_id, applicant_id):\n try:\n offer = session.query(MatchScore).filter(MatchScore.job_id == job_id, MatchScore.applicant_id == applicant_id).one()\n 
offer.interest_applicant = True\n session.commit()\n #return\"This should be the confirmation of a job interest by applicant \"+str(applicant_id)+ \" in job \" +str(job_id)\n return render_template('showInterestApplicant.html', applicantID = applicant_id)\n except Exception as e:\n print(e)\n print(\"El error ocurrión en la función showInterestApplicant de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\n@app.route('/contact') \ndef showContact():\n return render_template('contact.html')\n\n\n@app.route('/about') \ndef showAbout():\n return render_template('about.html')\n\n\n@app.route('/company//myApplicants') \ndef showMyApplicants(company_id):\n '''\n This function will take a company id and return a two dimensional list\n with all the jobs that company has posted followed by the applicants that\n have matched to it. For example:\n [[job1, applicant1, applicant2],\n [job2, applicant1, applicant3, applicant 6],\n [job3]\n [job4, applicant 2, applicant3, applicant4]]\n '''\n try:\n jobs_matches = []\n jobs = session.query(Job).filter(Job.company_id == company_id).all()\n for i in jobs:\n # create a list that starts with the job (as a job object) and follows with all of that jobs matches (as applicant objects)\n job = [i]\n matches = session.query(MatchScore).filter(MatchScore.job_id == i.id, MatchScore.interest_job == True)\n for j in matches:\n applicant = [session.query(Applicant).filter(Applicant.id == j.applicant_id).one(), j.scores]\n job.append(applicant)\n jobs_matches.append(job)\n company = session.query(Company).filter(Company.id == company_id).one()\n return render_template('myApplicants.html', jobsMatches = jobs_matches, company = company)\n except Exception as e:\n print(e)\n print(\"El error ocurrión en la función showMyApplicants de main.py\")\n flash(\"Ocurrió un error, por favor intentalo de nuevo\")\n return render_template(\"main.html\")\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()#(host='0.0.0.0', port=5000)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"100045556","text":"#!/usr/bin/env python\n#coding=utf-8\n# Filename: smtp.py\n\n'''\nemail发送\n@author: 15th\n@data: 2017.2.28\n'''\n\nimport re\nimport time\nimport smtplib\nfrom email.header import Header\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import parseaddr, formataddr\nfrom simpleat.conf import settings\nfrom . import template\n\n_SMTP_SERVER = settings.SMTP_SERVER # [type:str] SMTP服务器地址\n_SMTP_PORT = settings.SMTP_PORT # [type:int] SMTP服务器端口\n_SMTP_SSL = settings.SMTP_SSL # [type:bool] STMP服务器是否加密SMTP会话\n_SENDER_EMAIL = settings.SENDER_EMAIL # [type:str] 结果报告单发信人邮箱地址\n_SENDER_ID = settings.SENDER_ID # [type:str] 结果报告单发件人邮箱登录名\n_SENDER_PWD = settings.SENDER_PWD # [type:str] 结果报告单发件人邮箱登录密码\n_RECVER_EMAIL = settings.RECVER_EMAIL # [type:str/list] 结果报告单收件人\n\n_RE_VARIABLE = re.compile(r'\\{\\{ .*? \\}\\}')\n_RE_TOKENS = re.compile(r'(\\{\\{ .*? 
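# Sketch of the tokenising trick behind _RE_TOKENS above: wrapping the
# pattern in a capturing group makes re.split keep the delimiters, so the
# '{{ VAR }}' tokens survive the split and can be substituted in place.
import re

tokens = re.compile(r"(\{\{ .*? \}\})").split("Sent at {{ TIME }} by mailer")
print(tokens)  # ['Sent at ', '{{ TIME }}', ' by mailer']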
\\}\\})')\n\ndef _format_addr(strs):\n '''\n Encode non-ASCII (e.g. Chinese) display names in an address\n '''\n name, addr = parseaddr(strs)\n return formataddr(( \\\n Header(name, 'utf-8').encode(), \\\n addr.encode('utf-8') if isinstance(addr, unicode) else addr))\n\ndef parse_template(templatestr):\n '''\n Expand template variables\n\n Args:\n templatestr: the template string to expand\n\n Returns:\n the expanded string\n '''\n vardict = {\n 'SENDER_EMAIL' : _SENDER_EMAIL,\n 'TIME' : time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),\n 'DURATION' : '',\n }\n\n tokens = _RE_TOKENS.split(templatestr)\n buffered = []\n for token in tokens:\n if _RE_VARIABLE.match(token):\n variable = token.strip('{} ')\n buffered.append(vardict.get(variable, ''))\n else:\n buffered.append(token)\n return ''.join(buffered)\n\ndef send_mail(attfile=None):\n '''\n Send an email\n\n Args:\n attfile: [type:str/list] attachment file(s)\n a str is a single file; a list attaches every file it contains\n '''\n recver_addr = _RECVER_EMAIL if isinstance(_RECVER_EMAIL, list) else [_RECVER_EMAIL]\n\n msg = MIMEMultipart('multipart')\n msg['From'] = _format_addr(parse_template(template.EMAIL_FROM))\n msg['To'] = _format_addr(u','.join([('<%s>' % addr) for addr in recver_addr]))\n msg['Subject'] = Header(parse_template(template.EMAIL_TITLE), 'utf-8').encode()\n\n # email body\n plain = MIMEText(parse_template(template.EMAIL_CONTENT), 'plain', 'utf-8')\n msg.attach(plain)\n\n # build attachments\n if isinstance(attfile, str):\n attfile = [attfile]\n if attfile and isinstance(attfile, list):\n for attf in attfile:\n att = MIMEText(open(attf, 'rb').read(), 'base64', 'utf-8')\n att[\"Content-Type\"] = 'application/octet-stream'\n att[\"Content-Disposition\"] = 'attachment; filename=\"'+attf+'\"'\n msg.attach(att)\n\n\n server = smtplib.SMTP(_SMTP_SERVER, _SMTP_PORT)\n if _SMTP_SSL:\n # establish an encrypted connection\n server.starttls()\n server.set_debuglevel(1)\n server.login(_SENDER_ID, _SENDER_PWD)\n server.sendmail(_SENDER_EMAIL, recver_addr, msg.as_string())\n server.quit()\n","sub_path":"simpleat/core/mail/smtp.py","file_name":"smtp.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"617014610","text":"#!/usr/bin/env python3\n\nimport speech_recognition as sr\n\n# get audio from the microphone\nr = sr.Recognizer()\nwith sr.Microphone() as source:\n print(\"Speak:\")\n audio=r.listen(source)\n\ntry:\n text = r.recognize_google(audio)\n print(\"You said \" + text)\n if(text == \"left\"):\n print(\"Car on your left\")\n elif(text == \"right\"):\n print(\"Car on your right\")\n \nexcept sr.UnknownValueError:\n print(\"Could not understand audio\")\nexcept sr.RequestError as e:\n print(\"Could not request results; {0}\".format(e))\n","sub_path":"speechrecognition.py","file_name":"speechrecognition.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"272409364","text":"def solve(a):\n even = 0\n odd = 0\n for num in a:\n try:\n testNum = int(num)\n print(num)\n except (TypeError, ValueError):\n continue\n if testNum % 2 == 0:\n even += 1\n else:\n odd += 1\n total = even - odd\n print(total)\n\nsolve([13, 6, 8, 15, 4, 8, 13])\n","sub_path":"codewars.py","file_name":"codewars.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"519438298","text":"from djangocg.contrib.messages.storage.base import BaseStorage\n\n\nclass SessionStorage(BaseStorage):\n \"\"\"\n Stores messages in the session (that is, djangocg.contrib.sessions).\n \"\"\"\n session_key = 
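# For comparison with the MIMEMultipart assembly above, the modern stdlib
# email API builds the same kind of message more directly. Addresses and
# attachment bytes here are placeholders:
from email.message import EmailMessage

msg = EmailMessage()
msg["From"], msg["To"], msg["Subject"] = "a@example.com", "b@example.com", "report"
msg.set_content("body text")
msg.add_attachment(b"col1,col2\n1,2\n", maintype="application",
                   subtype="octet-stream", filename="report.csv")
# sending would use smtplib.SMTP(...).send_message(msg) after starttls/login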
'_messages'\n\n def __init__(self, request, *args, **kwargs):\n assert hasattr(request, 'session'), \"The session-based temporary \"\\\n \"message storage requires session middleware to be installed, \"\\\n \"and come before the message middleware in the \"\\\n \"MIDDLEWARE_CLASSES list.\"\n super(SessionStorage, self).__init__(request, *args, **kwargs)\n\n def _get(self, *args, **kwargs):\n \"\"\"\n Retrieves a list of messages from the request's session. This storage\n always stores everything it is given, so return True for the\n all_retrieved flag.\n \"\"\"\n return self.request.session.get(self.session_key), True\n\n def _store(self, messages, response, *args, **kwargs):\n \"\"\"\n Stores a list of messages to the request's session.\n \"\"\"\n if messages:\n self.request.session[self.session_key] = messages\n else:\n self.request.session.pop(self.session_key, None)\n return []\n","sub_path":"djangocg/contrib/messages/storage/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"450283614","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport debug\nimport sys\nimport numpy as np\nimport pickle\nfrom tools import SwallowInitArgs\n\nlogger = debug.get_logger(__name__)\n\n\nclass PulseModel(SwallowInitArgs):\n\n def __init__(self, **kwargs):\n super(PulseModel, self).__init__(**kwargs)\n\n def quality(self, **kwargs):\n raise NotImplementedError\n\n def extend(self, **kwargs):\n raise NotImplementedError\n\n def reduce(self, **kwargs):\n raise NotImplementedError\n\n def get_parameters(self):\n raise NotImplementedError\n\n def set_parameters(self, parameters):\n raise NotImplementedError\n\n def to_file(self, file_path):\n with open(file_path, 'wb') as file:\n pickle.dump(self, file)\n\n @staticmethod\n def from_file(file_path):\n with open(file_path, 'rb') as file:\n return pickle.load(file)\n\n\nclass PulseLearner:\n\n def __init__(self, model, optimizer, verbosity=0):\n self.model = model\n self.optimizer = optimizer\n self.outer_loop_iterations = -1\n self.verbosity = verbosity\n\n def optimize(self, max_iterations=0, min_improvement=1e-10, pickle_file=None, **kwargs):\n self.outer_loop_iterations = 1\n model_quality = self.model.quality()\n if self.verbosity > 0:\n print(\"Starting outer loop\")\n print(\" max_iterations={}\".format(max_iterations))\n print(\" min_improvement={}\".format(min_improvement))\n print(\" initial model quality={}\".format(model_quality))\n while True:\n if self.verbosity > 1:\n print(\"extend model: {}\".format(self.model))\n sys.stdout.flush()\n self.model.extend()\n if self.verbosity > 1:\n print(\"optimize model: {}\".format(self.model))\n sys.stdout.flush()\n if 'optimizer_kwargs' in locals():\n optimizer_kwargs = locals()['optimizer_kwargs']\n else:\n optimizer_kwargs = {}\n new_parameters = self.optimizer.optimize(model=self.model,\n init_weights=self.model.get_parameters(),\n **optimizer_kwargs)\n self.model.set_parameters(new_parameters)\n if self.verbosity > 1:\n print(\"reduce model: {}\".format(self.model))\n self.model.reduce()\n if pickle_file is not None:\n self.model.to_file(pickle_file.format(str(self.outer_loop_iterations).rjust(4, '0')))\n if self.verbosity > 0:\n print(\" iteration {}:\".format(self.outer_loop_iterations))\n print(\" model quality = {}\".format(self.model.quality()))\n print(\" mean likelihood = {}\".format(np.exp(self.model.quality())))\n if self.model.quality() - model_quality < 
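# Round-trip sketch of the pickle persistence used by PulseModel.to_file /
# from_file above (the object and file name are illustrative):
import os
import pickle
import tempfile

obj = {"weights": [0.1, 0.2]}
path = os.path.join(tempfile.gettempdir(), "model.pkl")
with open(path, "wb") as f:
    pickle.dump(obj, f)
with open(path, "rb") as f:
    assert pickle.load(f) == obj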
min_improvement:\n if self.verbosity > 0:\n print(\"Stopping (quality improvement={}, min_improvement={})\".format(\n self.model.quality() - model_quality,\n min_improvement\n ))\n break\n else:\n model_quality = self.model.quality()\n if self.outer_loop_iterations >= max_iterations > 0:\n if self.verbosity > 0:\n print(\"Stopping (max_iterations={})\".format(max_iterations))\n break\n else:\n self.outer_loop_iterations += 1\n if self.verbosity > 1:\n print(\"final model: {}\".format(self.model))\n","sub_path":"pulse.py","file_name":"pulse.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"315619112","text":"import json\nfrom enemy import Enemy\nfrom location import Location\nfrom race import Race\nfrom weapon import Weapon\nfrom spell import Spell\n\n\ndef load_json_files():\n \"\"\"Load all of the json files\"\"\"\n load_enemies()\n load_locations()\n load_races()\n load_weapons()\n load_spells()\n\n\ndef get_json_data(file_name):\n \"\"\" Open json file of given name and return data in the form of a dict \"\"\"\n file_name = \"json/\" + file_name\n with open(file_name) as data_file:\n data = json.load(data_file)\n return data\n\n\ndef load_enemies():\n \"\"\" Load all enemies from json/enemies.json \"\"\"\n enemy_data = get_json_data(\"enemies.json\")\n for enemy in enemy_data:\n Enemy.add_enemy(Enemy(enemy_data[enemy]))\n\n\ndef load_locations():\n \"\"\" Load all locations from json/locations.json \"\"\"\n location_data = get_json_data(\"locations.json\")\n for location in location_data:\n Location.add_location(Location(location_data[location]))\n\n\ndef load_races():\n \"\"\" Load all races from json/races.json \"\"\"\n race_data = get_json_data(\"races.json\")\n for race in race_data:\n Race.add_race(Race(race_data[race]))\n\n\ndef load_weapons():\n \"\"\" Load all weapons from json/weapons.json \"\"\"\n weapon_data = get_json_data(\"weapons.json\")\n for weapon in weapon_data:\n Weapon.add_weapon(Weapon(weapon_data[weapon]))\n\n\ndef load_spells():\n \"\"\" Load all spells from json/spells.json \"\"\"\n spell_data = get_json_data(\"spells.json\")\n for spell in spell_data:\n new_spell = Weapon\n","sub_path":"json_loader.py","file_name":"json_loader.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"208759302","text":"# Sales Tax Calculator\r\nprint ('Ariel Merriman\\'s Sales Tax Calculator')\r\n# Declare tax amounts\r\nSTATE_RATE = 0.05\r\nCOUNTY_RATE = 0.025\r\nSubtotal = 0\r\nStateTax = 0\r\nCountyTax = 0\r\nTotalTax = 0\r\nTotalPurchase = 0\r\n# Receive user input\r\nSubtotal = float(input('What is your total? '))\r\n# Calculate tax amounts for subtotal\r\nStateTax = Subtotal * STATE_RATE\r\nCountyTax = Subtotal * COUNTY_RATE\r\nTotalTax = StateTax + CountyTax\r\n# Calculate grand total\r\nTotalPurchase = Subtotal + StateTax + CountyTax\r\n# Display results\r\nprint ('Subtotal: $',format(Subtotal,'.2f'),sep='')\r\nprint ('State Tax: $',format(StateTax,'.2f'),sep='')\r\nprint ('County Tax: $',format(CountyTax,'.2f'),sep='')\r\nprint ('Total Tax: $',format(TotalTax,'.2f'),sep='')\r\nprint ('Total: $',format(TotalPurchase,'.2f'),sep='')\r\n","sub_path":"M02_Ex6.py","file_name":"M02_Ex6.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"324036881","text":"#! 
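# Worked numbers for the tax computation in M02_Ex6.py above: with the 5%
# state rate and 2.5% county rate, a 100.00 subtotal breaks down as follows.
subtotal = 100.00
state_tax = subtotal * 0.05       # 5.00
county_tax = subtotal * 0.025     # 2.50
total = subtotal + state_tax + county_tax
print(format(total, '.2f'))       # 107.50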
/usr/bin/env python3\n\n# pylint: disable = no-self-use, unused-argument\n\nimport unittest\nimport unittest.mock\n\nfrom tests import util\n\nimport yakserver.__main__\n\n\nMAIN_LOOP_PATCH_TARGET = 'yakserver.__main__.Application.main_loop_iteration'\n\n\nclass TestApplication(util.TestCase):\n @util.run_for_iterations(0)\n def test_main_loop_does_nothing_if_server_not_running(self):\n main_loop_iterator_mock = self.start_patch(MAIN_LOOP_PATCH_TARGET).mock\n application = yakserver.__main__.Application()\n\n application.main_loop()\n\n main_loop_iterator_mock.assert_not_called()\n\n @util.run_for_iterations(3)\n def test_main_loop_iterates_untill_server_stopped(self):\n main_loop_iterator_mock = self.start_patch(MAIN_LOOP_PATCH_TARGET).mock\n application = yakserver.__main__.Application()\n\n application.main_loop()\n\n self.assertEqual(main_loop_iterator_mock.call_count, 3)\n\n def test_main_loop_iteration_gets_event(self):\n application_mock = unittest.mock.Mock()\n\n yakserver.__main__.Application.main_loop_iteration(application_mock)\n\n application_mock.get_event.assert_called_once()\n\n def test_main_loop_iteration_handles_event(self):\n application_mock = unittest.mock.Mock()\n\n yakserver.__main__.Application.main_loop_iteration(application_mock)\n\n expected_arg = application_mock.get_event.return_value\n application_mock.handle_event.assert_called_once_with(expected_arg)\n\n\nclass TestMainFunction(util.TestCase):\n def setUp(self):\n application_patch = self.start_patch('yakserver.__main__.Application')\n self.application_mock = application_patch.mock.return_value\n\n def test_main_function_calls_main_loop(self):\n yakserver.__main__.main()\n\n self.application_mock.main_loop.assert_called_once()\n\n def test_main_function_calls_setup_before_main_loop(self):\n expected_calls = (unittest.mock.call.setup(),\n unittest.mock.call.main_loop())\n\n yakserver.__main__.main()\n\n self.application_mock.assert_has_calls(expected_calls)\n","sub_path":"tests/unit/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"46260801","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 16 09:45:45 2017\r\n\r\n@author: uids6325\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport threading, logging, time\r\n\r\nimport csv\r\nimport json\r\n\r\nfrom kafka import KafkaConsumer, KafkaProducer\r\n\r\n\r\nclass Producer(threading.Thread):\r\n daemon = True\r\n\r\n def run(self):\r\n \r\n csvfile = open('D:\\\\Hoses\\\\data\\\\experiment#1\\\\test0','r')\r\n \r\n fieldname = ('ID','acc_x','acc_y','acc_z','gyro_x','gyro_y','gyro_z','temperaure','timestamp')\r\n reader = csv.DictReader(csvfile,fieldname)\r\n# producer = KafkaProducer(bootstrap_servers='localhost:9092')\r\n producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'))\r\n# producer.send('json_topic1',{'a':'*************************************'})\r\n for row in reader:\r\n jd = json.dumps(row)\r\n producer.send('json_topic1',jd)\r\n \r\n \r\n \r\n producer.flush()\r\n time.sleep(1)\r\n\r\n \r\n \r\n \r\n\r\nclass Consumer(threading.Thread):\r\n daemon = True\r\n\r\n def run(self):\r\n consumer = KafkaConsumer(bootstrap_servers='localhost:9092',\r\n auto_offset_reset='earliest')\r\n consumer.subscribe(['json-topic'])\r\n\r\n for message in consumer:\r\n print (message)\r\n\r\n\r\ndef main():\r\n threads = [\r\n Producer(),\r\n Consumer()\r\n ]\r\n\r\n for t in 
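# Two notes on the Kafka demo above, as a hedged sketch (broker address is
# illustrative and a running broker is assumed): the producer json.dumps(row)
# before sending while its value_serializer also json.dumps the value, so
# messages arrive double-encoded; passing the dict straight through encodes
# once. Also, the producer writes to 'json_topic1' while the consumer
# subscribes to 'json-topic', so the consumer never sees those messages.
import json
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers='localhost:9092',
                         value_serializer=lambda v: json.dumps(v).encode('utf-8'))
producer.send('json_topic1', {'ID': '1', 'acc_x': '0.01'})  # dict in, JSON out once
producer.flush()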
threads:\r\n t.start()\r\n\r\n time.sleep(10)\r\n\r\nif __name__ == \"__main__\":\r\n logging.basicConfig(\r\n format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',\r\n level=logging.INFO\r\n )\r\n main()","sub_path":"test_kafka.py","file_name":"test_kafka.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"382845779","text":"import os\nimport sys\nimport time\n\nprint(\"AlanS Pyhon Startup (.pythonrc)\")\n\nsys.stdout.write(\"Executing hello_world.py in \")\nsys.stdout.flush()\ntime.sleep(1)\n\ni=5\nSleep = 1\n\nwhile i >= Sleep:\n\tsys.stdout.write(str(i) + str(\" \"))\n\tsys.stdout.flush()\n\ttime.sleep(1)\n\ti=i-1\n\nprint(\"\")\n\nif os.path.isfile('/Users/alans/Scripts/Python/hello_world.py'):\n\texec(open('/Users/alans/Scripts/Python/hello_world.py').read())\n","sub_path":"Python/.pythonrc.py","file_name":".pythonrc.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"11757143","text":"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 et:\n\nimport numpy as np\nfrom . import tools\nimport copy\nimport random\n\ndef extract_edge_from_faces(faces):\n \"\"\"\n Transfer faces relationship into edge relationship\n \n Parameters:\n ----------\n faces: faces array\n \n Return:\n -------\n edge: edge, format as [(i1,j1), (i2,j2), ...]\n\n Example:\n -------\n >>> edge = extract_edge_from_faces(faces)\n \"\"\"\n import itertools\n edge = []\n for i,fa in enumerate(faces):\n c_gen = itertools.combinations(fa, 2)\n for j in c_gen:\n if j not in edge:\n edge.append(j)\n print('{} finished'.format(i))\n return edge\n\nclass GenAdjacentMatrix(object):\n \"\"\"\n Generate adjacency matrix from edge or ring_list\n \n Return:\n --------\n ad_matrix: adjacent matrix\n \n Example:\n --------\n >>> gamcls = GenAdjacentMatrix()\n >>> admatrix = gamcls.from_edge(edge)\n \"\"\"\n def __init__(self):\n pass\n\n def from_edge(self, edge):\n \"\"\"\n Generate adjacent matrix from edge\n \n Parameters:\n -----------\n edge: edge list, which have the format like below, \n [(i1,j1), (i2,j2), ...] 
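# The `if j not in edge` membership test above costs O(len(edge)) per check;
# a set of canonically ordered pairs gives the same edge list in roughly
# O(1) per candidate. Sketch with a tiny two-triangle mesh:
import itertools

def edges_from_faces(faces):
    edges = set()
    for face in faces:
        for a, b in itertools.combinations(face, 2):
            edges.add((a, b) if a < b else (b, a))   # canonical order
    return edges

print(sorted(edges_from_faces([(0, 1, 2), (1, 2, 3)])))
# [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]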
\n note that i,j is the number of vertex/node\n \n Return:\n -----------\n adjmatrix: adjacent matrix\n \"\"\" \n assert isinstance(edge, list), \"edge should be a list\"\n edge_node_num = [len(i) for i in edge]\n assert edge_node_num.count(edge_node_num[0]) == len(edge_node_num), \"One edge should only contain 2 nodes\"\n node_number = np.max(edge)+1\n ad_matrix = np.zeros((node_number, node_number))\n for eg in edge:\n ad_matrix[eg] = 1\n ad_matrix = np.logical_or(ad_matrix, ad_matrix.T)\n ad_matrix = ad_matrix.astype('int')\n return ad_matrix\n\n def from_ring(self, ring):\n \"\"\"\n Generate adjacent matrix from ringlist\n \n Parameters:\n ----------\n ring: list of ring node, the format of ring list like below\n [{i1,j1,k1,...}, {i2,j2,k2,...}, ...]\n each element correspond to a index (index means a vertex)\n \n Return:\n ----------\n adjmatrix: adjacent matrix \n \"\"\"\n assert isinstance(ring, list), \"ring should be a list\"\n node_number = len(ring)\n adjmatrix = np.zeros((node_number, node_number))\n for i,e in enumerate(ring):\n for j in e:\n adjmatrix[i,j] = 1\n return adjmatrix\n\ndef get_masksize(mask, labelnum = None):\n \"\"\"\n Compute mask size in surface space\n \n Parameters:\n ----------\n mask: label image (mask)\n labelnum: mask's label number, use for group analysis\n\n Return:\n --------\n masksize: mask size of each roi\n\n Example:\n --------\n >>> masksize = get_masksize(mask)\n \"\"\"\n if mask.ndim == 3:\n mask = mask[:,0,0]\n labels = np.unique(mask)[1:]\n masksize = []\n if len(labels) != 0:\n if labelnum is None:\n labelnum = int(np.max(labels))\n for i in range(labelnum):\n masksize.append(len(mask[mask == i+1]))\n else:\n masksize.append(0)\n return np.array(masksize)\n \ndef get_signals(atlas, mask, method = 'mean', labelnum = None):\n \"\"\"\n Extract roi signals of atlas from mask\n \n Parameters:\n -----------\n atlas: atlas\n mask: mask, a label image\n method: 'mean', 'std', 'ste', 'max', 'vertex', etc.\n labelnum: mask's label numbers, add this parameters for group analysis\n\n Return:\n -------\n signals: signals of specific roi\n \n Example:\n -------\n >>> signals = get_signals(atlas, mask, 'mean')\n \"\"\"\n if atlas.ndim == 3:\n atlas = atlas[:,0,0]\n if mask.ndim == 3:\n mask = mask[:,0,0]\n \n \n labels = np.unique(mask)[1:]\n if labelnum is None:\n try:\n labelnum = int(np.max(labels))\n except ValueError as e:\n print('value in mask are all zeros')\n labelnum = 1\n if method == 'mean':\n calfunc = np.nanmean\n elif method == 'std':\n calfunc = np.nanstd\n elif method == 'max':\n calfunc = np.max\n elif method == 'vertex':\n calfunc = np.array\n elif method == 'ste':\n calfunc = tools.ste\n else:\n raise Exception('Miss paramter of method')\n signals = []\n for i in range(labelnum):\n if np.any(mask==i+1):\n signals.append(atlas[mask==i+1])\n else:\n signals.append(np.array([np.nan]))\n return [calfunc(sg) for sg in signals]\n\ndef get_vexnumber(atlas, mask, method = 'peak', labelnum = None):\n \"\"\"\n Get vertex number of rois from surface space data\n \n Parameters:\n -----------\n atlas: atlas\n mask: mask, a label image\n method: 'peak' ,'center', or 'vertex', \n 'peak' means peak vertex number with maximum signals from specific roi\n 'center', center of mass of roi\n 'vertex' means extract all vertex of roi\n labelnum: mask's label numbers, add this parameters for group analysis\n \n Return:\n -------\n vexnumber: vertex number\n\n Example:\n --------\n >>> vexnumber = get_vexnumber(atlas, mask, 'peak')\n \"\"\"\n if atlas.ndim == 
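# Minimal sketch of the per-ROI reduction pattern in get_signals above: pick
# the vertices where mask == label and reduce them with the chosen statistic,
# falling back to NaN when a label is absent. Arrays are illustrative.
import numpy as np

atlas = np.array([1.0, 2.0, 3.0, 4.0])
mask = np.array([1, 1, 2, 0])
means = [np.nanmean(atlas[mask == lbl]) if np.any(mask == lbl) else np.nan
         for lbl in range(1, 3)]
print(means)  # [1.5, 3.0]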
3:\n atlas = atlas[:,0,0]\n if mask.ndim == 3:\n mask = mask[:,0,0]\n labels = np.unique(mask)[1:]\n if labelnum is None:\n try:\n labelnum = int(np.max(labels))\n except ValueError as e:\n labelnum = 0\n\n extractpeak = lambda x: np.unravel_index(x.argmax(), x.shape)[0]\n extractcenter = _extractcenter\n extractvertex = lambda x: x[x!=0]\n \n if method == 'peak':\n calfunc = extractpeak\n elif method == 'center':\n calfunc = extractcenter\n elif method == 'vertex':\n calfunc = extractvertex\n else:\n raise Exception('Miss parameter of method')\n\n vexnumber = []\n for i in range(labelnum):\n roisignal = atlas*(mask==(i+1))\n if np.any(roisignal):\n vexnumber.append(calfunc(roisignal))\n else:\n vexnumber.append(np.array([np.nan]))\n return vexnumber\n\ndef _extractcenter(roisignal):\n \"\"\"\n Compute center of mass from ROI which included magnitudes in each vertex\n Surface has its special data construction, they're not consistent across whole ROI. It's better to use mapping method to solve this problem.\n\n Parameters:\n -----------\n roisignal: ROI that included magnitudes in each vertex\n\n Returns:\n --------\n center_point: center of mass from ROI\n \"\"\"\n raw_idx = np.where(roisignal!=0)[0] \n new_idx = tools.convert_listvalue_to_ordinal(raw_idx)\n new_center = sum(roisignal[e]*new_idx[i] for i,e in enumerate(raw_idx))/sum(roisignal[e] for _,e in enumerate(raw_idx))\n new_center = int(new_center)\n center_point = (raw_idx[i] for i,e in enumerate(new_idx) if e == new_center).next()\n return center_point\n\n\ndef surf_dist(vtx_src, vtx_dst, one_ring_neighbour):\n \"\"\"\n Distance between vtx_src and vtx_dst\n Measured by edge number\n \n Parameters:\n -----------\n vtx_src: source vertex, int number\n vtx_dst: destinated vertex, int number\n one_ring_neighbour: one ring neighbour matrix, computed from get_n_ring_neighbour with n=1\n the format of this matrix:\n [{i1,j1,...}, {i2,j2,k2}]\n each element correspond to a vertex label\n\n Return:\n -------\n dist: distance between vtx_src and vtx_dst\n\n Example:\n --------\n >>> dist = surf_dist(vtx_src, vtx_dst, one_ring_neighbour)\n \"\"\"\n if len(one_ring_neighbour[vtx_dst]) == 1:\n return np.inf\n \n noderep = copy.deepcopy(one_ring_neighbour[vtx_src])\n dist = 1\n while vtx_dst not in noderep:\n temprep = set()\n for ndlast in noderep:\n temprep.update(one_ring_neighbour[ndlast])\n noderep.update(temprep)\n dist += 1\n return dist\n \ndef hausdoff_distance(imgdata1, imgdata2, label1, label2, one_ring_neighbour):\n \"\"\"\n Compute hausdoff distance between imgdata1 and imgdata2\n h(A,B) = max{max(i->A)min(j->B)d(i,j), max(j->B)min(i->A)d(i,j)}\n \n Parameters:\n -----------\n imgdata1: surface image data1\n imgdata2: surface image data2\n label1: label of image data1\n label2: label of image data2\n one_ring_neighbour: one ring neighbour matrix, similar description of surf_dist, got from get_n_ring_neighbour\n\n Return:\n -------\n hd: hausdorff distance\n \n Example:\n --------\n >>> hd = hausdoff_distance(imgdata1, imgdata2, 1, 1, one_ring_neighbour)\n \"\"\"\n imgdata1 = tools.get_specificroi(imgdata1, label1)\n imgdata2 = tools.get_specificroi(imgdata2, label2)\n hd1 = _hausdoff_ab(imgdata1, imgdata2, one_ring_neighbour) \n hd2 = _hausdoff_ab(imgdata2, imgdata1, one_ring_neighbour)\n return max(hd1, hd2)\n \ndef _hausdoff_ab(a, b, one_ring_neighbour):\n \"\"\"\n Compute hausdoff distance of h(a,b)\n part unit of function hausdoff_distance\n \n Parameters:\n -----------\n a: array with 1 label\n b: array with 1 label\n 
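# surf_dist above is breadth-first expansion over one-ring neighbourhoods:
# grow the frontier one ring at a time until the target appears; the number
# of expansions is the edge distance. Tiny path-graph example (0-1-2-3),
# assuming dst is reachable from src:
one_ring = [{1}, {0, 2}, {1, 3}, {2}]

def dist(src, dst, ring):
    seen, frontier, d = {src} | ring[src], set(ring[src]), 1
    while dst not in seen:
        frontier = set().union(*(ring[v] for v in frontier)) - seen
        seen |= frontier
        d += 1
    return d

print(dist(0, 3, one_ring))  # 3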
def _hausdoff_ab(a, b, one_ring_neighbour):\n \"\"\"\n Compute Hausdorff distance term h(a,b)\n helper for function hausdoff_distance\n \n Parameters:\n -----------\n a: array with 1 label\n b: array with 1 label\n one_ring_neighbour: one ring neighbour matrix\n\n Return:\n -------\n h: hausdorff(a,b)\n\n \"\"\"\n a = np.array(a)\n b = np.array(b)\n h = 0\n for i in np.flatnonzero(a):\n hd = np.inf\n for j in np.flatnonzero(b):\n d = surf_dist(i,j, one_ring_neighbour) \n if d<hd:\n hd = d\n if hd>h:\n h = hd\n return h\n\ndef median_minimal_distance(imgdata1, imgdata2, label1, label2, one_ring_neighbour):\n \"\"\"\n Compute median minimal distance between two images\n mmd = median{min(i->A)d(i,j), min(j->B)d(i,j)}\n for details please read the paper:\n Groupwise whole-brain parcellation from resting-state fMRI data for network node identification\n \n Parameters:\n -----------\n imgdata1, imgdata2: surface data 1, 2\n label1, label2: label of surface data 1 and 2 used for comparison\n one_ring_neighbour: one ring neighbour matrix, similar description as in surf_dist, got from get_n_ring_neighbour\n \n Return:\n -------\n mmd: median minimal distance\n\n Example:\n --------\n >>> mmd = median_minimal_distance(imgdata1, imgdata2, label1, label2, one_ring_neighbour)\n \"\"\"\n imgdata1 = tools.get_specificroi(imgdata1, label1)\n imgdata2 = tools.get_specificroi(imgdata2, label2)\n dist1 = _mmd_ab(imgdata1, imgdata2, one_ring_neighbour)\n dist2 = _mmd_ab(imgdata2, imgdata1, one_ring_neighbour)\n return np.median(dist1 + dist2)\n\ndef _mmd_ab(a, b, one_ring_neighbour):\n \"\"\"\n Compute the minimal distances between a,b\n \n part of the computation of median_minimal_distance\n\n Parameters:\n -----------\n a, b: array with 1 label\n one_ring_neighbour: one ring neighbour matrix\n\n Return:\n -------\n h: list of minimal distances, one per nonzero vertex of a\n \"\"\"\n a = np.array(a)\n b = np.array(b)\n h = []\n for i in np.flatnonzero(a):\n hd = np.inf\n for j in np.flatnonzero(b):\n d = surf_dist(i, j, one_ring_neighbour)\n if d<hd:\n hd = d\n h.append(hd)\n return h\n\ndef get_n_ring_neighbour(vertx, faces, n = 1, ordinal = False):\n \"\"\"\n Get the n ring neighbours of one vertex (or of each vertex in a list)\n\n Parameters:\n -----------\n vertx: vertex number, or list of vertex numbers\n faces: faces array, vertex relationship\n n: ring number\n ordinal: if True return only the vertices exactly on the nth ring, otherwise return all vertices within n rings\n\n Return:\n -------\n ringlist: list of neighbour sets, one set per input vertex\n\n Example:\n --------\n >>> ringlist = get_n_ring_neighbour(24, faces, n)\n \"\"\"\n if isinstance(vertx, int):\n vertx = [vertx]\n nth_ring = [set([vx]) for vx in vertx]\n nring = [set([vx]) for vx in vertx]\n\n while n != 0:\n n = n - 1\n for idx, neighbor_set in enumerate(nth_ring):\n neighbor_set_tmp = [_get_connvex_neigh(vx, faces) for vx in neighbor_set]\n neighbor_set_tmp = set().union(*neighbor_set_tmp)\n neighbor_set_tmp.difference_update(nring[idx])\n nth_ring[idx] = neighbor_set_tmp\n nring[idx].update(nth_ring[idx])\n if ordinal is True:\n return nth_ring\n else:\n return nring\n
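\n# e.g. get_n_ring_neighbour(24, faces, 2) yields one set holding every vertex\n# within two edges of vertex 24; with ordinal=True only the vertices exactly on\n# the second ring are kept.\n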
\ndef get_connvex(seedvex, faces, valuemask = None, labelmask = None, label = 1):\n \"\"\"\n Get connected vertices contained in a mask\n Starting from a seed point, region growing is performed until all reachable vertices in the mask are included\n That means the output satisfies two conditions:\n 1 it overlaps with the mask\n 2 its vertices are connected with each other\n \n Parameters:\n -----------\n seedvex: seed point (start point)\n faces: faces array, vertex relationship\n valuemask: mask with values. if it exists, growing only covers regions with a decreasing gradient (vertices whose value is smaller than that of the current reference vertex)\n labelmask: overlapping mask, a label mask\n label: specific mask label used as restriction\n\n Return:\n -------\n connvex: connected vertex set\n\n Example:\n --------\n >>> connvex = get_connvex(24, faces, labelmask = mask)\n \"\"\"\n connvex = set()\n connvex.add(seedvex)\n neighbor_set = _get_connvex_neigh(seedvex, faces, labelmask, label)\n\n if valuemask is None:\n connvex_temp = neighbor_set\n else:\n assert valuemask.shape[0] == np.max(faces) + 1, \"valuemask should have the same vertex number as the faces connection relationship\"\n if valuemask.ndim != 2:\n valuemask = valuemask.reshape(valuemask.shape[0], 1)\n refpt = 1*seedvex\n connvex_temp = _mask_by_gradient(refpt, neighbor_set, valuemask)\n\n while not connvex_temp.issubset(connvex):\n connvex_dif = connvex_temp.difference(connvex)\n connvex.update(connvex_dif)\n connvex_temp = set()\n for sx in connvex_dif: \n if valuemask is None:\n connvex_temp.update(_get_connvex_neigh(sx, faces, labelmask, label))\n else:\n refpt = 1*sx\n neighbor_set = _get_connvex_neigh(refpt, faces, labelmask, label)\n connvex_temp.update(_mask_by_gradient(refpt, neighbor_set, valuemask))\n print('Size of sulcus {0}'.format(len(connvex)))\n return connvex \n\ndef _mask_by_gradient(refpt, neighbor_set, valuemask):\n \"\"\"\n mask the neighbor set by valuemask, keeping vertices whose value is smaller than the value of vertex refpt\n \"\"\"\n return set([i for i in neighbor_set if valuemask[i]<valuemask[refpt]])\n\ndef cutrg2parcels(orig_mask, faces, label):\n \"\"\"\n Cut one labelled region of orig_mask into its connected parcels\n\n Parameters:\n -----------\n orig_mask: original label mask\n faces: faces array, vertex relationship\n label: label of the region to be cut\n\n Return:\n -------\n parcel_mask: mask in which each connected parcel carries its own label\n\n Example:\n --------\n >>> parcel_mask = cutrg2parcels(orig_mask, faces, 1)\n \"\"\"\n if not isinstance(label, int):\n raise Exception('label needs to be an int')\n lbl_orig_mask = tools.get_specificroi(orig_mask, label)\n parcel_mask = np.zeros_like(orig_mask)\n\n orig_maskset = set(np.where(lbl_orig_mask!=0)[0])\n connvex = set()\n dif_connvex = orig_maskset.difference(connvex)\n\n parcel_num = 0\n while len(dif_connvex) != 0:\n seedpt = random.choice(tuple(dif_connvex))\n parcel_connvex = get_connvex(seedpt, faces, labelmask = lbl_orig_mask, label = label)\n connvex.update(parcel_connvex)\n dif_connvex = orig_maskset.difference(connvex)\n parcel_num += 1\n parcel_mask = tools.make_lblmask_by_loc(parcel_mask, tuple(parcel_connvex), parcel_num)\n print('parcel number: {0}'.format(parcel_num))\n return parcel_mask\n\n\n\n\n\n\n\n\n","sub_path":"algorithm/surf_tools.py","file_name":"surf_tools.py","file_ext":"py","file_size_in_byte":18202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"391376684","text":"def is_sum_of_cubes(s):\n def isLucky(x):\n if x == \"\":\n return False\n return sum([int(c) ** 3 for c in x]) == int(x)\n\n s += \"a\"\n cur = \"\"\n lucky = []\n digits = {c for c in \"0123456789\"}\n _sum = 0\n for c in s:\n if c not in digits or len(cur) == 3:\n if isLucky(cur):\n lucky.append(cur)\n _sum += int(cur)\n cur = \"\"\n if c in digits:\n cur += c\n if len(lucky) > 0:\n return \"{} Lucky\".format(\" \".join(lucky) + \" \" + str(_sum))\n return \"Unlucky\"\n\n\nprint(is_sum_of_cubes(\"aqdf&0#1xyz!22[153(777.777\"))\n","sub_path":"codewar/2022/6/Hidden_Cubic_numbers.py","file_name":"Hidden_Cubic_numbers.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"414577394","text":"import glob\nimport importlib\nfrom pathlib import Path\n\nfrom datetime import datetime\n\nfrom lib.adb import Device\nfrom lib.model import Application, Url\nfrom lib.model.Analysis import 
Analysis\nfrom lib.model.database.Database import Database\nfrom lib.receiver.Frida import Frida\nfrom lib.receiver.Proxy import Proxy\n\nimport logging\nimport time\nimport uuid\n\nfrom lib.report.ReportGenerator import ReportGenerator\n\n\nclass Core:\n\n def __init__(self, configuration, device: Device, module, path: str):\n self.configuration = configuration\n self.device = device\n self.path = path\n self.module = module\n self.session = None\n self.timeout = int(configuration['ANALYSIS'].get('analysis_timeout'))\n self.plugins = self.load_plugins()\n\n # Object used by the core\n self.current_application = None\n self.receivers = []\n\n # Database initialisation\n session = Database.get_session()\n self.analysis = Analysis(uuid=str(uuid.uuid4()), date=datetime.now())\n session.add(self.analysis)\n session.commit()\n\n def select_applications(self):\n '''\n Return the applications that will be analyzed\n :return:\n '''\n logging.debug(\"Core:select_applications()\")\n return self.module.select(self.path, )\n\n def start_analysis(self):\n '''\n Launch the analysis process and loop trough all apk\n then generate the according report\n :return:\n '''\n logging.debug(\"Core:start_analysis()\")\n\n apk_paths = self.select_applications()\n\n if self.configuration['ANALYSIS'].getboolean('use_proxy'):\n self.device.install_certificate(self.configuration['PROXY'].get('proxy_certificate'))\n\n if self.configuration['ANALYSIS'].getboolean('use_frida'):\n plugin_code = map(lambda x:x.get_frida_script(),self.plugins)\n Frida.compile(configuration=self.configuration,plugin_code=plugin_code)\n self.device.install_frida()\n\n for apk in apk_paths:\n try:\n self.analyse_sample(Path(apk).absolute())\n except Exception:\n pass\n\n # Rapport generating\n generator = ReportGenerator()\n report_path = generator.generate(self.analysis)\n path = str(Path(report_path).absolute())\n logging.info(f\"Report generated to {path}\")\n\n\n def check_apk_is_valid(self):\n files = self.current_application.get_files()\n arch = self.device.get_device_arch()\n is_apk_valid = False\n found = False\n for i in files:\n if (\"lib/\" in i):\n print(i)\n found = True\n if (arch in i):\n is_apk_valid = True\n if (found == False):\n is_apk_valid = True\n\n return is_apk_valid\n\n def analyse_sample(self, apk_path):\n '''\n Analyze the given apk\n :param apk_path:\n :return:\n '''\n logging.debug(\"Core:analyse_sample()\")\n self.current_application = Application.Application(apk_path)\n\n\n if not self.check_apk_is_valid():\n logging.error(f\"The apk architecture and the device architecture ({self.device.get_device_arch()}) doesn't match for application : {self.current_application.filename}\")\n return\n\n # Database storing\n session = Database.get_session()\n self.analysis.application.append(self.current_application)\n session.add(self.current_application)\n session.commit()\n \n logging.info(f\"Package name: {self.current_application.package}\")\n logging.info(f\"Main activity: {self.current_application.get_main_activity()}\")\n logging.info(f\"Path : {self.current_application.path}\")\n logging.info(f\"SHA256 : {self.current_application.get_sha256_hash()}\")\n\n time_init = time.time()\n module = self.module(self.current_application,self.plugins)\n try:\n if(self.device.type==\"Physical\" and self.current_application.package in self.device.list_third_party()):\n self.device.uninstall_application(self.current_application.package)\n self.device.install_application(self.current_application.path)\n 
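# start_receivers() below spins up the configured handlers (an intercepting\n # proxy and/or a Frida client, per the ANALYSIS config flags); the loop then\n # polls once per second until the timeout / module-stop conditions are met.\n 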
self.start_receivers(module)\n\n current_time = 0\n while current_time < self.timeout or self.module.stop == False or ( self.timeout == -1 and self.device.check_is_up() ) :\n current_time = time.time() - time_init\n logging.debug(current_time)\n time.sleep(1)\n \n self.stop_receivers()\n if(self.timeout!= -1):\n self.device.uninstall_application(self.current_application.package)\n except Exception as e:\n self.device.uninstall_application(self.current_application.package)\n self.stop_receivers()\n print(e)\n\n\n\n def start_receivers(self, module):\n '''\n Start handler that handles the devices interactions\n :param module:\n :return:\n '''\n logging.debug(\"Core:start_receivers()\")\n\n use_frida = self.configuration['ANALYSIS'].getboolean('use_frida')\n use_proxy = self.configuration['ANALYSIS'].getboolean('use_proxy')\n\n if use_proxy:\n proxy = Proxy(self.configuration['PROXY'], module)\n proxy.start()\n self.receivers.append(proxy)\n\n if use_frida:\n frida = Frida(self.configuration['FRIDA'], module, self.device)\n frida.start()\n self.receivers.append(frida)\n else:\n self.device.launch_application(self.current_application.package)\n\n logging.debug(\"Core:start_receivers() -> Started\")\n\n\n\n\n def stop_receivers(self):\n '''\n Stop all current handlers\n :return:\n '''\n logging.debug(\"Core:stop_receivers()\")\n\n for i in range(len(self.receivers)):\n self.receivers[i].stop()\n\n self.receivers = []\n\n\n\n def load_plugins(self):\n \"\"\"\n Import all plugins in plugins folder\n :return:\n \"\"\"\n plugins = glob.glob(\"plugins/**/plugin.py\", recursive=True)\n\n normalize = lambda x: x.replace('/', '.')[:-3]\n plugins = map(normalize, plugins)\n _plugins = []\n for plugin in plugins:\n if not plugin.startswith('__'):\n module = importlib.import_module(plugin)\n module.onload()\n _plugins.append(module)\n return _plugins","sub_path":"lib/analysis/Core.py","file_name":"Core.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"460748520","text":"#!/usr/bin/env python\nfrom dbsetup import Base, User, Item, ItemCategory\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\n\n\nengine = create_engine('sqlite:///item_catalog.db',\n connect_args={'check_same_thread': False})\n\n# Bind the above engine to a session.\nSession = sessionmaker(bind=engine)\n\n# Create a Session object.\nsession = Session()\n\nuser1 = User(\n name='test01',\n email='test01.usery@gmail.com',\n profile_image=''\n)\n\nsession.add(user1)\nsession.commit()\n\ncategory1 = ItemCategory(\n name='Beds & Mattresses',\n users=user1\n)\n\nsession.add(category1)\nsession.commit()\n\nitem1 = Item(\n title='Bed Frames',\n description='There are lots of beds, but feeling good when you wake up starts with finding the right one.',\n item_categories=category1,\n users=user1\n)\n\nsession.add(item1)\nsession.commit()\n\nprint('Complete seed into database!')\n","sub_path":"seeder.py","file_name":"seeder.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"607113337","text":"# -*- coding: utf-8 -*-\n__author__ = \"Evgeny Markov\"\n__created_date__ = \"07.09.14\"\n\n# Builtins\nimport configparser\nfrom tkinter import *\nfrom tkinter.filedialog import askdirectory\nimport os\nimport logging\nimport time\n\n# My modules\nfrom lib.classic_daemon import 
set_process_name\n\nlogging.basicConfig(format='%(filename)s[LINE:%(lineno)d]#'\n ' %(levelname)-8s [%(asctime)s] %(message)s',\n level=logging.WARNING,\n filename='errors.log')\n\n\nclass Defaults:\n \"\"\"\n Class that contain default settings\n \"\"\"\n SEARCH_PATHS = os.path.expanduser(\"~\")\n MUSIC_PATH = os.path.join(SEARCH_PATHS, \"Music\") + os.sep\n TIMER = 2\n SORT_TYPE = \"None\"\n FILE_TYPES = \"mp3;flac\"\n\n\nclass SettingsWindow:\n def __init__(self):\n \"\"\"\n Settings window initialization\n \"\"\"\n self.settings_window = Tk()\n self.settings_window.config(bg=\"#2B2B2B\")\n self.settings_window.geometry(\"260x480\")\n self.settings_window.resizable(False, False)\n self.settings_window.title(\"Settings\")\n self.settings_window.iconphoto(True, PhotoImage(file=\"res/favicon.png\"))\n\n self.config = configparser.ConfigParser()\n try:\n self.config.read(\"settings.ini\")\n except FileNotFoundError:\n self.write_default_config()\n self.config.read(\"settings.ini\")\n\n #init settings\n self.music_path = None\n self.search_paths = None\n self.scan_timer = None\n self.sort_type = None\n self.file_types = None\n\n #read config from ini\n try:\n self.music_path = self.config.get(\"paths\", \"music_path\")\n self.search_paths = self.config.get(\"paths\", \"search_paths\")\n self.scan_timer = self.config.get(\"common\", \"timer\")\n self.sort_type = self.config.get(\"common\", \"sort_type\")\n self.file_types = self.config.get(\"common\", \"file_types\")\n except configparser.NoSectionError:\n logging.error(\"Configuration file damaged or not exists\")\n SettingsWindow.write_default_config()\n\n #Make layout in parent window\n frame_paths = Frame(self.settings_window, width=300, height=50, bg=\"#2B2B2B\")\n frame_dest_path = Frame(self.settings_window, width=300, height=50, bg=\"#2B2B2B\")\n separator1 = Frame(self.settings_window, height=3, width=280, bg=\"#323232\")\n frame_timer = Frame(self.settings_window, width=300, height=200, bg=\"#2B2B2B\")\n frame_sort = Frame(self.settings_window, width=300, height=50, bg=\"#2B2B2B\")\n\n search_label = Label(frame_paths, text=\"Search folders\", bg=\"#2B2B2B\",\n fg=\"#BBBBBB\", font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\")\n\n self.path_search_list = Listbox(frame_paths, bg=\"#474C4E\", fg=\"#BBBBBB\",\n font=\"Helvetica 12\", highlightcolor=\"#2B2B2B\",\n highlightthickness=0, width=25, height=6)\n\n for path in str(self.search_paths).rsplit(\";\"):\n self.path_search_list.insert(1, path)\n\n add_path_image = PhotoImage(file=\"res/plus.png\")\n button_add_path = Button(frame_paths, text=\"+\",\n bg=\"#2B2B2B\", fg=\"#2B2B2B\",\n font=\"Helvetica 8\",\n highlightbackground=\"#2B2B2B\",\n highlightcolor=\"#2B2B2B\",\n highlightthickness=0,\n activebackground=\"#2B2B2B\",\n activeforeground=\"#2B2B2B\",\n command=self.ask_search_folder,\n bd=0, image=add_path_image)\n\n remove_path_image = PhotoImage(file=\"res/minus.png\")\n button_remove_path = Button(frame_paths, text=\"-\",\n bg=\"#2B2B2B\", fg=\"#2B2B2B\",\n font=\"Helvetica 8\",\n highlightbackground=\"#2B2B2B\",\n highlightcolor=\"#2B2B2B\",\n highlightthickness=0,\n activebackground=\"#2B2B2B\",\n activeforeground=\"#2B2B2B\",\n command=self.remove_search_item,\n bd=0, image=remove_path_image)\n\n dest_label = Label(frame_dest_path, text=\"Music folder\", bg=\"#2B2B2B\",\n fg=\"#BBBBBB\", font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\")\n\n self.new_dest_path = StringVar()\n self.new_dest_path.set(self.music_path)\n destination_entry = 
Entry(frame_dest_path, width=20, bg=\"#2B2B2B\",\n fg=\"#BBBBBB\", font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\",\n insertbackground=\"#BBBBBB\",\n textvariable=self.new_dest_path)\n\n dest_path_image = PhotoImage(file=\"res/storage.png\")\n button_dest_path = Button(frame_dest_path, text=\"...\",\n bg=\"#2B2B2B\", fg=\"#2B2B2B\",\n font=\"Helvetica 8\",\n highlightbackground=\"#2B2B2B\",\n highlightcolor=\"#2B2B2B\",\n highlightthickness=0,\n activebackground=\"#2B2B2B\",\n activeforeground=\"#2B2B2B\",\n command=self.ask_destination,\n bd=0, image=dest_path_image)\n\n timer_label = Label(frame_timer, text=\"Search timer\", bg=\"#2B2B2B\",\n fg=\"#BBBBBB\", font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\")\n\n self.timer = IntVar()\n self.timer.set(self.scan_timer)\n timer_entry = Entry(frame_timer, width=6, bg=\"#2B2B2B\",\n fg=\"#BBBBBB\", font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\",\n insertbackground=\"#BBBBBB\",\n textvariable=self.timer)\n\n sec_label = Label(frame_timer, text=\"sec\", bg=\"#2B2B2B\",\n fg=\"#BBBBBB\", font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\")\n\n sort_label = Label(frame_sort, text=\"Pick sort type\", bg=\"#2B2B2B\",\n fg=\"#BBBBBB\", font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\")\n\n self.sort_var = StringVar()\n self.sort_var.set(self.sort_type)\n sort_option_menu = OptionMenu(frame_sort, self.sort_var, \"None\", \"Artist\", \"Year\",\n \"Genre\", \"Likeness\")\n sort_option_menu.config(bg=\"#474C4E\", fg=\"#BBBBBB\",\n font=\"Helvetica 12\", highlightbackground=\"#2B2B2B\",\n highlightcolor=\"#2B2B2B\",\n highlightthickness=0,\n activebackground=\"#2B3D53\",\n activeforeground=\"#BBBBBB\")\n\n default_image = PhotoImage(file=\"res/default.png\")\n button_default = Button(self.settings_window, text=\"Default\",\n bg=\"#2B2B2B\", fg=\"#BBBBBB\",\n font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\",\n highlightcolor=\"#2B2B2B\",\n highlightthickness=0,\n activebackground=\"#2B2B2B\",\n activeforeground=\"#BBBBBB\",\n command=self.set_default,\n bd=0, image=default_image)\n\n save_image = PhotoImage(file=\"res/checkmark.png\")\n button_save = Button(self.settings_window, text=\"Save\",\n bg=\"#2B2B2B\", fg=\"#BBBBBB\",\n font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\",\n highlightcolor=\"#2B2B2B\",\n highlightthickness=0,\n activebackground=\"#2B2B2B\",\n activeforeground=\"#BBBBBB\",\n command=self.save_settings,\n bd=0, image=save_image)\n\n close_image = PhotoImage(file=\"res/delete.png\")\n button_close = Button(self.settings_window, text=\"Close\",\n bg=\"#2B2B2B\", fg=\"#BBBBBB\",\n font=\"Helvetica 12\",\n highlightbackground=\"#2B2B2B\",\n highlightcolor=\"#2B2B2B\",\n highlightthickness=0,\n activebackground=\"#2B2B2B\",\n activeforeground=\"#BBBBBB\",\n command=lambda: self.settings_window.destroy(),\n bd=0, image=close_image)\n\n search_label.pack(pady=10)\n self.path_search_list.pack(side=TOP, pady=10)\n button_remove_path.pack(side=LEFT, padx=15)\n button_add_path.pack(side=RIGHT, padx=15)\n frame_paths.pack()\n\n dest_label.pack(pady=5)\n destination_entry.pack(side=LEFT, pady=5, padx=10)\n button_dest_path.pack(side=LEFT, pady=10)\n frame_dest_path.pack()\n separator1.pack()\n\n timer_label.pack(side=LEFT, pady=10, padx=15)\n timer_entry.pack(side=LEFT, pady=10)\n sec_label.pack(side=LEFT, pady=10)\n frame_timer.pack()\n\n sort_label.pack(side=LEFT, pady=10)\n sort_option_menu.pack(side=LEFT, pady=10, padx=15)\n frame_sort.pack()\n\n button_save.pack(side=RIGHT, pady=10, 
padx=10)\n button_close.pack(side=RIGHT, pady=10)\n button_default.pack(side=RIGHT, pady=10, padx=10)\n\n self.settings_window.mainloop()\n\n @staticmethod\n def write_default_config(settings_file=\"settings.ini\"):\n \"\"\"\n Writing a default settings from \"Defaults\" class\n \"\"\"\n config_default = configparser.ConfigParser()\n config_default[\"paths\"] = {\"music_path\": Defaults.MUSIC_PATH,\n \"search_paths\": Defaults.SEARCH_PATHS\n }\n config_default[\"common\"] = {\"timer\": str(Defaults.TIMER),\n \"sort_type\": Defaults.SORT_TYPE,\n \"file_types\": Defaults.FILE_TYPES\n }\n with open(settings_file, \"w\", encoding=\"utf-8\") as configfile:\n config_default.write(configfile)\n\n def set_default(self):\n \"\"\"\n Set default settings in UI\n \"\"\"\n self.timer.set(Defaults.TIMER)\n self.sort_var.set(Defaults.SORT_TYPE)\n self.music_path = Defaults.MUSIC_PATH\n self.search_paths = Defaults.SEARCH_PATHS\n self.new_dest_path.set(Defaults.MUSIC_PATH)\n self.path_search_list.delete(0, self.path_search_list.size())\n self.path_search_list.insert(0, Defaults.SEARCH_PATHS)\n\n def ask_destination(self):\n \"\"\"\n Show directories tree and return selected music destination folder\n \"\"\"\n self.new_dest_path.set(askdirectory(initialdir=os.path.expanduser(\"~\")))\n self.music_path = self.new_dest_path.get() + os.sep\n\n def ask_search_folder(self):\n \"\"\"\n Show directories tree and return search folder\n \"\"\"\n self.path_search_list.insert(self.path_search_list.size(),\n askdirectory(initialdir=os.path.expanduser(\"~\")))\n self.search_paths = \"\"\n for elem in range(0, self.path_search_list.size()):\n self.search_paths += self.path_search_list.get(elem) + os.sep + \";\"\n self.search_paths = self.search_paths[0:len(self.search_paths) - 1]\n\n def remove_search_item(self):\n \"\"\"\n Remove selected search folder\n \"\"\"\n try:\n self.path_search_list.delete(self.path_search_list.curselection())\n self.search_paths = \"\"\n for elem in range(0, self.path_search_list.size()):\n self.search_paths += self.path_search_list.get(elem) + \";\"\n self.search_paths = self.search_paths[0:len(self.search_paths) - 1]\n except TclError:\n pass\n\n def save_settings(self):\n \"\"\"\n Method that write settings to config file, restart daemon\n if it is working at the same time, update common scanning timer\n \"\"\"\n self.config.set(\"paths\", \"music_path\", self.music_path)\n self.config.set(\"paths\", \"search_paths\", self.search_paths)\n self.config.set(\"common\", \"timer\", str(self.timer.get()))\n self.config.set(\"common\", \"sort_type\", self.sort_var.get())\n self.config.set(\"common\", \"modification time\", str(time.time()))\n\n import subprocess\n with open(\"settings.ini\", \"w\") as cfg_file:\n self.config.write(cfg_file)\n try:\n purge_proc = subprocess.Popen(\"python3 album_daemon.py purge\", shell=True)\n purge_proc.wait()\n except OSError:\n logging.error(\"Some problems with database file, please remove .db, .pref, .sim manually\")\n self.settings_window.destroy()\n\n\nif __name__ == \"__main__\":\n set_process_name(\"album_settings\")\n SettingsWindow()","sub_path":"album_settings_gui.py","file_name":"album_settings_gui.py","file_ext":"py","file_size_in_byte":13733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"436947004","text":"import boto3\nimport base64\nimport cv2\nimport json\nimport datetime\nimport random\n\nbucketName = \"knownfacesphotos\"\ncollection_id = \"Collection1\"\nownerPhoneNumber = 
\"9178499953\"\n\ndef parse_face_search_response(faceSearchResponse):\n matched = 0\n faceId = \"\"\n if len(faceSearchResponse) > 0:\n if faceSearchResponse[0][\"MatchedFaces\"]:\n matched = 1\n faceId = faceSearchResponse[0][\"MatchedFaces\"][0][\"Face\"][\"FaceId\"]\n else:\n matched = -1\n return matched, faceId\n\ndef get_face_details(faceId):\n print(\"Dynamo DB\")\n faceDetails = None\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('visitors')\n response = table.get_item(Key={'faceID' : faceId})\n faceDetails = response[\"Item\"] # parse and store only relevant info\n return faceDetails\n \ndef parse_phone_number(faceDetails):\n phoneNumber = faceDetails[\"phoneNumber\"]\n if \"+1\" not in phoneNumber:\n phoneNumber = '+1'+phoneNumber\n return phoneNumber\n\ndef check_otp_existence(faceId):\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('passcodes')\n response = table.get_item(Key={'ID': faceId})\n otp_current_sent = 0\n #print(response)\n if response.get(\"Item\", None):\n return True\n else:\n return False\n \ndef generate_store_send_otp(faceId, phoneNumber):\n otp = random.randint(100000,999999) #TODO: Randomize OTP\n expiration_time = 300\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('passcodes')\n response = table.get_item(Key={'ID': faceId})\n #print(response)\n if response.get(\"Item\", None):\n print(\"OTP already sent\")\n else:\n print(\"Generating OTP and storing to passcodes table\")\n response = table.put_item(\n Item={\n 'createdAtTimestamp': str(datetime.datetime.now()),\n 'ttl': int(datetime.datetime.now().timestamp() + expiration_time),\n 'OTP': otp,\n 'ID': faceId\n }\n )\n msg = \"OTP for access is \" + str(otp)\n send_message(phoneNumber, msg)\n\ndef send_message(phoneNumber, msg):\n client = boto3.client('sns')\n if \"+1\" not in phoneNumber:\n phoneNumber = '+1'+phoneNumber\n print(\"Sending SMS to \" + phoneNumber + \" : \" + msg)\n response = client.publish(PhoneNumber=phoneNumber, Message=msg)\n\ndef collection_insert(image):\n client = boto3.client('rekognition')\n print(\"Adding \" + image + \" from bucket [\" + bucketName + \"] to collection[\" + collection_id + \"]\")\n response = client.index_faces(CollectionId=collection_id,\n Image={'S3Object':{'Bucket':bucketName,'Name':image}},\n ExternalImageId=image,\n MaxFaces=1,\n QualityFilter=\"AUTO\",\n DetectionAttributes=['ALL'])\n faceId = response['FaceRecords'][0]['Face']['FaceId']\n return faceId\n\ndef detect_faces_from_s3(filename):\n print(\"Detecting face in \" + filename)\n s3_client = boto3.client('s3')\n rekognition_client = boto3.client('rekognition')\n print(\"Starting detect_faces call\")\n response = rekognition_client.detect_faces(Image={'S3Object': {'Bucket': bucketName, 'Name': filename}}, Attributes=['ALL'])\n print(\"detect_faces response: \" + str(response))\n if not response['FaceDetails']:\n print(\"Deleting \" + filename + \" from S3\")\n s3_client.delete_object(Bucket=bucketName,Key=filename)\n return False\n print(\"Detected face in \" + filename)\n return True\n\ndef fetch_image(streamARN, fragmentNumber, serverTimestamp):\n #print(streamARN, fragmentNumber, serverTimestamp)\n kvs_client = boto3.client('kinesisvideo')\n kvs_endpoint = kvs_client.get_data_endpoint(\n APIName=\"GET_HLS_STREAMING_SESSION_URL\",\n StreamARN=streamARN\n )['DataEndpoint']\n \n kvam_client = boto3.client('kinesis-video-archived-media', endpoint_url=kvs_endpoint)\n video_stream_url = kvam_client.get_hls_streaming_session_url(\n StreamARN=streamARN,\n 
PlaybackMode=\"LIVE_REPLAY\",\n HLSFragmentSelector={\n 'FragmentSelectorType': 'SERVER_TIMESTAMP',\n 'TimestampRange': {\n 'StartTimestamp': serverTimestamp\n }\n }\n )['HLSStreamingSessionURL']\n \n #print(\"Capturing video from: \" + video_stream_url)\n video_capture_client = cv2.VideoCapture(video_stream_url)\n filename = fragmentNumber + \".jpg\"\n temp_filename = \"temp_frame.jpg\"\n temp_filename_s3 = \"temp_frame_s3.jpg\"\n s3_client = boto3.client('s3')\n no_faces_detected = False\n success = True\n while(success):\n # Capture frame-by-frame\n success, image_frame = video_capture_client.read()\n \n if image_frame is not None:\n # Display the resulting frame\n video_capture_client.set(1, int(video_capture_client.get(cv2.CAP_PROP_FRAME_COUNT) / 2) - 1)\n #print(\"CV writing to file - \" + temp_filename)\n cv2.imwrite('/tmp/' + temp_filename, image_frame)\n #print(\"Uploading to S3 bucket: \" + bucketName)\n s3_client.upload_file(\n '/tmp/' + temp_filename,\n bucketName,\n temp_filename_s3,\n ExtraArgs={'ACL':'public-read'}\n )\n if detect_faces_from_s3(temp_filename_s3):\n s3_client.upload_file(\n '/tmp/' + temp_filename,\n bucketName,\n filename,\n ExtraArgs={'ACL':'public-read'})\n s3_client.delete_object(Bucket=bucketName,Key=temp_filename_s3)\n print(\"Uploaded to S3\")\n else:\n no_faces_detected = True\n video_capture_client.release()\n break\n else:\n break\n video_capture_client.release()\n cv2.destroyAllWindows()\n if no_faces_detected:\n return None, None\n else:\n # https://knownfacesphotos.s3.amazonaws.com/554880.JPG\n s3_image_link = \"https://%s.s3.amazonaws.com/%s\" % (bucketName, filename)\n print(\"S3 Image Link: \" + s3_image_link)\n return filename, s3_image_link\n\ndef update_visitors_with_image(faceId, image_name):\n data = [{\n \"bucket\" : \"knownfacesphotos\", \n \"createdTimestamp\" : str(datetime.datetime.now()),\n \"objectKey\" : image_name\n }]\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('visitors')\n response = table.update_item(\n Key={\n 'faceID': faceId,\n },\n UpdateExpression=\"SET photos = list_append(photos, :i)\",\n ExpressionAttributeValues={\n ':i': data,\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n #print(response)\n\ndef lambda_handler(event, context):\n #print(event['Records'])\n for record in event['Records']:\n #Kinesis data is base64 encoded so decode here\n payload=json.loads(base64.b64decode(record[\"kinesis\"][\"data\"]))\n print(\"Decoded payload: \" + str(payload))\n streamARN = payload[\"InputInformation\"][\"KinesisVideo\"][\"StreamArn\"]\n fragmentNumber = payload[\"InputInformation\"][\"KinesisVideo\"][\"FragmentNumber\"]\n serverTimestamp = payload[\"InputInformation\"][\"KinesisVideo\"][\"ServerTimestamp\"]\n # Parse face ID. 
If multiple face IDs matched, go with the one having highest similarity\n faceSearchResponse = payload[\"FaceSearchResponse\"]\n matched, faceId = parse_face_search_response(faceSearchResponse)\n if (matched == -1):\n print(\"No faces detected\")\n elif (matched == 1):\n # Retrieve details from database for the face ID\n print(\"Face ID: \" + faceId)\n faceDetails = get_face_details(faceId)\n print(\"FaceDetails: %s\" % faceDetails)\n if faceDetails:\n print(\"FaceID exists in DynamoDB\")\n phoneNumber = parse_phone_number(faceDetails)\n #print(phoneNumber)\n result = check_otp_existence(faceId)\n if not result:\n image_name, image_link = fetch_image(streamARN, fragmentNumber, serverTimestamp)\n if image_name and detect_faces_from_s3(image_name):\n generate_store_send_otp(faceId, phoneNumber)\n update_visitors_with_image(faceId, image_name)\n print(\"Completed fetching image\")\n else:\n print(\"OTP already sent for this visitor. Not fetching image or updating database\")\n else:\n print(\"FaceID does not exist in DynamoDB\")\n elif (matched == 0):\n # Unrecognized face\n print(\"Unrecognized face\")\n image_name, image_link = fetch_image(streamARN, fragmentNumber, serverTimestamp)\n print(\"Completed fetching image\")\n if image_name and detect_faces_from_s3(image_name):\n faceId = collection_insert(image_name)\n print(\"Assigning new faceID: \" + faceId)\n # https://ownerportal.s3.amazonaws.com/index.html?fragmentNumber=10a\n msg = \"You have a new visitor. Please authorize using the following link - https://ownerportal.s3.amazonaws.com/index.html?fragmentNumber=\" + fragmentNumber\n send_message(ownerPhoneNumber, msg)\n","sub_path":"lambda_functions/RekognitionStream.py","file_name":"RekognitionStream.py","file_ext":"py","file_size_in_byte":9576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"331385771","text":"import spotipy\nimport spotipy.util\nimport credentials\nimport pprint as pp\nimport helpers\nfrom helpers import DataHandler\nimport database\nimport mysql.connector as mysql\nimport logging\nimport sys\n\nhelpers.setLoggingLevel(logging.INFO)\n\n# scope is used to determine what data this script (app) wants to access\nscope = 'playlist-modify-private playlist-read-collaborative user-library-read'\nusername = 't6am47'\n\ntoken = spotipy.util.prompt_for_user_token(username, scope,\n credentials.client_id,\n credentials.client_secret,\n credentials.redirect_uri) # registered in app settings\n\nif token:\n api = spotipy.Spotify(auth=token)\n\n # creates connection to mysql database w/ credentials in separate file\n connection = mysql.connect(user=credentials.user,\n password=credentials.password,\n host=credentials.host,\n database=credentials.database)\n logging.info('connection opened')\n\n # deletes all data from specified table and resets auto_increment\n # database.wipe_table(connection, credentials.table)\n\n # create DataHandler object\n dataHandler = DataHandler(api, connection)\n\n # gets last synced id\n id = database.get_last_id(connection, credentials.table)\n count_local_tracks = database.get_count_local_tracks(connection)\n\n # returns all tracks in specified playlist\n result = api.user_playlist_tracks(username,\n playlist_id=\"spotify:user:t6am47:playlist:4doQ7lGWMlDDltEOQARV1d\",\n fields=None,\n limit=100,\n offset=id + count_local_tracks,\n market=\"DE\")\n\n # logging.debug(pp.pformat(result))\n\n # iterates through all items in the result (some other metadata is left out)\n for item in result['items']:\n 
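# each 'item' is one playlist-track entry from the Web API page; DataHandler\n # was built above with the spotipy client and the MySQL connection, so\n # process_data presumably parses each track and persists it.\n 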
dataHandler.process_data(item)\n\n # next block for looping through all tracks (adjust limit in previous request)\n while result['next']:\n result = api.next(result)\n for item in result['items']:\n dataHandler.process_data(item)\n\n connection.close()\n logging.info('connection closed')\n\nelse:\n logging.error(\"Can't get token for %s\", username)\n","sub_path":"analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"324187027","text":"#Extracting sentences with one or three child elements\n\nimport xml.etree.ElementTree as ET\nimport re\nimport random\nfrom os import path\n\nwith open(\"file.txt\") as f1:\n text1 = f1.read()\n\n\n# drop the empty entries that re.split produces for trailing newlines\nfilenames = [f for f in re.split(r'[\\n\\r]+', text1) if f]\n\nfor filename in filenames:\n\tprint (filename)\t\n\ttree = ET.parse(filename)\n\troot = tree.getroot()\n\n\n\tfor sentence in root.findall('sentence'):\n\t\trank = sentence.find('pair')\n\t\tif (len(sentence.getchildren())!=3) and (len(sentence.getchildren())!=1):\n\t\t\troot.remove(sentence)\n\n\tnewfilepath = path.join('Edited',filename)\n\tprint (newfilepath)\n\ttree.write(newfilepath)\n","sub_path":"DDICorpus/Train/MedLine/3childextrac.py","file_name":"3childextrac.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"374727392","text":"\nimport os\nimport platform\nimport ctypes.util\n\nfrom py_nsbcli.util.gotypes import (\n GoInt32,\n GoString,\n GoBytes,\n GolevelDBptr,\n GoWalletptr\n)\n\nENC = \"utf-8\"\n\n# assumed: the native cwallet libraries live next to this module\nINCLUDE_PATH = os.path.dirname(os.path.abspath(__file__))\n\nif platform.system() == \"Windows\":\n funcs = ctypes.CDLL(ctypes.util.find_library('pynsbcli_cwallet_windows'))\nelif platform.system() == \"Darwin\":\n funcs = ctypes.CDLL(INCLUDE_PATH + \"/cwallet_mac.dll\")\nelif platform.system() == \"Linux\":\n funcs = ctypes.CDLL(INCLUDE_PATH + \"/pynsbcli_cwallet_linux.dll\")\nelse:\n raise ImportError(\"no corresponding cwallet api on this platform\")\n\n\nfuncs.CDLL_NewLevelDBHandler.argtypes = (GoString.Type,)\nfuncs.CDLL_NewLevelDBHandler.restype = GolevelDBptr\n\nfuncs.CDLL_CloseDB.argtypes = (GolevelDBptr,)\nfuncs.CDLL_CloseDB.restype = None\n\nfuncs.CDLL_PreCheckWallet.argtypes = (GoWalletptr,)\nfuncs.CDLL_PreCheckWallet.restype = GoInt32\n\nfuncs.CDLL_NewWalletHandlerFromDB.argtypes = (GolevelDBptr, GoString.Type)\nfuncs.CDLL_NewWalletHandlerFromDB.restype = GoWalletptr\n\nfuncs.CDLL_NewWalletHandler.argtypes = (GolevelDBptr, GoString.Type)\nfuncs.CDLL_NewWalletHandler.restype = GoWalletptr\n\nfuncs.CDLL_WalletAddress.argtypes = (GoWalletptr, GoInt32)\nfuncs.CDLL_WalletAddress.restype = GoBytes.Type\n\nfuncs.CDLL_WalletSign.argtypes = (GoWalletptr, GoInt32, GoBytes.Type, GoInt32)\nfuncs.CDLL_WalletSign.restype = GoBytes.Type\n\nfuncs.CDLL_WalletSignHash.argtypes = (GoWalletptr, GoInt32, GoBytes.Type)\nfuncs.CDLL_WalletSignHash.restype = GoBytes.Type\n\nfuncs.CDLL_WalletVerifyByRaw.argtypes = (GoWalletptr, GoInt32, GoBytes.Type, GoBytes.Type, GoInt32)\nfuncs.CDLL_WalletVerifyByRaw.restype = GoInt32\n\nfuncs.CDLL_WalletVerifyByHash.argtypes = (GoWalletptr, GoInt32, GoBytes.Type, GoBytes.Type)\nfuncs.CDLL_WalletVerifyByHash.restype = GoInt32\n\n\nclass LevelDB:\n def __init__(self, path=None):\n self._handler_num = -1\n if path is not None:\n self.open(path)\n\n @property\n def handler_num(self):\n return self._handler_num\n\n def open(self, path):\n \"\"\"\n NewLevelDBHandler(dbpath string) (handlerPtr int32)\n \"\"\"\n self.close()\n self._handler_num = 
funcs.CDLL_NewLevelDBHandler(GoString.trans(path, ENC))\n return self._handler_num\n\n def close(self):\n \"\"\"\n CloseDB(handlerPtr int32)\n \"\"\"\n if self._handler_num > -1:\n funcs.CDLL_CloseDB(self._handler_num)\n\n @staticmethod\n def close_db(handler_num):\n funcs.CDLL_CloseDB(handler_num)\n\n\nclass Wallet:\n def __init__(self, db_handler, name):\n self._handler_num = -1\n self._name = name\n self.open(db_handler, name)\n\n @property\n def handler_num(self):\n return self._handler_num\n\n @property\n def name(self):\n return self._name\n\n def open(self, db_handler, name):\n if isinstance(db_handler, LevelDB):\n self._handler_num = funcs.CDLL_NewWalletHandlerFromDB(db_handler.handler_num, GoString.trans(name, ENC))\n elif isinstance(db_handler, int):\n self._handler_num = funcs.CDLL_NewWalletHandlerFromDB(db_handler, GoString.trans(name, ENC))\n else:\n self._handler_num = -1\n\n def address(self, idx=0):\n ptr = funcs.CDLL_WalletAddress(self._handler_num, idx)\n if ptr is None:\n return\n return GoBytes.convert(ptr, 32)\n\n @staticmethod\n def create(db_handler, name):\n wlt = Wallet(None, name)\n if isinstance(db_handler, LevelDB):\n wlt._handler_num = funcs.CDLL_NewWalletHandler(db_handler.handler_num, GoString.trans(name, ENC))\n elif isinstance(db_handler, int):\n wlt._handler_num = funcs.CDLL_NewWalletHandler(db_handler, GoString.trans(name, ENC))\n else:\n wlt._handler_num = -1\n return wlt\n\n def sign(self, msg: bytes) -> bytes or None:\n ptr = funcs.CDLL_WalletSign(self._handler_num, 0, GoBytes.frombytes(msg), len(msg))\n if ptr is None:\n return\n return GoBytes.convert(ptr, 64)\n\n def sign_hash(self, msg_hash: bytes) -> bytes or None:\n if len(msg_hash) != 64:\n raise ValueError(\"the length of SHA512 Hash(Bytes) must be 64\")\n ptr = funcs.CDLL_WalletSignHash(self._handler_num, 0, GoBytes.frombytes(msg_hash))\n if ptr is None:\n return\n return GoBytes.convert(ptr, 64)\n\n def verify_by_raw(self, msg: bytes, signature: bytes) -> int:\n if len(signature) != 64:\n raise ValueError(\"the length of signature(Bytes) must be 64\")\n print(msg, len(msg), GoBytes.convert(GoBytes.frombytes(signature), 64).hex())\n return funcs.CDLL_WalletVerifyByRaw(\n self._handler_num, 0,\n GoBytes.frombytes(signature),\n GoBytes.frombytes(msg),\n len(msg)\n )\n\n def verify_by_hash(self, msg_hash: bytes, signature: bytes) -> int:\n if len(msg_hash) != 64:\n raise ValueError(\"the length of SHA512 Hash(Bytes) must be 64\")\n if len(signature) != 64:\n raise ValueError(\"the length of signature(Bytes) must be 64\")\n return funcs.CDLL_WalletVerifyByHash(\n self._handler_num, 0,\n GoBytes.frombytes(signature),\n GoBytes.frombytes(msg_hash)\n )\n\n\n\nif __name__ == '__main__':\n db = LevelDB(\"../kvstore\")\n print(db.handler_num)\n test_wlt = Wallet(db, 'Alice')\n print(test_wlt.handler_num, test_wlt.address(0).hex())\n print(test_wlt.sign(b\"\\x10\\x00\").hex())\n aut = test_wlt.sign(b\"\\x10\\x00\")\n print(test_wlt.verify_by_raw(b\"\\x10\\x00\", aut))\n aut = test_wlt.sign(b\"\\x11\\x00\")\n print(test_wlt.verify_by_raw(b\"\\x10\\x00\", aut))\n\n test_wlt = Wallet(db, 'black_Alice')\n print(test_wlt.handler_num)\n print(test_wlt.sign(b\"\\x10\\x00\") is None)\n print(test_wlt.sign_hash(aut) is None)\n\n db.close()\n","sub_path":"build/lib/py_nsbcli/types/wallet.py","file_name":"wallet.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"477983786","text":"\"\"\"Tensorflow 
running average (not exponential)\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass RunningAverage(object):\n \"\"\"Keeps a running average of the variables given to it. The goal is for this guy\n to behave like tf's built in exponential moving average, just without the\n exponential moving part.\n\n In particular we do:\n M_1 = x_1\n M_k = M_{k-1} + \\frac{x_k - M_{k-1}}{k}\n\n To update the average at each step. The averages are kept in shadow variables\n etc etc, hopefully use should be as per tf's ema.\n \"\"\"\n\n def __init__(self):\n self.shadow_vars = {} # will store a shadow variable and an update count\n\n def apply(self, var_list):\n \"\"\"Applies the running average to a list of variables\n Creates shadow variables and update op. Returns a grouped update op for\n all the averages in the list.\"\"\"\n update_ops = []\n with tf.variable_scope('running_average'):\n for var in var_list:\n # add a shadow var that gets initialized to the same value\n # and a count to keep track of how many times it's been updated\n name = var.op.name\n count = tf.get_variable(\n name+'_count', dtype=tf.float32,\n initializer=tf.constant_initializer(0.0),\n shape=[], trainable=False)\n shadow = tf.get_variable(\n name+'_shadow', dtype=var.dtype,\n initializer=var.initialized_value(),\n collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n tf.GraphKeys.VARIABLES],\n trainable=False)\n # now make the update ops\n # increase the count\n count_update = tf.assign_add(count, 1.0)\n with tf.control_dependencies([count_update]):\n difference = (var - shadow)/count\n update = tf.assign_add(shadow, difference)\n update_ops.append(update)\n self.shadow_vars[var] = (shadow, count)\n\n return update_ops\n\n def average(self, var):\n \"\"\"Get the average for a variable\"\"\"\n return self.shadow_vars[var][0]\n\n def average_name(self, var):\n return self.shadow_vars[var][0].op.name\n\n\nif __name__ == '__main__':\n # quick test\n # average of a normal best be close to the mean\n rando = tf.random_normal(shape=[1], mean=1.0)\n var = tf.Variable(rando)\n averager = RunningAverage()\n update_avge = averager.apply([var])\n with tf.control_dependencies(update_avge):\n update = tf.assign(var, rando)\n\n sess = tf.Session()\n sess.run(tf.initialize_all_variables())\n \n with sess.as_default():\n for i in range(50000):\n sess.run(update)\n print('\\r{}'.format(i), end='')\n print('\\rAverage: {}'.format(averager.average(var).eval()))\n \n","sub_path":"averager.py","file_name":"averager.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"166967271","text":"import os\r\nimport sys\r\nglobal devicename\r\n\r\ndevicename = \"\"\r\ndeviceindex = -1\r\n\r\ntry:\r\n devicename = sys.argv[1]\r\nexcept:\r\n devicename = \"\"\r\n\r\nif(devicename == \"\"):\r\n devicelist = ['127.0.0.1:5555','de496248','192.168.43.1:5555','add new']\r\n for i in range(0,4):\r\n print(i,\":\",devicelist[i])\r\n deviceindex = int(input(\"choose one device: \"))\r\n\r\n if deviceindex < 3 and deviceindex >= 0:\r\n devicename = devicelist[deviceindex]\r\n elif deviceindex == 3:\r\n devicename = input()\r\n \r\nprint(\"Device:\",devicename)\r\noremaster = 'none'\r\nimport retriver\r\nretriver.Start()\r\n 
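# Start() hands control to the retriver module, which presumably drives the\r\n# retrieval loop against the adb device chosen above.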
\r\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"291101284","text":"from Encoding import *\n\ndef Tso(m):\n events = [e for e in m.events() if isinstance(e, (Load, Store, Init))]\n\n ### All communication relations\n enc = satUnion('co', 'fr', events)\n enc = And(enc, satUnion('(co+fr)', 'rf', events, 'com'))\n\n ### Uniproc\n enc = And(enc, satUnion('poloc', 'com', events))\n \n ### Communication relations for TSO\n enc = And(enc, satUnion('(co+fr)', 'rfe', events, 'com-tso'))\n\n ### Program order for TSO\n enc = And(enc, satMinus('po', 'WR', events))\n enc = And(enc, satUnion('(po\\WR)', 'sync', events, 'po-tso'))\n\n ### Global happens before for TSO\n enc = And(enc, satUnion('po-tso', 'com-tso', events, 'ghb-tso'))\n \n return enc\n\ndef TsoConsistent(m):\n events = [e for e in m.events() if isinstance(e, (Load, Store, Init))]\n\n return And(satAcyclic('(poloc+com)', events), satAcyclic('ghb-tso', events))\n\ndef TsoInconsistent(m):\n events = [e for e in m.events() if isinstance(e, (Load, Store, Init))]\n \n enc = And(satCycleDef('(poloc+com)', events), satCycleDef('ghb-tso', events))\n enc = And(enc, Or(satCycle('(poloc+com)', events), satCycle('ghb-tso', events)))\n return enc","sub_path":"src/mcm/TSO.py","file_name":"TSO.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"98183985","text":"import random\n\nclass Matrix:\n def __init__(self,arr):\n self.__matrix=[]\n rowCount=len(arr)\n if rowCount>0:\n colCount=sum([len(row) for row in arr])//rowCount\n if colCount>0 and len(arr[0])==colCount:\n self.__matrix=arr\n else:\n print(\"Column problem!\")\n else:\n print(\"Row problem!\")\n \n \n def __repr__(self):\n ret=''\n for i in range(len(self.__matrix)):\n for j in range(len(self.__matrix[0])):\n ret+=str(self.__matrix[i][j])+\" \"\n ret+=\"\\n\"\n return ret\n\n def __eq__(self, other):\n return self.__matrix==other.__matrix\n \n def __neg__(self):\n return Matrix([[-num for num in row] for row in self.__matrix])\n\n def __add__(self,other):\n if type(other) is int or type(other) is float:\n other=Matrix.ID(len(self.__matrix))*other\n if len(self.__matrix)==len(other.__matrix) \\\n and len(self.__matrix[0])==len(other.__matrix[0]):\n retMat=[[]*len(self.__matrix[0]) for i in range(len(self.__matrix))]\n rows,cols=len(retMat),len(self.__matrix[0])\n for i in range(rows):\n for j in range(cols):\n retMat[i].append(self.__matrix[i][j] + other.__matrix[i][j])\n return Matrix(retMat)\n\n \n def __sub__(self,other):\n return self+-other\n\n def __mul__(self,other):\n if type(other)==Matrix:\n A,B=self.__matrix, other.__matrix\n if len(A[0])!=len(B):\n return \"Cannot multiply these matrices!\"\n else:\n ret_mat=[[]*len(B[0]) for i in range(len(A))]\n for i in range(len(ret_mat)):\n for j in range(len(B[0])):\n ret_mat[i].append(sum( [A[i][k] * B[k][j] for k in range(len(B))]))\n return Matrix(ret_mat)\n else:\n return Matrix([[other*cell for cell in row] for row in self.__matrix])\n \n def __pow__(self,power):\n if type(power) is int:\n if power>=0:\n retMat=Matrix.ID(len(self.__matrix))\n for i in range(power):\n retMat=retMat*self\n return retMat\n if power==-1:\n return self.inverse()\n if power=='t':\n return self.transpose()\n\n def __round__(self, other=0):\n return Matrix([[round(cell,other) for cell in self.__matrix[row]] \\\n 
for row in range(len(self.__matrix))])\n\n def transpose(self):\n n,m=self.rowcount(),self.colcount()\n ret_mat=[[0]*n for i in range(m)]\n for i in range(m):\n for j in range(n):\n ret_mat[i][j]=self.__matrix[j][i]\n return Matrix(ret_mat)\n \n def rowcount(self):\n return len(self.__matrix)\n\n def colcount(self):\n return len(self.__matrix[0])\n\n def ID(rank):\n ret_mat=[[0]*rank for i in range(rank)]\n for i in range(rank):\n for j in range(rank):\n if i==j:\n ret_mat[i][j]=1\n return Matrix(ret_mat)\n\n def det(self):\n A=self.__matrix\n if len(A) != len(A[0]):\n return \"Must be square!\"\n else:\n if len(A)==2:\n return A[0][0]*A[1][1] - A[0][1]*A[1][0]\n else:\n return sum([ A[0][i] * Matrix([ [ A[j][k] for k in range(len(A)) if k!=i] \\\n for j in range(1,len(A))]).det()*((-1)**i) for i in range(len(A))])\n \n\n def generate_matrix(rownum,colnum, minVal=-100,maxVal=100):\n r_i=random.Random()\n matrix =[[ r_i.randint(minVal,maxVal) for i in range(colnum) ] for j in range(rownum)]\n return Matrix(matrix)\n\n def c_pol(self):\n def __det4pol(self):\n A=self.__matrix\n if len(A) != len(A[0]):\n return \"Must be square!\"\n else:\n if len(A)==2:\n return A[0][0]*A[1][1] - A[0][1]*A[1][0]\n else:\n summ=Sparse_polynomial({0:0})\n for i in range(len(A)):\n summ+= A[0][i] * __det4pol(Matrix([ [ A[j][k] for k in range(len(A)) if k!=i] \\\n for j in range(1,len(A))]))*((-1)**i)\n return summ\n A=self.__matrix\n if len(A) != len(A[0]):\n return \"Must be square!\"\n else:\n retMat=[[0]*len(A) for i in range(len(A))]\n for i in range(len(A)):\n for j in range(len(A)):\n if i!=j:\n retMat[i][j]=Sparse_polynomial({0:A[i][j]})\n else:\n retMat[i][j]=Sparse_polynomial({0:A[i][j],1:-1})\n return __det4pol(Matrix(retMat))\n\n def inverse(self):\n if self.det()==0:\n return \"Matrix not invertible!\"\n else:\n char_pol=self.c_pol()\n inv_pol=Sparse_polynomial({key-1:char_pol.coeffs_dict[key] for \\\n key in char_pol.coeffs_dict.keys() if key!=0})\n return inv_pol.evaluate(self) * (-1/char_pol.coeffs_dict[0])\n \n\n\n\nclass Sparse_polynomial():\n def __init__(self, coeffs_dict):\n self.coeffs_dict = coeffs_dict\n\n def __repr__(self):\n terms = [\" + ( \"+str(self.coeffs_dict[k])+\"* x^\" + str(k)+\")\"\\\n for k in sorted(self.coeffs_dict.keys(), reverse=True)]\n terms = \"\".join(terms)\n return terms[3:] #discard the leftmost ' + ' \n\n def degree(self):\n ## the max key is the degree.\n return max(self.coeffs_dict.keys())\n\n def __eq__(self, other):\n ## check if the dictionaries are the same.\n return self.coeffs_dict==other.coeffs_dict\n \n def __neg__(self):\n return Sparse_polynomial({k:-self.coeffs_dict[k] for k in self.coeffs_dict.keys()})\n\n def __add__(self, other):\n ## new coeffs dict: copy the original dict, don't keep a reference!\n new_coeffs=dict(self.coeffs_dict)\n ## if int and different from 0, add or insert a new coeff of degree 0.\n if type(other) is int:\n if 0 not in new_coeffs.keys():\n if other!=0:\n new_coeffs[0]=other\n else:\n new_coeffs[0]+=other\n ## else, assume polynomial. go over the other's coeffs, add or insert\n ## to new coeffs as needed. 
If sum of coeffs is 0, DO NOT ADD!\n else:\n for k in other.coeffs_dict.keys():\n if k not in new_coeffs.keys():\n new_coeffs[k]=other.coeffs_dict[k]\n else:\n val=new_coeffs[k]+other.coeffs_dict[k]\n if val==0:\n ##put in temp because pop prints\n temp=new_coeffs.pop(k)\n else:\n new_coeffs[k]+=other.coeffs_dict[k]\n ## if no coeffs, add 0:0\n if len(new_coeffs)==0:\n new_coeffs[0]=0\n return Sparse_polynomial(new_coeffs)\n \n\n def __sub__(self, other):\n ## uses add and neg\n return self+-other\n\n def __mul__(self,other):\n new_coeffs=dict(self.coeffs_dict)\n if type(other) is int or type(other) is float:\n for key in new_coeffs.keys():\n new_coeffs[key]*=other\n else:\n temp_dic=dict()\n for k in list(other.coeffs_dict.keys()):\n for kk in list(new_coeffs.keys()):\n if temp_dic.get(k+kk)==None:\n temp_dic[k+kk]=other.coeffs_dict[k]*new_coeffs[kk]\n else:\n temp_dic[k+kk]+=other.coeffs_dict[k]*new_coeffs[kk]\n new_coeffs=temp_dic\n return Sparse_polynomial(new_coeffs)\n \n\n def evaluate(self, x):\n ## use polynomial's print func. replace x with given x (in parentheses\n ## in case of negative), '^' sign used in print with '**' for\n ## python's power func, and use python's 'eval'\n if type(x)!=Matrix:\n return eval(str(self).replace('x','('+str(x)+')').replace('^','**'))\n else:\n retMat=Matrix.ID(x.rowcount())*0\n for key in self.coeffs_dict.keys():\n coeff_func=lambda x: (x**key) * self.coeffs_dict[key]\n retMat+=coeff_func(x)\n return retMat\n \n\n\n\n","sub_path":"Python/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":8708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"618792389","text":"from boa.blockchain.vm.Neo.Storage import GetContext, Get, Put\nfrom boa.code.builtins import concat\n\n\"\"\"\nSets {domain}.latest = {newLatest}\nSets {domain}.{newLatestIndex} = {postHash}\n\"\"\"\ndef addToDomain(domain, postHash):\n latestDomain = concat(domain, \".latest\")\n\n # Setting domainLatest = newLatestIndex\n latestIndex = Get(GetContext, latestDomain)\n if latestIndex == '':\n newLatestIndex = 1\n else:\n newLatestIndex = latestIndex + 1\n Put(GetContext, latestDomain, newLatestIndex)\n\n # Setting domain.newLatestIndex = {postHash}\n tempDomain = concat(domain, \".\")\n indexDomain = concat(tempDomain, newLatestIndex)\n Put(GetContext, indexDomain, postHash)\n\n return True\n ","sub_path":"packages/smart-contract/neoblog/lib/addToDomain.py","file_name":"addToDomain.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"260752596","text":"import numpy as np\r\nfrom numba import autojit\r\nimport matplotlib.pyplot as plt\r\n\r\n@autojit\r\ndef mandelbrot_set(real_num,image_num,max_iter):\r\n z = complex(real_num,image_num)\r\n c = 0.331\r\n \r\n for i in range(max_iter):\r\n z = z*z + c\r\n if abs(z) > 2 :\r\n return i\r\n \r\n return 0\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n yn = 2000\r\n xn = 2000\r\n result = np.zeros([xn,yn])\r\n \r\n for xn_index,real_num in enumerate(np.linspace(-2,1,num=xn)):\r\n for yn_index, image_num in enumerate(np.linspace(-1,1,num=yn)):\r\n result[xn_index,yn_index] = mandelbrot_set(real_num,image_num,20)\r\n \r\n plt.figure(dpi=100)\r\n plt.imshow(np.flipud(result.T),cmap='rainbow_r',interpolation='bilinear',extent=[-2,1,-1,1])\r\n plt.show()\r\n fig = plt.gcf()\r\n my_dpi = fig.get_dpi()\r\n fig.set_size_inches(7840.0/float(my_dpi),7840.0/float(my_dpi))\r\n 
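# size note: figure width in inches times dpi equals 7840, so the saved image\r\n # should come out at 7840x7840 px (savefig uses the figure dpi by default)\r\n 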
plt.savefig('julia_set.jpg') ","sub_path":"julia_set.py","file_name":"julia_set.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"433677237","text":"import sqlite3\nimport math\n#returns list of rows in the following format:\n\"\"\"lockerid_id,name,country,address,zipcode,non_del_days,\n timings_open,timings_closed,status,\n dist,dummyRank\"\"\"\n\n\ndef calcDist(lat1,lat2,lon1,lon2):\n R = 6373.0 #radius of the Earth in km\n lat1 = math.radians(lat1)\n lon1 = math.radians(lon1)\n lat2 = math.radians(lat2)\n lon2 = math.radians(lon2)\n #change in coordinates\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n #Haversine formula\n a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = R * c\n return distance\n\n\ndef getrows(x,y):\n con = sqlite3.connect(\"db.sqlite3\")\n cur = con.cursor()\n cur.execute('delete from locker_rankinglist')\n counter = 0\n lmt=0.1\n while counter<20:\n counter=0\n lmt=lmt+0.1\n qry=\"\"\"select locker_availability.lockerid_id,\n locker_onboard.name,\n locker_onboard.country,\n locker_onboard.address,\n locker_onboard.zipcode,\n locker_availability.non_del_days,\n locker_availability.timings_open,\n locker_availability.timings_closed,\n locker_availability.status,\n locker_onboard.latitude,\n locker_onboard.longitude \n from locker_onboard,locker_availability\n where \n locker_availability.lockerid_id = locker_onboard.lockerid\n and\n locker_onboard.latitude \n between \"\"\"+str(x-lmt)+\"\"\" AND \"\"\"+str(x+lmt)+\"\"\" and \n locker_onboard.longitude \n between \"\"\"+str(y-lmt)+\"\"\" AND \"\"\"+str(y+lmt)+\"\"\";\"\"\"\n lstRow=[]\n dummyRank=0\n for row in cur.execute(qry):\n if row[8]==1:\n dist=calcDist(x,row[9],y,row[10])\n row=row[:9]\n dist=math.sqrt(dist)\n row+=(dist,)\n row+=(dummyRank,)\n if dist<5:\n lstRow.append(row)\n counter+=1\n con.commit()\n con.close()\n print(\"ROW FETCHED\")\n return lstRow","sub_path":"store-ranking-system/algorithm/getRows.py","file_name":"getRows.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"179216114","text":"import argparse\nimport os\nimport sys\n\ntry:\n sys.path.append(os.getcwd())\n from src import const\n from src.config import Config\n from src.log import log\n from src.utils import Util\nexcept ModuleNotFoundError as e:\n raise e\n\n\ndef main():\n parser = argparse.ArgumentParser(description='WalBot docs updater', formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"-o\", \"--out_file\", default=const.COMMANDS_DOC_PATH,\n help=\"Path to output file\")\n args = parser.parse_args()\n log.info(\"Reading config.yaml\")\n config = Util.read_config_file(const.CONFIG_PATH)\n if config is None:\n config = Config()\n config.commands.update()\n log.info(\"Exporting help to {}\".format(args.out_file))\n config.commands.export_help(args.out_file)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tools/docs.py","file_name":"docs.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"125211416","text":"# Vasily came to the post office to pick up a parcel. 
Naturally, on the pickup notice he had to\n# write his last name, first name, and home address, so that nobody else could collect the parcel\n# for him, for example his family members (and they would love to!).\n#\n# Write a function that prints the last name, first name, and address of a specific family member.\n# Print that person's information three times (without a loop).\n#\n# Expected output:\n# Last name: Иванов\n# First name: Василий\n# Street: Пушкина\n# House: 32\n#\n# Last name: Иванов\n# First name: Василий\n# Street: Пушкина\n# House: 32\n#\n# Last name: Иванов\n# First name: Василий\n# Street: Пушкина\n# House: 32\ndef card():\n    print(f'\\nLast name: {secondname}')\n    print(f'First name: {firstname}')\n    print(f'Street: {street}')\n    print(f'House: {home}')\n\nfirstname = 'Василий'\nsecondname = 'Иванов'\nstreet = 'Пушкина'\nhome = 32\n\ncard()\ncard()\ncard()\n","sub_path":"12_def/task_122_3.py","file_name":"task_122_3.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"207203692","text":"from __future__ import division, print_function, absolute_import\nimport logging\n\nimport os\nimport numpy as np\nfrom config import pathDict\nfrom conv_net.train import Train\nfrom conv_net.test import Test\n\nfrom data_transformation.data_prep import get_valid_land_house_ids, dumpStratifiedBatches_balanced_class\n\n\n\n\nimages_per_label = None  # normally 5000 each label is good\nassessor_img_type = 'assessor'\naerial_img_type = 'google_aerial' # 'bing_aerial'\noverlayed_img_type = 'google_overlayed'\nstreetside_img_type = None\n\n\n\n\n\nimage_type = overlayed_img_type  # alternatives: assessor_img_type, aerial_img_type\n\nif image_type == 'assessor':\n    inp_image_shape = [260, 260, 3]\nelif image_type == 'google_aerial':\n    inp_image_shape = [400, 400, 3]\nelif image_type == 'google_overlayed':\n    inp_image_shape = [400, 400, 3]\nelif image_type == 'google_streetside':\n    inp_image_shape = [260, 260, 3]\nelse:\n    raise ValueError('Not a valid image type provided')\n    \nbatch_prepare = False\ntrain = False\ntest = False\nwhich_net = 'resnet'\nmax_batches = 75\n\n\n\nif batch_prepare:\n    cmn_land_pins, cmn_house_pins = get_valid_land_house_ids(\n        aerial_img_type=aerial_img_type,\n        streetside_img_type=streetside_img_type,\n        overlayed_img_type=overlayed_img_type,\n        images_per_label=images_per_label)\n    print (len(cmn_land_pins), len(cmn_house_pins))\n\n    tr_batch_size = 128\n    cv_batch_size = (len(cmn_land_pins) + len(cmn_house_pins)) // 10\n    \n    dumpStratifiedBatches_balanced_class(cmn_land_pins, cmn_house_pins, img_resize_shape=inp_image_shape,\n                                         image_type=image_type, cv_batch_size=cv_batch_size, tr_batch_size=tr_batch_size,\n                                         shuffle_seed=873, get_stats=True, max_batches=max_batches)\n\n\nif train:\n    Train(dict(inp_img_shape=[400,400,3],\n               crop_shape=[96,96,3],\n               out_img_shape=[224, 224, 3],\n               use_checkpoint=True,\n               save_checkpoint=True,\n               write_tensorboard_summary=False\n               ),\n          which_net=which_net, # vgg\n          image_type=image_type).run(num_epochs=3,\n                                     num_batches=max_batches)# + 1)\n\nif test:\n    Test(params=dict(inp_img_shape=[400,400,3],\n                     crop_shape=[96, 96, 3],\n                     out_img_shape=[224, 224, 3]),\n         which_net=which_net,\n         image_type=image_type).run(dump_stats=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"616967436","text":"### Change the optimizer to SGD\nimport torch\nfrom torch import nn\nfrom torch.nn import ReLU\nfrom data.data_utils import Mydataset\nfrom 
torch.utils.data import DataLoader\nfrom logger import Logger\ntrain_dataset = Mydataset(training=True)\ntrain_loader = DataLoader(train_dataset, batch_size=16, shuffle = True)\nvalid_dataset = Mydataset(training=False)\nvalid_loader = DataLoader(valid_dataset, batch_size=32, shuffle = False)\nnum_epoch = 3000\nkl_w = 1\nkl_cycle_w = 1\nrecon_w = 1\nrecon_cycle_w = 1\ndevice = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')\nlogger = Logger('./visualdatasetl1')\n\nclass Encoder(nn.Module):\n    def __init__(self):\n        super(Encoder, self).__init__()\n        self.model = nn.Sequential(nn.Conv2d(512, 1024, 3, stride=2, padding = 1),\n                                   nn.BatchNorm2d(1024),\n                                   ReLU(),\n                                   nn.Conv2d(1024, 4096, kernel_size=4),\n                                   nn.BatchNorm2d(4096),\n                                   ReLU(),\n                                   )\n    def forward(self, input):\n        out = self.model(input)\n        return out\n\nclass Decoder(nn.Module):\n    def __init__(self):\n        super(Decoder, self).__init__()\n        self.model = nn.Sequential(\n            nn.ConvTranspose2d(4096, 1024, kernel_size=4),\n            ReLU(),\n            nn.BatchNorm2d(1024),\n            nn.ConvTranspose2d(1024, 512, kernel_size=3, stride=2, padding=1),\n\n        )\n    def forward(self, input):\n        out = self.model(input)\n        return out\nclass Cycle_VAE(nn.Module):\n    def __init__(self):\n        super(Cycle_VAE, self).__init__()\n        self.encoder_A = Encoder()\n        self.encoder_B = Encoder()\n        self.decoder_A = Decoder()\n        self.decoder_B = Decoder()\n        self.share_encoder = nn.Conv2d(4096, 1024, kernel_size=1, stride=1, padding=0)\n        self.share_decoder = nn.ConvTranspose2d(1024, 4096, kernel_size=1, stride=1)\n        self.share_activation = ReLU()\n        self.batchnorm_A = nn.BatchNorm2d(4096)\n        self.batchnorm_B = nn.BatchNorm2d(4096)\n    def forward(self, input):\n        out_A = self.encoder_A(input[0])\n        out_B = self.encoder_B(input[1])\n        share_A = self.share_encoder(out_A)\n        share_B = self.share_encoder(out_B)\n        z_A = torch.randn(share_A.size()).to(device) + share_A\n        z_B = torch.randn(share_B.size()).to(device) + share_B\n        z_A = self.share_decoder(z_A)\n        z_A = self.share_activation(z_A)\n        z_A =self.batchnorm_A(z_A)\n        z_B = self.share_decoder(z_B)\n        z_B = self.share_activation(z_B)\n        z_B = self.batchnorm_B(z_B)\n        feature_A_rec = self.decoder_A(z_A)\n        feature_B_rec = self.decoder_B(z_B)\n        A_to_B = self.decoder_B(z_A)\n        B_to_A = self.decoder_A(z_B)\n        B_z = self.encoder_B(A_to_B)\n        B_z_share = self.share_encoder(B_z)\n        B_z = B_z_share + torch.randn(B_z_share.size()).to(device)\n        A_z = self.encoder_A(B_to_A)\n        A_z_share = self.share_encoder(A_z)\n        A_z = A_z_share + torch.randn(A_z_share.size()).to(device)\n        feature_A_cycle_share = self.share_decoder(A_z)\n        feature_B_cycle_share = self.share_decoder(B_z)\n        feature_A_cycle_share = self.share_activation(feature_A_cycle_share)\n        feature_B_cycle_share = self.share_activation(feature_B_cycle_share)\n        feature_A_cycle = self.decoder_A(feature_A_cycle_share)\n        feature_B_cycle = self.decoder_B(feature_B_cycle_share)\n        return (feature_A_rec, feature_B_rec, feature_A_cycle, feature_B_cycle, share_A, share_B,\n                A_z_share, B_z_share)\n\nloss = nn.L1Loss()\nmodel = Cycle_VAE()\noptimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\nmodel = model.to(device)\n\n\n\n\ndef compute_kl(mu):\n    mu_2 = torch.pow(mu, 2)\n    encoding_loss = torch.mean(mu_2)\n    return encoding_loss\nfor epoch in range(num_epoch):\n    train_loss_list = []\n    for batch, input in enumerate(train_loader):\n        optimizer.zero_grad()\n        input[0] = input[0].squeeze(dim=1)\n        input[1] = 
input[1].squeeze(dim=1)\n        input[0] = input[0].to(device)\n        input[1] = input[1].to(device)\n        cache = model(input)\n        A = input[0]\n        B = input[1]\n        Vae_loss = loss(cache[0], A) + loss(cache[1], B)\n        Cycle_loss = loss(cache[2], A) + loss(cache[3], B)\n        KL_A_loss = compute_kl(cache[4])\n        KL_B_loss = compute_kl(cache[5])\n        KL_A_Rec_Loss = compute_kl(cache[6])\n        KL_B_Rec_Loss = compute_kl(cache[7])\n        # keep the losses as tensors here; calling .item() would detach them and break backward()\n        total_loss = Vae_loss * recon_w + Cycle_loss * recon_cycle_w + \\\n                     (KL_A_loss + KL_B_loss) * kl_w + \\\n                     (KL_A_Rec_Loss + KL_B_Rec_Loss) * kl_cycle_w\n        total_loss.backward()\n        optimizer.step()\n        train_loss_list.append(total_loss.item())\n    logger.scalar_summary('Train_Loss', total_loss.item(), epoch + 1)\n\n    if epoch%5 == 0:\n        model.eval()  # disable batchnorm updates while validating\n        with torch.no_grad():\n            loss_list = []\n            Vae_loss_list = []\n            Cycle_loss_list = []\n            KL_B_loss_list = []\n            KL_A_loss_list = []\n            KL_B_Rec_Loss_list = []\n            KL_A_Rec_Loss_list = []\n            for batch, input in enumerate(valid_loader):\n                input[0] = input[0].squeeze(dim=1)\n                input[1] = input[1].squeeze(dim=1)\n                A = input[0].to(device)\n                B = input[1].to(device)\n                cache = model([A, B])\n                Vae_loss = loss(cache[0], A) + loss(cache[1], B)\n                Cycle_loss = loss(cache[2], A) + loss(cache[3], B)\n                KL_A_loss = compute_kl(cache[4])\n                KL_B_loss = compute_kl(cache[5])\n                KL_A_Rec_Loss = compute_kl(cache[6])\n                KL_B_Rec_Loss = compute_kl(cache[7])\n                total_loss = Vae_loss.item()*recon_w + Cycle_loss.item()*recon_cycle_w\\\n                             + (KL_A_loss.item() + KL_B_loss.item())*kl_w + \\\n                             (KL_A_Rec_Loss.item() + KL_B_Rec_Loss.item())*kl_cycle_w\n                loss_list.append(total_loss)\n                Vae_loss_list.append(Vae_loss.item())\n                Cycle_loss_list.append(Cycle_loss.item())\n                KL_A_loss_list.append(KL_A_loss.item())\n                KL_B_loss_list.append(KL_B_loss.item())\n                KL_A_Rec_Loss_list.append(KL_A_Rec_Loss.item())\n                KL_B_Rec_Loss_list.append(KL_B_Rec_Loss.item())\n            valid_loss = sum(loss_list)/len(loss_list)\n            Vae_loss_data = sum(Vae_loss_list)/len(Vae_loss_list)\n            Cycle_loss_data = sum(Cycle_loss_list)/len(Cycle_loss_list)\n            KL_A_loss_data = sum(KL_A_loss_list)/len(KL_A_loss_list)\n            KL_B_loss_data = sum(KL_B_loss_list)/len(KL_B_loss_list)\n            KL_A_Rec_Loss_data = sum(KL_A_Rec_Loss_list)/len(KL_A_Rec_Loss_list)\n            KL_B_Rec_Loss_data = sum(KL_B_Rec_Loss_list)/len(KL_B_Rec_Loss_list)\n            logger.scalar_summary('Valid_Loss', valid_loss, epoch + 1)\n            logger.scalar_summary('Valid_Vae_Loss', Vae_loss_data, epoch + 1)\n            logger.scalar_summary('Valid_Cycle_Loss', Cycle_loss_data, epoch + 1)\n            logger.scalar_summary('Valid_KL_A_Loss', KL_A_loss_data, epoch + 1)\n            logger.scalar_summary('Valid_KL_B_Loss', KL_B_loss_data, epoch + 1)\n            logger.scalar_summary('Valid_KL_A_Rec_Loss', KL_A_Rec_Loss_data, epoch+1)\n            logger.scalar_summary('Valid_KL_B_Rec_Loss', KL_B_Rec_Loss_data, epoch+1)\n            print('when epoch is ' + str(epoch+1)+\" loss is: \" + str(valid_loss))\n        model.train()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Cycle_VAE_SGD.py","file_name":"Cycle_VAE_SGD.py","file_ext":"py","file_size_in_byte":7931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"593137924","text":"from thing import Thing\n# # or import *\n# # as opposed to:\n# import thing\n\n# create another thing and save it\nbar = Thing(\"bar\")\nbar.save(\"bar.pickle\")\n\n# load the two things from their files\nbaz = Thing.load(\"bar.pickle\")\n\n# class Thing(object):\n#     pass\n\nfoo = Thing.load(\"foo.pickle\")\n\n# show the 
things\nprint(foo)\nprint(bar)\nprint(baz)\n","sub_path":"coding_web_api_etc/pickle/thingtest_1.py","file_name":"thingtest_1.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"10536439","text":"#!/usr/bin/python3\nfrom configparser import ConfigParser\nimport requests, json, time,os\n\nPATH = os.getcwd()\n\"\"\"------------GET TRM API CREDENTIALS----------- \"\"\"\nPARSER = ConfigParser()\nPARSER.read('conf.ini')\nUSERNAME = PARSER.get('trm_api','trm_username')\nAPIKEY = PARSER.get('trm_api','trm_apikey')\nTRMURL = 'https://api.trumpia.com/rest/v1/'+USERNAME+'/'\nHEADER = {\n    'Content-Type': 'application/json',\n    'x-apikey': APIKEY\n}\nMETHOD = ['PUT','POST','GET','DELETE']\nFUNCTION = ['subscription','report']\n\nclass Trumpia:\n\n    def __init__(self):\n        pass\n\n    def getSearchSubscription(self,mobile_number):\n        retry = 0\n        if len(mobile_number) <= 0:\n            print(mobile_number)\n            return\n        while retry < 3:\n            response = None  # stays None when the request itself raises, e.g. on timeout\n            try:\n                response = requests.request(METHOD[2],TRMURL + FUNCTION[0]+'/'+'search?search_type=2&search_data='+mobile_number,headers = HEADER)\n                response.raise_for_status()\n            except requests.exceptions.HTTPError as err:\n                print('Error: {}'.format(str(err)))\n            except requests.exceptions.Timeout as err_time:\n                print('Error: {}'.format(str(err_time)))\n\n            if response is not None and response.status_code == 200:\n                json_response = response.json()\n                if 'status_code' in json_response:\n                    status_code = json_response['status_code']\n                    print('trumpia_Utility | Status code: {}'.format(status_code))\n                    subscription_status = self.subscriptionStatusCodes(status_code)\n                    print('trumpia_Utility | {}'.format(status_code + ': ' + subscription_status))\n                    resp = str(status_code) + ': ' +str(subscription_status)\n                    return resp\n                elif 'subscription_id_list' in json_response:\n                    print('trumpia_Utility | {}'.format(json_response))\n                    return json_response\n                else:\n                    pass\n\n            else:\n                retry+=1\n                print('Retry: {}'.format(retry))\n\n\n    def getSubscription(self,subscription_id):\n        retry = 0\n        if len(subscription_id) <= 0:\n            print(subscription_id)\n            return\n        while retry < 3:\n            response = None\n            try:\n                response = requests.request(METHOD[2],TRMURL + FUNCTION[0]+'/'+subscription_id,headers = HEADER)\n                response.raise_for_status()\n            except requests.exceptions.HTTPError as err:\n                print('trumpia_Utility | Error: {}'.format(str(err)))\n            except requests.exceptions.Timeout as err_time:\n                print('trumpia_Utility | Error: {}'.format(str(err_time)))\n\n            if response is not None and response.status_code == 200:\n                json_response = response.json()\n                print(json_response)\n                print('trumpia_Utility GET SUB BY ID| json_response: {}'.format(json_response))\n                return json_response\n            else:\n                retry+=1\n                print('trumpia_Utility | Retry: {}'.format(retry))\n\n\n    def putSubscription(self,body):\n        retry = 0\n        if len(body) <= 0:\n            print(body)\n            return\n        while retry < 3:\n            response = None\n            try:\n                response = requests.request(METHOD[0],TRMURL + FUNCTION[0],json = body,headers = HEADER)\n                response.raise_for_status()\n            except requests.exceptions.HTTPError as err:\n                print('trumpia_Utility | Error: {}'.format(str(err)))\n            except requests.exceptions.Timeout as err_time:\n                print('trumpia_Utility | Error: {}'.format(str(err_time)))\n\n            if response is not None and response.status_code == 200:\n                json_response = response.json()\n                print(json_response)\n                request_id = json_response['request_id']\n                print('trumpia_Utility | Request id: {}'.format(request_id))\n                return request_id\n            else:\n                retry+=1\n                print('trumpia_Utility | Retry: {}'.format(retry))\n\n    def 
postSubscription(self,subscription_id,body):\n        retry = 0\n        if len(str(subscription_id)) <= 0 or len(body) <= 0:\n            print(subscription_id)\n            return\n        while retry < 3:\n            response = None\n            try:\n                response = requests.request(METHOD[1],TRMURL + FUNCTION[0]+'/'+str(subscription_id),json = body,headers = HEADER)\n                response.raise_for_status()\n            except requests.exceptions.HTTPError as err:\n                print('trumpia_Utility | Error: {}'.format(str(err)))\n            except requests.exceptions.Timeout as err_time:\n                print('trumpia_Utility | Error: {}'.format(str(err_time)))\n\n            if response is not None and response.status_code == 200:\n                json_response = response.json()\n                print(json_response)\n                request_id = json_response['request_id']\n                print('trumpia_Utility | Request id: {}'.format(request_id))\n                return request_id\n            else:\n                retry+=1\n                print('trumpia_Utility | Retry: {}'.format(retry))\n\n    def getStatusReport(self,request_id):\n        retry = 0\n        if len(request_id) <= 0:\n            print(request_id)\n            return\n        while retry < 3:\n            response = None\n            try:\n                response = requests.request(METHOD[2],TRMURL + FUNCTION[1]+'/'+request_id,headers = HEADER)\n                response.raise_for_status()\n            except requests.exceptions.HTTPError as err:\n                print('trumpia_Utility | Error: {}'.format(str(err)))\n            except requests.exceptions.Timeout as err_time:\n                print('trumpia_Utility | Error: {}'.format(str(err_time)))\n\n            if response is not None and response.status_code == 200:\n                json_response = response.json()\n                if 'status_code' in json_response:\n                    status_code = json_response['status_code']\n                    print('trumpia_Utility | Status code: {}'.format(status_code))\n                    subscription_status = self.subscriptionStatusCodes(status_code)\n                    print(status_code + ': ' + subscription_status)\n                    return\n                if 'subscription_id' in json_response:\n                    subscription_id = json_response['subscription_id']\n                    print('trumpia_Utility |POST SUBSCRIPTION ID: {}'.format(subscription_id))\n                    return subscription_id\n\n                for data in json_response:\n                    #FAILED PUT SUBSCRIPTION\n                    if 'status_code' in data:\n                        status_code = data['status_code']\n                        print('trumpia_Utility |Status code: {}'.format(status_code))\n                        subscription_status = self.subscriptionStatusCodes(status_code)\n                        print(status_code + ': ' + subscription_status)\n                    #SUCCESSFUL PUT SUBSCRIPTION\n                    if 'subscription_id' in data:\n                        subscription_id = data['subscription_id']\n                        print('SUCCESS PUT SUBSCRIPTION ID: {}'.format(subscription_id))\n                        return subscription_id\n                break\n            else:\n                retry+=1\n                print('Retry: {}'.format(retry))\n\n    def subscriptionStatusCodes(self,status_code):\n        subscription_status_codes = {}\n        with open(PATH+'/trumpia_Utility/subscriptionStatusCodes.txt','r') as file:\n            for codes in file:\n                key, value = codes.split('\\t')\n                subscription_status_codes[key] = value\n        if status_code in subscription_status_codes:\n            subscription_status = subscription_status_codes[status_code]\n            return subscription_status\n        if status_code == \"MPCE4001\":\n            subscription_status = \"MPCE4001\"\n            return subscription_status\n        else:\n            print('Status code: {} not in subscriptionStatusCodes'.format(status_code))\n            subscription_status = \"UNKNOWN\"\n            return subscription_status\n","sub_path":"trumpia_Utility/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"580355333","text":"#RPSLS By James Haywood\nimport random\npoints = 0\ndef game():\n    global points\n    print (\"Points:\", points, \"\")\n    print(\"Please enter a symbol.\")\n    print(\"Options: Rock, Paper, Scissors, Lizard, or Spock\")\n    player = input(\"> \")\n\n    if player in (\"Rock\", 
\"rock\"):\n symbol = \"rock\"\n elif player in (\"Paper\", \"paper\"):\n symbol = \"paper\"\n elif player in (\"Scissors\", \"scissors\"):\n symbol = \"scissors\"\n elif player in (\"Lizard\", \"lizard\"):\n symbol = \"lizard\"\n elif player in (\"Spock\", \"spock\"):\n symbol = \"scissors\"\n else:\n print(\"Not a valid option.\")\n game()\n\n print (\"Computer is choosing...\")\n computer = (random.randint(1, 5))\n\n if computer == 1:\n csymbol = \"rock\"\n elif computer == 2:\n csymbol = \"paper\"\n elif computer == 3:\n csymbol = \"scissors\"\n elif computer == 4:\n csymbol = \"lizard\"\n elif computer == 5:\n csymbol = \"spock\"\n\n\n print(\"You chose:\", symbol, \"\")\n print(\"Computer chose:\", csymbol, \"\")\n if symbol == \"rock\" and csymbol in (\"lizard\", \"scissors\"):\n print (\"Player wins!\")\n points = points + 1\n game()\n elif symbol == \"paper\" and csymbol in (\"rock\", \"spock\"):\n print (\"Player wins!\")\n points = points + 1\n game()\n elif symbol == \"scissors\" and csymbol in (\"paper\", \"lizard\"):\n print (\"Player wins!\")\n points = points + 1\n game()\n elif symbol == \"lizard\" and csymbol in (\"paper\", \"spock\"):\n print (\"Player wins!\")\n points = points + 1\n game()\n elif symbol == \"spock\" and csymbol in (\"rock\", \"scissors\"):\n print (\"Player wins!\")\n points = points + 1\n game()\n elif symbol == csymbol:\n print (\"Player and Computer tie!\")\n game()\n else:\n print (\"Computer wins!\")\n points = points - 1\n game()\n\n\n\ngame()\n \n","sub_path":"8th Grade CompSci (Python)/14 RPSLS.py","file_name":"14 RPSLS.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"437400119","text":"from datetime import datetime\n\n# 날짜 포맷\nnow = datetime.now()\nstamp = now.strftime('%Y-%m-%d')\n\n# 학생 명단\nstudents = [\"김용열\", \"김상범\", \"박순우\", \"이준범\", \"정회웅\"]\n\n# 출결 클래스\nclass Student(object):\n\tdef __init__(self, name):\n\t\tself.name = name\n\nclass Attendance(object):\n\tattendance = {}\n\tdef attend(self, name):\n\t\tif name in students:\n\t\t\tself.attendance[name] = stamp\n\t\t\tprint (\" %s님 출석되었습니다. 
| Attendance date: %s \" % (name, stamp))\n\t\telse:\n\t\t\tprint (\"Please check the name.\")\n\tdef list(self):\n\t\tfor name in self.attendance:\n\t\t\tprint (\"Name: %s, Attendance date: %s\" % (name, self.attendance[name]))\n\n","sub_path":"peter_attendance2.py","file_name":"peter_attendance2.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"609406371","text":"from tkinter import *\nfrom tkinter import ttk\n\n\nclass LogMatchFrame(Frame):\n    def __init__(self, master, stopwatch, log_function):\n        super().__init__(master)\n        self.stopwatch = stopwatch\n        self.function = log_function\n\n        self.won_label = Label(self, text='Won?')\n        self.won = BooleanVar()\n        self.won.set(True)\n        self.win = Radiobutton(self, text='Win', variable=self.won, value=True)\n        self.lose = Radiobutton(self, text='Lose', variable=self.won, value=False)\n\n        self.first_label = Label(self, text='First?')\n        self.is_first = BooleanVar()\n        self.is_first.set(True)\n        self.first = Radiobutton(self, text='First', variable=self.is_first, value=True)\n        self.second = Radiobutton(self, text='Second', variable=self.is_first, value=False)\n\n        self.against_label = Label(self, text='Against?')\n        self.clan = IntVar()\n        self.clan.set(1)\n        self.clan_frame = Frame(self)\n        self.forest = Radiobutton(self.clan_frame, text='Forest', variable=self.clan, value=1)\n        self.sword = Radiobutton(self.clan_frame, text='Sword', variable=self.clan, value=2)\n        self.rune = Radiobutton(self.clan_frame, text='Rune', variable=self.clan, value=3)\n        self.dragon = Radiobutton(self.clan_frame, text='Dragon', variable=self.clan, value=4)\n        self.shadow = Radiobutton(self.clan_frame, text='Shadow', variable=self.clan, value=5)\n        self.blood = Radiobutton(self.clan_frame, text='Blood', variable=self.clan, value=6)\n        self.haven = Radiobutton(self.clan_frame, text='Haven', variable=self.clan, value=7)\n        self.portal = Radiobutton(self.clan_frame, text='Portal', variable=self.clan, value=8)\n\n        self.duration = self.stopwatch.get_duration()\n        self.duration_label = Label(self,\n                                    text='Duration: {:02d}:{:02d}'.format(int(self.duration / 60),\n                                                                          int(self.duration % 60)))\n\n        self.log_button = ttk.Button(self, text='Log', command=self.log)\n\n        self.adjust_widgets()\n\n    def log(self):\n        self.function(self.won.get(), self.clan.get(), self.is_first.get(), self.duration)\n\n    def adjust_widgets(self):\n        self.won_label.grid(row=0, column=0, columnspan=2, pady=5)\n        self.win.grid(row=1, column=0)\n        self.lose.grid(row=1, column=1)\n\n        self.first_label.grid(row=2, column=0, columnspan=2, pady=5)\n        self.first.grid(row=3, column=0)\n        self.second.grid(row=3, column=1)\n\n        self.against_label.grid(row=4, column=0, columnspan=2, pady=5)\n        self.clan_frame.grid(row=5, column=0, columnspan=2)\n\n        self.forest.grid(row=0, column=0)\n        self.sword.grid(row=0, column=1)\n        self.rune.grid(row=0, column=2)\n        self.dragon.grid(row=0, column=3)\n        self.shadow.grid(row=1, column=0)\n        self.blood.grid(row=1, column=1)\n        self.haven.grid(row=1, column=2)\n        self.portal.grid(row=1, column=3)\n\n        self.duration_label.grid(row=6, column=0, columnspan=2, sticky=N+E+W+S, pady=5)\n\n        self.log_button.grid(row=7, column=0, columnspan=2, sticky=N+E+W+S, padx=5, pady=5)","sub_path":"sv_tracker/gui/log_match_frame.py","file_name":"log_match_frame.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"358530791","text":"class Solution:\n    def smallerNumbersThanCurrent(self, nums: [int]) -> 
[int]:\n result = [0 for i in nums]\n\n import numpy\n sorted_nums = numpy.argsort(numpy.asarray(nums)) \n\n for i in range( 1, len(sorted_nums) ):\n if nums[ sorted_nums[i] ] == nums[ sorted_nums[i-1] ]:\n result[ sorted_nums[i] ] = result[ sorted_nums[i-1] ]\n else:\n result[ sorted_nums[i] ] = i\n\n return result\n\n\nobj = Solution()\nprint([4,0,1,1,3], obj.smallerNumbersThanCurrent([8,1,2,2,3] ) )\nprint([2,1,0,3], obj.smallerNumbersThanCurrent([6,5,4,8] ) )\nprint([0,0,0,0], obj.smallerNumbersThanCurrent([7,7,7,7] ) ) ","sub_path":"Weekly Contest 178/1365. How Many Numbers Are Smaller Than the Current Number.py","file_name":"1365. How Many Numbers Are Smaller Than the Current Number.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"439139822","text":"import src.encryptor as encryptor\n\n# English letters frequency\nfrequencies = {\n \"a\": 0.0817,\n \"b\": 0.0149,\n \"c\": 0.0278,\n \"d\": 0.0425,\n \"e\": 0.127,\n \"f\": 0.0229,\n \"g\": 0.0202,\n \"h\": 0.0609,\n \"i\": 0.0697,\n \"j\": 0.0015,\n \"k\": 0.0077,\n \"l\": 0.0403,\n \"m\": 0.0241,\n \"n\": 0.0675,\n \"o\": 0.0751,\n \"p\": 0.0193,\n \"q\": 0.001,\n \"r\": 0.0599,\n \"s\": 0.0633,\n \"t\": 0.0906,\n \"u\": 0.0276,\n \"v\": 0.0098,\n \"w\": 0.0236,\n \"x\": 0.0015,\n \"y\": 0.0197,\n \"z\": 0.0007\n}\n\n\ndef frequency_analysis(text, key):\n test_frequency = {}\n text = encryptor.crypt(text, key)\n for letter in text:\n letter = letter.lower()\n if letter in frequencies and letter not in test_frequency:\n test_frequency[letter] = text.count(letter)/len(text)\n delta = 0\n for key, value in test_frequency.items():\n delta += abs(frequencies[key] - value)\n return delta\n\n\ndef analysis(text):\n keys = {}\n for i in range(1, len(encryptor.alphabet)):\n keys[i] = frequency_analysis(text, -i)\n sorted_keys = sorted(keys.items(), key=lambda kv: kv[1])\n return sorted_keys[0][0]\n\n\ndef decrypt():\n text = encryptor.read_file(encryptor.cipher_url)\n key = analysis(text)\n open_text = encryptor.crypt(text, -key)\n encryptor.write_file(encryptor.open_text_url, open_text)\n print(\"Decrypted with key\", key)\n\n\nif __name__ == '__main__':\n decrypt()\n\n\n","sub_path":"src/decryptor.py","file_name":"decryptor.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"65247886","text":"import time\n\nimport razorpay\nfrom django.shortcuts import render\nfrom django.template import RequestContext\nfrom django.template.context_processors import csrf\n\nfrom .models import Orders\nfrom django.http import HttpResponse\n\n\nclient = razorpay.Client(auth=(\"rzp_test_Vg4TId4a7WErvz\", \"eiTX74Hru52sMVOs2Bp5HPJo\")) #basic authentication , without auth , will never pass the gateway, between merchnact and razorpay\n\n\n\nkey_id ='rzp_test_Vg4TId4a7WErvz'\nkey_secret='eiTX74Hru52sMVOs2Bp5HPJo'\n\n# Create your views here.\ndef index(request):\n return render(request, 'pg/home.html')\n\ndef order(request):\n if request.method=='POST':\n name = request.POST.get('name','')\n address = request.POST.get('address','')\n email = request.POST.get('email','')\n contact = request.POST.get('contact','')\n amount = request.POST.get('amount','')\n order = Orders(name=name, address=address, email=email, contact=contact, amount=amount)\n #print(order.__dict__)\n amount_inr = int(amount)*100\n transfers= [\n {\n \"account\": \"acc_G6Arrf7RMyX7Rf\",\n \"amount\": 
1000,\n \"currency\": \"INR\",\n \"notes\": {\n \"branch\": \"Acme Corp Bangalore North\",\n \"name\": \"Gaurav Kumar\"\n },\n \"linked_account_notes\": [\n \"branch\"\n ],\n \"on_hold\": 1,\n \"on_hold_until\": 1671222870\n }\n ]\n\n # context = RequestContext(request)\n # context_dict = {}\n # context_dict.update(csrf(request))\n # print(context_dict, \"----00000-----\")\n\n dict={'order_id':\"1\", 'adress':address} #15 key value pairs optional, if merchant wants to send anything additional\n\n #DATA={'amount':amount_inr, 'currency':'INR', 'receipt':order.order_id, 'payment_capture':'1', 'notes':dict}\n DATA = {'amount': amount_inr, 'currency': 'INR', 'receipt': order.order_id,'payment_capture':'1',\n 'notes': dict, 'transfers':transfers}\n # DATA = {'amount': amount_inr, 'currency': 'INR', 'receipt': order.order_id, 'payment_capture': '1',\n # 'notes': dict}\n\n order_created= client.order.create(data=DATA) #dynamic order create\n\n print(order_created, type(order_created), \"-----order created-----\")\n global order_id_server, amount_paid\n order_id_server= order_created.get('id')\n #order_id_server = client.order.fetch('id')\n #order.save()\n print(order_id_server, \"-----\")\n amount_paid = int(order_created.get('amount')) /100\n para={ 'order_id' : order_created.get('id'),\n 'amount': order_created.get('amount_due'),\n 'currency': order_created.get('currency'),\n 'key_id':key_id,\n 'key_secret':key_secret}\n\n #return render(request, 'pg/callback.html', para, context_dict.get('csrf_token'))\n return render(request, 'pg/handlerfunc.html', para)\n #return render(request, 'pg/paymentpage.html', para)\n #return render(request, 'pg/manual_chcekout.html', para)\n\n\ndef payment(request):\n #payment notes, manual chcekout\n\n if request.method=='POST':\n payment_id = request.POST.get('razorpay_payment_id')\n print(payment_id, \"-----\")\n signature= request.POST.get('razorpay_signature')\n print(payment_id,order_id_server, signature, \"-----\")\n params_dict = {\n 'razorpay_order_id': order_id_server, #server_side if order id not passed still payment will be done and it will fail in verification step\n 'razorpay_payment_id': payment_id,\n 'razorpay_signature': signature\n }\n data = {'payment_id':'pay_G7nH5vQ10tl7S7'\n }\n\n transfers = client.transfer.all(data)\n print(transfers)\n print(dict(transfers), \"--------------transfers dict--------\")\n data={'payment_id':payment_id,\n 'transfers': [\n {\n \"account\": \"acc_G6Arrf7RMyX7Rf\",\n \"amount\": 1000,\n \"currency\": \"INR\",\n \"notes\": {\n \"branch\": \"Acme Corp Bangalore North\",\n \"name\": \"Gaurav Kumar\"\n },\n \"linked_account_notes\": [\n \"branch\"\n ],\n \"on_hold\": 1,\n \"on_hold_until\": 1671222870\n }\n ]}\n transfers= client.transfer.create(data)\n print(\"=====transfer===\", transfers)\n\n verified= client.utility.verify_payment_signature(params_dict) #None\n print(verified, \"----Verified---\")\n user_details ={\n 'order_id_server':order_id_server,\n 'amount_paid':amount_paid,\n 'payment_id':payment_id\n }\n\n return render(request, 'pg/done.html', user_details)\n\ndef manual(request):\n if request.is_ajax():\n message = \"Yes, AJAX!\"\n else:\n message = \"Not Ajax\"\n return HttpResponse(message)\n\ndef reverse(request):\n if request.method=='POST':\n payment_id = request.POST.get('payment_id')\n order_id = request.POST.get('order_id')\n data={'payment_id': payment_id}\n print(payment_id,\"line 147\")\n print(order_id)\n a=client.transfer.all(data)\n print(a,\"line 150\")\n for i in a['items']:\n 
if(i['source']==order_id):\n                print(i['id'])\n                trf_id=i['id']\n                data1={'amount':1000}\n                b=client.transfer.reverse(trf_id,data1)\n                print(b)\n                print(\"reversed\")\n        # data={\"amount\": 100,\n        #     \"reverse_all\": 1}\n        # a=client.transfer.reverse(payment_id, data)\n        # print(a)\n        return HttpResponse('found')\n\ndef create_plan(request):\n    data = {\n    \"period\": \"weekly\",\n    \"interval\": 1,\n    \"item\": {\n        \"name\": \"Test plan - Weekly\",\n        \"amount\": 69900,\n        \"currency\": \"INR\",\n        \"description\": \"Description for the test plan\"\n    },\n    \"notes\": {\n        \"notes_key_1\": \"Tea, Earl Grey, Hot\",\n        \"notes_key_2\": \"Tea, Earl Grey… decaf.\"\n    }\n}\n    plan = client.plan.create(data)  # local name 'plan' avoids shadowing this function\n    print(plan, \"plan--------\")\n    return plan\n\ndef subscription(request):\n    #plan = create_plan(request)\n    if request.method=='POST':\n        name = request.POST.get('name','')\n        address = request.POST.get('address','')\n        email = request.POST.get('email','')\n        contact = request.POST.get('contact','')\n        amount = request.POST.get('amount','')\n        Data={\n            #\"plan_id\": plan.get('id'), #plan_id dynamic\n            \"plan_id\":\"plan_GA43WyGstP08Ve\",\n            \"total_count\": 6,\n            \"quantity\": 1,\n            \"customer_notify\": 1,\n            \"addons\": [\n                {\n                    \"item\": {\n                        \"name\": \"Registration-one time\",\n                        \"amount\": 10000,\n                        \"currency\": \"INR\"\n                    }\n                }\n            ],\n            \"notes\": {\n                \"notes_key_1\": \"Tea, Earl Grey, Hot\",\n                \"notes_key_2\": \"Tea, Earl Grey… decaf.\"\n            }\n        }\n\n        subscription_create = client.subscription.create(data=Data)\n        print(subscription_create, \"=====subscription====\")\n        para={'order_id' : subscription_create.get('id'),\n              'plan_id': subscription_create.get('plan_id'),\n              'key_id':key_id,\n              'key_secret':key_secret}\n        print(para, \"para-----\")\n        return render(request, 'pg/handler_subscription.html', para)\n","sub_path":"paymentgateway/pg/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"630730864","text":"#load_save.py\r\nimport g,ctry\r\n\r\nloaded=[] # list of strings\r\n\r\ndef load(f):\r\n    global loaded\r\n    try:\r\n        for line in f.readlines():\r\n            loaded.append(line)\r\n    except Exception:\r\n        pass\r\n\r\ndef save(f):\r\n    for c in g.answers:\r\n        f.write(c+'\\n')\r\n\r\n# note need for rstrip() on strings\r\ndef retrieve():\r\n    global loaded\r\n    if len(loaded)>0:\r\n        g.answers=[]; v=65\r\n        for line in loaded:\r\n            lne=line.rstrip()\r\n            g.answers.append(lne)\r\n            if lne!='' and lne!='none': ctry.text(chr(v),lne)\r\n            v+=1\r\n\r\n\r\n    \r\n","sub_path":"load_save.py","file_name":"load_save.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"8911202","text":"model_params = dict(\n    image_shape=(1, 256, 256),\n    n_part_caps=30,\n    n_obj_caps=16,\n    scae_regression_params=dict(\n        is_active=True,\n        loss='mse',\n        attention_hp=1,\n    ),\n    scae_classification_params=dict(\n        is_active=False,\n        n_classes=1,\n    ),\n    pcae_cnn_encoder_params=dict(\n        out_channels=[128] * 4,\n        kernel_sizes=[3, 3, 3, 3],\n        strides=[2, 2, 1, 1],\n        activate_final=True\n    ),\n    pcae_encoder_params=dict(\n        n_poses=6,\n        n_special_features=16,\n        similarity_transform=False,\n    ),\n    pcae_template_generator_params=dict(\n        template_size=(32, 32),\n        template_nonlin='sigmoid',\n        colorize_templates=True,\n        color_nonlin='sigmoid',\n    ),\n    pcae_decoder_params=dict(\n        learn_output_scale=False,\n        use_alpha_channel=True,\n        background_value=True,\n    ),\n    ocae_encoder_set_transformer_params=dict(\n        n_layers=3,\n        n_heads=1,\n        dim_hidden=16,\n        dim_out=256,\n        layer_norm=True,\n    ),\n    obj_age_regressor_params=dict(\n        hidden_sizes=[128, 64, 1],\n        inner_activation='relu',\n        final_activation=None,\n        bias=True,\n        dropout=0,\n    ),\n    ocae_decoder_capsule_params=dict(\n        
dim_caps=32,\n hidden_sizes=(128,),\n caps_dropout_rate=0.0,\n learn_vote_scale=True,\n allow_deformations=True,\n noise_type='uniform',\n noise_scale=4.,\n similarity_transform=False,\n ),\n scae_params=dict(\n vote_type='enc',\n presence_type='enc',\n stop_grad_caps_input=True,\n stop_grad_caps_target=True,\n caps_ll_weight=1.,\n cpr_dynamic_reg_weight=10,\n prior_sparsity_loss_type='l2',\n prior_within_example_sparsity_weight=2.0,\n prior_between_example_sparsity_weight=0.2, #from 0.35 to 0.2 \n posterior_sparsity_loss_type='entropy',\n posterior_within_example_sparsity_weight=0.5, #from 0.7 to 0.5 \n posterior_between_example_sparsity_weight=0.2,\n reconstruct_alternatives=False,\n )\n)\n","sub_path":"torch_scae_experiments/boneage/hparams.py","file_name":"hparams.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"91943504","text":"import os\nimport os.path\nimport numpy as np\nimport copy\n\nimport torch\n\nfrom .base import BaseDataset\nfrom . import augmentation as psp_trsform\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nimport random\n\n\n\nclass city_dset(BaseDataset):\n def __init__(self, data_root, data_list, trs_form, seed, n_sup, split='val', unsup = False, coarse=False, coarse_num=3000, fm=False, acp=False, paste_trs=None, prob=0.5, acm=False):\n super(city_dset, self).__init__(data_list)\n self.data_root = data_root\n self.transform = trs_form\n self.paste_trs = paste_trs\n self.fm = fm\n self.acp = acp and split == 'train'\n self.prob = prob\n self.acm = acm\n random.seed(seed)\n if(len(self.list_sample)>=n_sup) and split =='train':\n if unsup and not coarse:\n self.list_sample_new = random.sample(self.list_sample,n_sup)\n # transform to tuple\n for i in range(len(self.list_sample)):\n self.list_sample[i]=tuple(self.list_sample[i])\n for i in range(len(self.list_sample_new)):\n self.list_sample_new[i]=tuple(self.list_sample_new[i])\n self.list_sample_unsup = list(set(self.list_sample)-set(self.list_sample_new))\n self.list_sample_new = self.list_sample_unsup\n elif unsup and coarse:\n try:\n coarse_list = data_list.replace('fine_train','coarse_train')\n self.list_sample_new = [line.strip().split(\" \") for line in open(coarse_list, 'r')]\n except:\n coarse_list = data_list.replace('fine_trainval','coarse_train')\n self.list_sample_new = [line.strip().split(\" \") for line in open(coarse_list, 'r')]\n random.seed(seed)\n if coarse_num < len(self.list_sample_new):\n self.list_sample_new = random.sample(self.list_sample_new, coarse_num)\n else:\n random.shuffle(self.list_sample_new)\n else:\n self.list_sample_new = random.sample(self.list_sample, n_sup) \n else:\n self.list_sample_new = self.list_sample\n\n def __getitem__(self, index):\n # load image and its label\n image_path = os.path.join(self.data_root, self.list_sample_new[index][0])\n label_path = os.path.join(self.data_root, self.list_sample_new[index][1])\n image = self.img_loader(image_path, 'RGB')\n label = self.img_loader(label_path, 'L')\n\n # loader paste img and mask\n if self.acp:\n if random.random() > self.prob:\n paste_idx = random.randint(0, self.__len__()-1)\n paste_img_path = os.path.join(self.data_root, self.list_sample_new[paste_idx][0])\n paste_img = self.img_loader(paste_img_path, 'RGB')\n paste_label_path = os.path.join(self.data_root, self.list_sample_new[paste_idx][1])\n paste_label = self.img_loader(paste_label_path, 'L')\n paste_img, 
paste_label = self.paste_trs(paste_img, paste_label)\n else:\n paste_img, paste_label = None, None\n\n if self.fm:\n inputs = self.transform(image, label)\n if len(inputs) == 5:\n image_weak, label_weak, image_strong, label_strong, valid = inputs\n return image_weak[0], label_weak[0,0].long(), image_strong[0], label_strong[0,0].long(), valid[0,0].long()\n else:\n image, label, valid = inputs\n return image[0], label[0,0].long(),valid[0,0].long()\n \n elif self.acm:\n image, label = self.transform(image, label)\n return image[0], label[0, 0].long(), index\n else:\n image, label = self.transform(image, label)\n\n if self.acp:\n if paste_img is not None:\n return torch.cat((image[0], paste_img[0]),dim=0), torch.cat([label[0,0].long(), paste_label[0,0].long()],dim=0)\n else:\n h, w = image[0].shape[1], image[0].shape[2]\n paste_img = torch.zeros(3, h, w)\n paste_label = torch.zeros(h,w)\n return torch.cat((image[0], paste_img),dim=0), torch.cat([label[0,0].long(), paste_label.long()],dim=0)\n \n return image[0], label[0, 0].long()\n\n def __len__(self):\n return len(self.list_sample_new)\n\ndef build_transfrom(cfg, fm=False, acp=False):\n trs_form = []\n mean, std, ignore_label = cfg['mean'], cfg['std'], cfg['ignore_label']\n trs_form.append(psp_trsform.ToTensor())\n trs_form.append(psp_trsform.Normalize(mean=mean, std=std))\n if cfg.get('resize', False):\n trs_form.append(psp_trsform.Resize(cfg['resize']))\n if cfg.get('rand_resize', False):\n if not acp:\n trs_form.append(psp_trsform.RandResize(cfg['rand_resize']))\n else:\n trs_form.append(psp_trsform.RandResize(cfg['acp']['rand_resize']))\n if cfg.get('rand_rotation', False):\n rand_rotation = cfg['rand_rotation']\n trs_form.append(psp_trsform.RandRotate(rand_rotation, ignore_label=ignore_label))\n if cfg.get('GaussianBlur', False) and cfg['GaussianBlur']:\n trs_form.append(psp_trsform.RandomGaussianBlur())\n if cfg.get('flip', False) and cfg.get('flip'):\n trs_form.append(psp_trsform.RandomHorizontalFlip())\n if cfg.get('crop', False):\n crop_size, crop_type = cfg['crop']['size'], cfg['crop']['type']\n trs_form.append(psp_trsform.Crop(crop_size, crop_type=crop_type, ignore_label=ignore_label))\n if fm and cfg.get('cutout', False):\n n_holes, length = cfg['cutout']['n_holes'], cfg['cutout']['length']\n trs_form.append(psp_trsform.Cutout(n_holes=n_holes, length=length))\n if fm and cfg.get('cutmix', False):\n n_holes, prop_range = cfg['cutmix']['n_holes'], cfg['cutmix']['prop_range']\n trs_form.append(psp_trsform.Cutmix(prop_range=prop_range,n_holes=n_holes))\n return psp_trsform.Compose(trs_form)\n\n\ndef build_city_semi_loader_cp(split, all_cfg, seed=0):\n cfg_dset = all_cfg['dataset']\n cfg_trainer = all_cfg['trainer']\n\n fm = True if 'cutout' in cfg_dset['train'].keys() or 'cutmix' in cfg_dset['train'].keys() else False\n acp = True if 'acp' in cfg_dset.keys() else False\n acm = cfg_dset['train'].get('acm', False)\n cfg = copy.deepcopy(cfg_dset)\n cfg.update(cfg.get(split, {}))\n\n workers = cfg.get('workers', 2)\n batch_size = cfg.get('batch_size', 1)\n n_sup = cfg.get('n_sup',2975)\n coarse = cfg.get('coarse', False)\n coarse_num = cfg.get('coarse_num', 3000)\n prob = cfg['acp'].get('prob',0.5)\n # build transform\n trs_form = build_transfrom(cfg)\n trs_form_unsup = build_transfrom(cfg, fm=fm)\n if acp:\n paste_trs = build_transfrom(cfg, acp=True)\n else:\n paste_trs = None\n dset = city_dset(cfg['data_root'], cfg['data_list'], trs_form, seed, n_sup, split, acp=acp, paste_trs=paste_trs, prob=prob)\n\n # build sampler\n sample = 
DistributedSampler(dset)\n    loader = DataLoader(dset, batch_size=batch_size, num_workers=workers,\n                        sampler=sample, shuffle=False, pin_memory=False)\n\n    #build sampler for unlabeled set\n    dset_unsup = city_dset(cfg['data_root'], cfg['data_list'], trs_form_unsup, seed, n_sup, split, unsup=True, coarse=coarse, coarse_num=coarse_num, fm=fm, acm=acm)\n    if split == 'train':\n        sample_unsup = DistributedSampler(dset_unsup)\n        loader_unsup = DataLoader(dset_unsup, batch_size=batch_size, num_workers=workers, sampler=sample_unsup, shuffle=False, pin_memory=False, drop_last=True)\n        return loader, loader_unsup\n    return loader","sub_path":"semseg/dataset/cityscapes_semi_cp.py","file_name":"cityscapes_semi_cp.py","file_ext":"py","file_size_in_byte":7802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"307109189","text":"#!coding=utf-8\n\n\"\"\"\nClass for generating Excel workbooks\n\"\"\"\nimport pymysql\nimport openpyxl\n\nf6s_machine_CHOICES = {\n    0 : 'FCAR qualified',\n    1 : 'FCAR not qualified',\n    2 : 'Other brand qualified',\n    3 : 'Other brand not qualified',\n    }\n\nf7s_machine_CHOICES = {\n    0 : 'FCAR qualified',\n    1 : 'FCAR not qualified',\n    2 : 'Other brand qualified',\n    3 : 'Other brand not qualified',\n    4 : 'PC version qualified',\n    5 : 'PC version not qualified',\n    }\n\n\nclass MakeToExcel:\n    def __init__(self, host_List, sql,type):\n        self.host_list = host_List\n        self.sql = sql\n        self.type = type\n        self.carclass_dict = ''\n        self.cargroup_dict = ''\n\n    def get_data(self):\n        conn = pymysql.connect(host=self.host_list['hostname'], user=self.host_list['username'], password=self.host_list['password'], database=self.host_list['database'], charset='utf8')\n        cur = conn.cursor()\n        cur.execute(self.sql)\n        self.data = cur.fetchall()\n        cur.execute(\"\"\"select id,cnname from www_tbcarclass\"\"\")\n        self.carclass_dict = dict(cur.fetchall())\n        cur.execute(\"\"\"select id,cnname from www_tbcargroup\"\"\")\n        self.cargroup_dict = dict(cur.fetchall())\n\n    def to_excel(self, filename, data, title):\n        wb = openpyxl.Workbook()\n        ws = wb.active\n        ws.append(title)\n        for i in range(len(data)):\n            if self.type == 'wenkong_zhuanjia':\n                if not data[i][1]:continue\n            if self.type == 'f6shuanji':\n                modified_data = list(data[i])\n                modified_data[9] = f6s_machine_CHOICES[modified_data[9]]\n                ws.append(modified_data)\n                continue\n            if self.type == 'f7shuanji':\n                modified_data = list(data[i])\n                modified_data[9] = f7s_machine_CHOICES[modified_data[9]]\n                ws.append(modified_data)\n                continue\n            if self.type == 'carlist':\n                if not data[i][1]:continue\n            if self.type == 'banbenfabu_list':\n                modified_data = list(data[i])\n                modified_data[4] = self.carclass_dict[modified_data[4]]\n                modified_data[5] = self.cargroup_dict[modified_data[5]]\n                if modified_data[0].endswith('0'):\n                    modified_data.append('Yes')\n                else:\n                    modified_data.append('No')\n                ws.append(modified_data)\n                continue\n\n            ws.append(data[i])\n        wb.save(filename + '.xlsx')\n\n\nif __name__ == '__main__':\n    host_list = {\n        'hostname': 'yun1.szfcar.com',\n        'username': 'root',\n        'password': 'fcar.8',\n        'database' : 'www',\n\n    }\n\n    ## Used by document control to export released versions\n    #sql = \"\"\"select (select name from www_tbmachine where id=machine_id),(select cnname from www_tbcarlist where id=carlist_id),(select cnname from www_tblang where id=lang_id),ver,chkdate from www_tbverlist where chkdate is not NULL ;\"\"\"\n\n    ## f6s machine replacement\n    #sql = \"\"\"select * from sh_tbreplace\"\"\"\n    #title = ('id','replacement date','courier name','tracking number','shipper','shipper phone','shipper address','region','sales rep','machine attribute','machine model','machine serial number','ship date','ship quantity','unit price','amount','payment received','remarks','created time','modified time',)\n    ## f7s machine replacement\n    #sql = \"\"\"select * from sh_tbreplacec\"\"\"\n    #title = ('id', 'replacement date', 'courier name', 'tracking number', 'shipper', 'shipper phone', 'shipper address', 'region', 'sales rep', 'machine attribute', 
'machine model', 'machine serial number', 'ship date', 'ship quantity', 'unit price', 'amount', 'payment received', 'remarks', 'created time', 'modified time',)\n    #sql = \"\"\"select (select name from www_tbmachine where id=machine_id),(select cnname from www_tbcarlist where id=carlist_id and cnname like \"%%专家%%\"),(select cnname from www_tblang where id=lang_id),ver,chkdate from www_tbverlist where chkdate is NULL ;\"\"\"\n    title = ('version number','release date','vehicle model','language','vehicle class','vehicle group','major version?')\n    sql = \"\"\"select ver,datetime,(select cnname from www_tbcarlist where id=carlist_id),(select cnname from www_tblang where id=lang_id ),(select carclass_id from www_tbcarlist where id=carlist_id),(select cargroup_id from www_tbcarlist where id=carlist_id) from www_tbverlist where chkdate;\"\"\"\n    p = MakeToExcel(host_list, sql,'banbenfabu_list')\n    p.get_data()\n    # for i in range(len(p.data)):\n    #     if p.data[i][4] == 3:\n    #         print(p.data[i])\n    #print(sorted(p.carclass_dict))\n    print(p.data)\n    p.to_excel('ttt', p.data, title)\n\n","sub_path":"index/make_excel.py","file_name":"make_excel.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"233585691","text":"def get_data():\n    with open(\"input-riceboard-bb43.txt\", \"r\") as f:\n        board_data = []\n        for line in f:\n            board_data.append([int(x) for x in line.split()])\n    return board_data\n    \n    \ndef sum_board(array):\n    total_rice = (array[0]**(array[1]**2) - 1) // (array[0] - 1) % array[2]\n    return int(total_rice)\n\n\ndef output_data(data):\n    with open(\"Output.txt\", 'w') as f2:\n        for i in data:\n            f2.write(\"Case #{}: {}\\n\".format(i[0], i[1]))\n\n    \ndef main():\n    var = get_data()\n    wasted_rice_cases = []\n    for i in range(1, len(var)):\n        print(\"dealing with {}\".format(i))\n        wasted_rice_cases.append([i, sum_board(var[i])])\n    output_data(wasted_rice_cases)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"RiceBoard.py","file_name":"RiceBoard.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"479079726","text":"###This python script accesses the staging folder stg_ias/Viewability, \n###and inserts the data from the csv file to the azure tables viewability\n### The QA table for all UI automation scripts is 'jobsinfo'. 
This table stores the filename, number of rows in each file uploaded, and the beginning and end row key of each file inserted.###\n\nimport pandas as pd\nimport os\nfrom emailscraper import *\nimport time\nimport uuid\nimport datetime\n\nTABLE_NAME = 'iasviewabilityraw' #iasviewabilityraw\nJOBS_TABLE_NAME = 'jobsinfo'\nTEST_FOLDER = r'/home/dananalytics/xbx-fy19-gamepass-3.0/environments/myenv/ui-data-automation/stg-ias/Viewability'\nSKIP_ROWS = 0\nos.chdir(TEST_FOLDER)\n\ndefine_table(TABLE_NAME)\ndefine_table(JOBS_TABLE_NAME)\n\n###from each flat file in the directory, read file contents into a dataframe, set row key position,\n### insert each row into the table starting at the start index position and increment the index, save filename, start and end rowkey position into the jobsinfo table, and return the current position\ndef process_csv_file(filename, row_key_start_pos, headerIndex, skipRows):\n    csv_df = pd.read_csv(filename, header=headerIndex, skiprows=skipRows, skipfooter=1, engine='python')\n    #csv_df.columns = csv_df.columns.str.replace(' ', '')\n    current_pos = row_key_start_pos\n    for i, row in csv_df.iterrows():\n        insert_into_collection(current_pos, row)\n        print(i, current_pos, row['Date'])\n        current_pos = current_pos + 1\n    save_rowKey_pos_to_DB(filename, row_key_start_pos, current_pos)\n    return current_pos\n\n###define entity and property names and values and insert or replace into entity/table\ndef insert_into_collection(rowkey, row):\n    task = Entity()\n    task.PartitionKey = row['Campaign']\n    task.MediaPartner = row['Media Partner']\n    task.RowKey = int(rowkey)\n    task.Date = row['Date']\n    #task.Campaign = row['Campaign']\n    task.Placement = row['Placement']\n    task.MeasuredAds = row['Measured Ads']\n    task.ViewableAds = row['Viewable Ads']\n    table_service.insert_or_replace_entity(TABLE_NAME, task)\n\n\n### this entity stores all the other tables' info: filename, number of rows inserted, start and end row of each file. 
Partition key here is the table name\ndef save_rowKey_pos_to_DB(filename, start_index, last_row_index):\n print(\"Saving rowKey pos : \" + filename + ' START: ' + str(start_index) + ' END: ' + str(last_row_index))\n task = Entity()\n task.PartitionKey = TABLE_NAME\n task.RowKey = str(uuid.uuid4())\n current_date = datetime.datetime.today().strftime(\"%m/%d/%Y\")\n task.Date = current_date\n task.filename = filename\n task.NumberOfRows = int(last_row_index) - int(start_index)\n task.start_index = int(start_index)\n task.last_index = int(last_row_index)\n table_service.insert_entity(JOBS_TABLE_NAME, task)\n\n### this function get the position of last row inserted into a table by querying the jobsinfo table and starts inserting new data at that position\ndef read_last_index_position_fromTable():\n rows = table_service._query_entities(JOBS_TABLE_NAME, \"PartitionKey eq'\" + TABLE_NAME + \"'\")\n jobs_df = pd.DataFrame(rows)\n if jobs_df.empty:\n # save_row_keys_toTable('xyz', 0, 0)\n return 0\n else:\n sorted_df = jobs_df.sort_values(by=['Timestamp'], ascending=False)\n # return sorted_df.last_index[0]\n return sorted_df.last_index.max()\n\n### Read each file from Folder, retrieve position of last row inserted and start inserting from that position\ndef process_csv_folder(folder_name):\n os.chdir(folder_name)\n index = read_last_index_position_fromTable()\n print(\"READ INDEX AS : \" + str(index))\n for item in os.listdir(folder_name):\n file_name = os.path.abspath(item)\n print(file_name)\n index = process_csv_file(file_name, index, 0, SKIP_ROWS)\n\nstart = time.time()\nprocess_csv_folder(TEST_FOLDER)\nprint(time.time() - start)\nprint('------------Upload to '+TABLE_NAME+' table Done---------------')\n","sub_path":"IAS_VA_to_azure_tables.py","file_name":"IAS_VA_to_azure_tables.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"110210653","text":"#append select link data for single link\n#Nagendra Dhakar, nagendra.dhakar@rsginc.com, 11/03/16\n\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\n#Run from the tstep year outputs folder such as D:\\swim2\\scenario_16\\outputs\\t23\n#D:/swim2/model/lib/Python27/python.exe ..\\..\\model\\code\\visum\\append_selectlink_data.py\n\nimport os, sys\nimport pandas as pd\nimport numpy as np\nimport zipfile\nimport warnings\nfrom Properties import Properties\n\n#do not print warnings\nwarnings.filterwarnings(\"ignore\")\n\n#load properties\nif len(sys.argv) < 2:\n print(len(sys.argv))\n print(sys.argv)\n print(\"missing arguments!\")\n sys.exit(1)\n\nproperty_file = sys.argv[1]\nproperties = Properties()\nproperties.loadPropertyFile(property_file)\n\nout_zip_file = properties['sl.output.bundle.file']\noutput_folder = os.path.dirname(out_zip_file)\n\n#time of day\nam_peak_start = int(properties['am.peak.start'])\nam_peak_end = int(properties['am.peak.end'])\nmd_offpeak_end = int(properties['md.offpeak.end'])\npm_peak_end = int(properties['pm.peak.end'])\n\nauto_classes = 
properties['sl.auto.classes'].split(\",\") #am, md, pm, ni\ntruck_classes = properties['sl.truck.classes'].split(\",\") #am, md, pm, ni\n\nselect_link_file = properties['sl.output.file.select.link.results'] #only file name - no full file path\n\n#tour and trip files\nhousehold_file = properties['sdt.household.data']\ntours_sdt_file = properties['sdt.person.tours']\ntours_ldt_file = properties['ldt.tours']\ntrips_sdt_file = properties['sdt.person.trips']\ntrips_ldt_file = properties['ldt.person.trips']\ntrips_ldt_vehicle_file = properties['ldt.vehicle.trips']\ntrips_ct_file = properties['ct.truck.trips']\ntrips_et_file = properties['et.truck.trips']\n\n#other files\nsynpop_file = properties['spg2.current.synpop.summary']\nemployment_file = properties['sdt.current.employment']\nalpha2beta_file = os.path.join(output_folder,'alpha2beta.csv')\n\n#output summary file\nout_summary_file = properties['sl.output.file.select.link.summary']\n\n'''\nreads a csv file\n'''\ndef read_data(infile, full_file_path = True):\n \n if (full_file_path==False):\n infile = os.path.join(output_folder, infile)\n \n mydata = pd.read_csv(infile)\n\n return(mydata)\n\n'''\nassigns assignment class ([mode]_pea, [mode]_offpeak, [mode]_pm, [mode]_ni - where mode is 'a' or 'd') to auto/truck trips\n'''\ndef determine_assignclass(mydata, mode, field_time):\n\n #periods of OD pairs to select from the select link file\n #for example: if MD period has 'a_peak' then OD pairs in 'a_peak' of the select link file fill be selected from the MD pperiod in the auto trip files.\n #this gives more control to user on what od pairs to assign to different time periods\n if (mode == 'CT_TRIP')|(mode == 'ET_TRIP'):\n mode_classes = truck_classes\n else:\n mode_classes = auto_classes\n\n #create ASSIGNCLASS field\n mydata['ASSIGNCLASS'] = '' #default - none\n mydata.ASSIGNCLASS[mydata[field_time] < am_peak_start] = mode_classes[3] #ni off peak\n mydata.ASSIGNCLASS[(mydata['ASSIGNCLASS'] == '') & (mydata[field_time] < am_peak_end)] = mode_classes[0] #am peak\n mydata.ASSIGNCLASS[(mydata['ASSIGNCLASS'] == '') & (mydata[field_time] < md_offpeak_end)] = mode_classes[1] #md offpeak\n mydata.ASSIGNCLASS[(mydata['ASSIGNCLASS'] == '') & (mydata[field_time] < pm_peak_end)] = mode_classes[2] #pm peak\n mydata.ASSIGNCLASS[mydata['ASSIGNCLASS'] == ''] = mode_classes[3] #ni offpeak\n\n return(mydata)\n\n'''\nassigns home zone id to trips\n'''\ndef determine_home_zone(mydata):\n\n #read household file\n households = read_data(household_file)\n\n #append home zone\n mydata = pd.merge(mydata, households[['HH_ID','TAZ']], left_on = ['hhID'], right_on = ['HH_ID'])\n mydata.rename(columns = {'TAZ':'HOME_ZONE'}, inplace = True)\n mydata = mydata.drop(['HH_ID'], 1)\n\n return(mydata)\n\n\n'''\ndetermines from trip type (trip purpose) for long distance travel (LDT) trips\n'''\ndef determine_ldt_trip_type(mydata, mytrips):\n \n # get only single record for each trip - file contains mutiple records depending on occupancy\n mytrips_grouped = mytrips[['hhID','memberID','tourID','origin','destination','tourPurpose','tripPurpose','tripMode','tripStartTime']].groupby(['hhID','memberID','tourID','origin','destination'])\n mytrips_unique = mytrips_grouped['tourPurpose','tripPurpose','tripMode','tripStartTime'].first()\n mytrips_unique = mytrips_unique.reset_index()\n mytrips_unique = mytrips_unique.sort_values(['hhID','memberID','tourID','tripStartTime'])\n\n #select link trips\n mydata_grouped = 
mydata[['hhID','memberID','tourID','tripMode']].groupby(['hhID','memberID','tourID'])\n mydata_unique = mydata_grouped['tripMode'].first()\n mydata_unique = mydata_unique.reset_index()\n \n #select only trips of tours that are in the select link results - to save time\n mytrips_selectlink = pd.merge(mytrips_unique, \n mydata_unique[['hhID','memberID','tourID']],\n on = ['hhID','memberID','tourID']) \n\n # find FROM_TRIP_TYPE\n mytrips_triptype = mytrips_selectlink.groupby(['hhID','memberID','tourID']).apply(last_trip_type)\n\n #merge with select link data\n mydata_triptype = pd.merge(mydata, mytrips_triptype[['hhID','memberID','tourID','origin', 'destination','FROM_TRIP_TYPE']],\n on = ['hhID','memberID','tourID','origin', 'destination'])\n return(mydata_triptype)\n\n'''\ndetermines from trip type (trip purpose) for short distance travel (SDT) trips\n'''\ndef determine_sdt_trip_type(mydata, mytrips):\n \n #create a dataframe with only unique hhID, memberID, tour# \n mydata_grouped = mydata[['hhID','memberID','tour#','tourSegment']].groupby(['hhID','memberID','tour#'])\n mydata_unique = mydata_grouped['tourSegment'].sum()\n mydata_unique = mydata_unique.reset_index()\n mydata_unique = mydata_unique.drop(['tourSegment'],1)\n\n #select only trips of tours that are in the select link results - to save time\n mytrips_selectlink = pd.merge(mytrips[['hhID','memberID','tour#','tourSegment', 'subTour(yes/no)','tourPurpose','tripPurpose']], \n mydata_unique,\n on = ['hhID','memberID','tour#'])\n\n #create last_trip_type for unique select link tours/trips\n mytrips_selectlink = mytrips_selectlink.sort_values(['hhID','memberID','tour#','tourSegment', 'subTour(yes/no)'])\n mytrips_triptype = mytrips_selectlink.groupby(['hhID','memberID','tour#']).apply(last_trip_type)\n\n #merge with select link data\n mydata_triptype = pd.merge(mydata, mytrips_triptype[['hhID','memberID','tour#','tourSegment', 'subTour(yes/no)','FROM_TRIP_TYPE']],\n on = ['hhID','memberID','tour#','tourSegment', 'subTour(yes/no)'])\n \n return(mydata_triptype)\n\n'''\ndetermines trip type (trip purpose) of the previous trip\n'''\ndef last_trip_type(df):\n #just to be sure\n #df.sort(['tourSegment', 'subTour(yes/no)'])\n last_trip_purpose = ''\n i=0\n for index, row in df.iterrows():\n trip_purpose = row['tripPurpose']\n #if first trip in the tour then set to tour purpose (home or work)\n if (i==0):\n if trip_purpose == 'HOME':\n last_trip_purpose = 'WORK'\n elif trip_purpose == 'WORK':\n last_trip_purpose = 'HOME'\n\n df.set_value(index,'FROM_TRIP_TYPE',last_trip_purpose)\n\n last_trip_purpose = trip_purpose\n i += 1\n \n return(df)\n\n'''\nselects select link OD pairs in trip file and attach select link information\n'''\ndef append_select_link(infile, timefield, selectlink, tourfile, colname, summary_df):\n\n print('reading trip file: ' + infile)\n trips = read_data(infile)\n\n print('determine assignment class ...')\n trips = determine_assignclass(trips, colname, timefield)\n \n print('append select link results ...')\n trips_select_link = pd.merge(trips, selectlink, left_on = ['ASSIGNCLASS','origin','destination'], right_on = ['ASSIGNCLASS','FROMZONE','TOZONE'])\n trips_select_link.rename(columns = {'FROMZONE':'EXTERNAL_ZONE_ORIGIN', 'TOZONE':'EXTERNAL_ZONE_DESTINATION',\n 'PERCENT':'SELECT_LINK_PERCENT'}, inplace = True)\n\n print('total select link trips: ' + str(len(trips_select_link)))\n \n #assign station number\n trips_select_link.EXTERNAL_ZONE_DESTINATION[trips_select_link['DIRECTION']=='IN'] = '_' + 
trips_select_link['STATIONNUMBER'].astype(str)\n trips_select_link.EXTERNAL_ZONE_ORIGIN[trips_select_link['DIRECTION']=='OUT'] = '_' + trips_select_link['STATIONNUMBER'].astype(str)\n\n #summary of trips by time period, stations, and direction\n if len(trips_select_link) > 0:\n #update assignclass definitions - time periods\n trips_select_link.ASSIGNCLASS[trips_select_link['ASSIGNCLASS'].str.contains('_peak')] = 'peak'\n trips_select_link.ASSIGNCLASS[trips_select_link['ASSIGNCLASS'].str.contains('_offpeak')] = 'offpeak'\n trips_select_link.ASSIGNCLASS[trips_select_link['ASSIGNCLASS'].str.contains('_ni')] = 'ni'\n trips_select_link.ASSIGNCLASS[trips_select_link['ASSIGNCLASS'].str.contains('_pm')] = 'pm'\n \n if (colname != 'LDT_VEHICLE_TRIP'):\n #summary of trips by time period, station number and direction\n summary = trips_select_link.groupby(['ASSIGNCLASS', 'STATIONNUMBER','DIRECTION']).count()['SELECT_LINK_PERCENT'].reset_index()\n summary = summary.rename(columns={'SELECT_LINK_PERCENT':colname})\n summary_df = pd.merge(summary_df, summary, on = ['ASSIGNCLASS', 'STATIONNUMBER','DIRECTION'], how = 'left')\n\n if (colname == 'SDT_PERSON_TRIP')|(colname == 'LDT_PERSON_TRIP'):\n trips_select_link['VEHICLETRIP'] = 0\n trips_select_link.VEHICLETRIP[trips_select_link['tripMode']=='DA'] = 1\n trips_select_link.VEHICLETRIP[trips_select_link['tripMode']=='SR2'] = 1/2\n trips_select_link.VEHICLETRIP[trips_select_link['tripMode']=='SR3P'] = 1/3.5\n #trips_select_link['VEHICLETRIP'] = trips_select_link['VEHICLETRIP'] * trips_select_link['SELECT_LINK_PERCENT']\n\n if (colname == 'SDT_PERSON_TRIP'):\n colname = 'SDT_VEHICLE_TRIP'\n else:\n colname = 'LDT_VEHICLE_TRIP'\n\n summary = trips_select_link.groupby(['ASSIGNCLASS', 'STATIONNUMBER','DIRECTION']).sum()['VEHICLETRIP'].reset_index()\n summary['VEHICLETRIP'] = summary['VEHICLETRIP'].astype(int)\n summary = summary.rename(columns={'VEHICLETRIP':colname})\n summary_df = pd.merge(summary_df, summary, on = ['ASSIGNCLASS', 'STATIONNUMBER','DIRECTION'], how = 'left')\n \n summary_df = summary_df.fillna(0)\n\n #drop unnecessary fields\n trips_select_link = trips_select_link.drop(['ASSIGNCLASS','FROMNODETONODE','DIRECTION','STATIONNUMBER'], 1)\n \n #append HOME_ZONE and FROM_TRIP_TYPE fields\n #trucks\n if (colname == 'CT_TRIP') | (colname == 'ET_TRIP'):\n #set HOME_ZONE as trip origin and FROM_TRIP_TYPE to empty\n trips_select_link['HOME_ZONE'] = trips_select_link['origin']\n trips_select_link['FROM_TRIP_TYPE'] = ''\n\n #autos\n else:\n trips_select_link = determine_home_zone(trips_select_link)\n if 'SDT' in infile:\n trips_select_link = determine_sdt_trip_type(trips_select_link, trips)\n elif 'LDT' in infile:\n trips_select_link = determine_ldt_trip_type(trips_select_link, trips)\n else:\n print('Unexpected file: ' + infile)\n \n else:\n summary_df[colname] = 0\n\n print('writing trip file ...')\n outfile = os.path.splitext(infile)[0] + '_select_link.csv'\n outfile = os.path.join(output_folder, outfile)\n trips_select_link.to_csv(outfile, index = False)\n \n return(outfile, summary_df)\n\n'''\ncreates a zip file of the outputs and delete csv files\n'''\ndef zip_output(infile_sdt, infile_ldt, infile_ldt_vehicle, infile_ct, infile_et):\n \n #zip all files\n with zipfile.ZipFile(out_zip_file, 'w') as myzip:\n myzip.write(infile_sdt,os.path.basename(infile_sdt))\n myzip.write(infile_ldt,os.path.basename(infile_ldt))\n myzip.write(infile_ldt_vehicle,os.path.basename(infile_ldt_vehicle))\n myzip.write(infile_ct,os.path.basename(infile_ct))\n 
myzip.write(infile_et,os.path.basename(infile_et))\n myzip.write(synpop_file,os.path.basename(synpop_file))\n myzip.write(employment_file,os.path.basename(employment_file))\n myzip.write(alpha2beta_file,os.path.basename(alpha2beta_file))\n \n myzip.close()\n \n #delete csv files\n os.remove(infile_sdt)\n os.remove(infile_ldt)\n os.remove(infile_ldt_vehicle)\n os.remove(infile_ct)\n os.remove(infile_et)\n\n'''\nmain function that appends select link data to trip files\n'''\ndef main():\n global select_link_summary\n \n #read select link data\n print('Read select link data')\n select_link_result = read_data(select_link_file, full_file_path = False)\n select_link_summary = read_data(out_summary_file, full_file_path = False)\n select_link_summary = select_link_summary[['PERIOD', 'STATIONNUMBER','DIRECTION', 'AUTO_SL_OD', 'TRUCK_SL_OD']] #keep only selected columns, so that it works even rerunning the append step\n select_link_summary.rename(columns = {'PERIOD':'ASSIGNCLASS'}, inplace = True)\n \n #append select link result to trips\n print('Append select link results to trips')\n outfile_sdt, select_link_summary = append_select_link(trips_sdt_file, 'tripStartTime', select_link_result, tourfile=tours_sdt_file, colname='SDT_PERSON_TRIP', summary_df=select_link_summary)\n outfile_ldt, select_link_summary = append_select_link(trips_ldt_file, 'tripStartTime', select_link_result, tourfile=tours_ldt_file, colname='LDT_PERSON_TRIP', summary_df=select_link_summary)\n outfile_ldt_vehicle, select_link_summary = append_select_link(trips_ldt_vehicle_file, 'tripStartTime', select_link_result, tourfile=tours_ldt_file, colname='LDT_VEHICLE_TRIP', summary_df=select_link_summary)\n outfile_ct, select_link_summary = append_select_link(trips_ct_file, 'tripStartTime', select_link_result, tourfile=None, colname='CT_TRIP', summary_df=select_link_summary)\n outfile_et, select_link_summary = append_select_link(trips_et_file, 'tripStartTime', select_link_result, tourfile=None, colname='ET_TRIP', summary_df=select_link_summary)\n\n #zip outputs\n print('Zip outputs')\n zip_output(outfile_sdt, outfile_ldt, outfile_ldt_vehicle, outfile_ct, outfile_et)\n\n #write summary\n print('Write select link summary')\n select_link_summary.rename(columns = {'ASSIGNCLASS':'PERIOD'}, inplace = True)\n select_link_summary.to_csv(os.path.join(output_folder, out_summary_file), header=True, index=False) \n \nif __name__ == \"__main__\":\n main()\n","sub_path":"root/scenario/model/code/visum/append_select_link_data.py","file_name":"append_select_link_data.py","file_ext":"py","file_size_in_byte":15384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"557599891","text":"from django.test import TestCase\nfrom unittest.mock import patch\nfrom django.core.management import call_command\nfrom django.db.utils import OperationalError\n\nclass CommandTest(TestCase):\n def test_wait_for_db_ready(self):\n with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:\n gi.return_value= True\n call_command('wait_for_db')\n self.assertEqual(gi.call_count, 1)\n @patch('time.sleep', return_value= True)\n def test_wait_for_db(self, t):\n with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:\n gi.side_effect= [OperationalError]* 5 + [True]\n call_command('wait_for_db')\n self.assertEqual(gi.call_count, 
6)","sub_path":"app/core/tests/test_command.py","file_name":"test_command.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"527919785","text":"#!/usr/bin/env python\n\"\"\"Infer experiment metadata from High Throughput Sequencing (HTS) data.\"\"\"\n\nimport argparse\nimport logging\nimport sys\nfrom typing import (Optional, Sequence)\n\nfrom htsinfer import (\n infer_single_paired,\n __version__,\n)\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef parse_args(\n args: Optional[Sequence[str]] = None\n) -> argparse.Namespace:\n \"\"\"Parse CLI arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=sys.modules[__name__].__doc__,\n )\n\n parser.add_argument(\n '-f1', '--file-1',\n metavar=\"FILE\",\n type=str,\n required=True,\n help=\"file path to read/first mate library\",\n )\n parser.add_argument(\n '-f2', '--file-2',\n metavar=\"FILE\",\n type=str,\n default=None,\n help=\"file path to second mate library\",\n )\n parser.add_argument(\n '-n', '--max-records',\n metavar=\"INT\",\n type=int,\n default=10000,\n help=(\n \"maximum number of records to process, starting with first \"\n \"record; set to 0 to process entire file(s)\"\n )\n )\n parser.add_argument(\n '--verbose', \"-v\",\n action='store_true',\n default=False,\n help=\"print logging messages to STDERR\",\n )\n parser.add_argument(\n '--debug',\n action='store_true',\n default=False,\n help=\"print debugging messages to STDERR\",\n )\n parser.add_argument(\n '--version',\n action='version',\n version='%(prog)s {version}'.format(version=__version__),\n help=\"show version information and exit\",\n )\n\n return parser.parse_args(args)\n\n\ndef setup_logging(\n verbose: bool = False,\n debug: bool = False,\n) -> None:\n \"\"\"Configure logging.\"\"\"\n if debug:\n level = logging.DEBUG\n elif verbose:\n level = logging.INFO\n else:\n level = logging.WARNING\n logging.basicConfig(\n level=level,\n format=\"[%(asctime)s] %(message)s\",\n datefmt='%m-%d %H:%M:%S',\n )\n\n\ndef main() -> None:\n \"\"\"Main function.\n\n Args:\n args: Command-line arguments and their values.\n \"\"\"\n args = parse_args()\n setup_logging(\n verbose=args.verbose,\n debug=args.debug,\n )\n LOGGER.info(\"Started script...\")\n LOGGER.debug(f\"CLI options: {args}\")\n results = {}\n results['single_paired'] = infer_single_paired.infer(\n file_1=args.file_1,\n file_2=args.file_2,\n )\n LOGGER.info(f\"Results: {results}\")\n LOGGER.info(\"Done.\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"htsinfer/htsinfer.py","file_name":"htsinfer.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"113617208","text":"# Write a class to hold player information, e.g. what room they are in\n# currently.\n\nclass Player:\n def __init__(self, name, curr_room):\n self.name = name\n self.curr_room = curr_room\n self.backpack = []\n\n def move_player(self, command):\n print(f'MOVE THIS PLAYER: {command}')\n print(f'Testing command formation => {command}_to')\n\n if getattr(self.curr_room, f'{command}_to') is not None:\n self.curr_room = getattr(self.curr_room, f'{command}_to')\n else:\n print(f'!! 
-- Direction not available from your current room -- !!')\n    \n    def add_item(self, item):\n        self.backpack.append(item)\n\n        return_string = f''\n        return_string += f'{item} was added to your backpack'\n\n        return return_string\n\n    def drop_item(self, item):\n        if item in self.backpack: \n            self.backpack.remove(item)\n\n            return_string = f''\n            return_string += f'{item} was removed from your backpack'\n\n            return return_string\n        else:\n            return_string = f''\n            return_string += f'{item} is not in your backpack'\n\n            return return_string\n\n    def __str__(self):\n        output = '*\\n'\n        output += f'PLAYER CLASS \\n'\n        output += f'Name: {self.name} \\n'\n        output += f'**\\n'\n        output += f'Current Room:\\n'\n        output += f'{self.curr_room}\\n'\n        output += f'**\\n'\n        output += '*'\n\n        return output\n\n# Test Player Class\n# testPlayerClass = Player('testPlayer')\n# print(testPlayerClass)\n","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"435511493","text":"import scrapy\nfrom ..items import GuaziItem\n\nclass GuaziSpider(scrapy.Spider):\n    name = 'guazi2'\n    allowed_domains = ['www.guazi.com']\n    # override start_urls via the start_requests() method\n\n    def start_requests(self):\n        \"\"\"Generate all page URLs and hand them to the scheduler at once.\"\"\"\n        for i in range(1,6):\n            url = 'https://www.guazi.com/ty/buy/o{}/#bread'.format(i)\n            yield scrapy.Request(url=url,callback=self.parse)\n\n    def parse(self, response):\n        # base xpath: match every car node in the listing\n        li_list = response.xpath('//ul[@class=\"carlist clearfix js-top\"]/li')\n        items = []\n        for li in li_list:\n            # one item object per car\n            item = GuaziItem()\n            item['url'] = 'https://www.guazi.com'+li.xpath('./a/@href').extract()[0]\n            item['name'] = li.xpath('./a/@title').extract()[0]\n            item['price'] = li.xpath('./a/div[@class=\"t-price\"]/p/text()').extract()[0]\n            items.append(item)\n        for item in items:\n            # the meta argument of Request passes data between parse callbacks; the item rides along with the response\n            yield scrapy.Request(url=item['url'],meta={'item':item},callback=self.detail_parse)\n\n    def detail_parse(self,response):\n        item = response.meta['item']\n        item['time'] = response.xpath('/html/body/div[4]/div[3]/div[2]/ul/li[1]/span').get()\n        item['km'] = response.xpath('/html/body/div[4]/div[3]/div[2]/ul/li[2]/span').get()\n        item['disp'] = response.xpath('/html/body/div[4]/div[3]/div[2]/ul/li[3]/span').get()\n        item['trans'] = response.xpath('/html/body/div[4]/div[3]/div[2]/ul/li[4]/span').get()\n        yield item\n\n\n\n\n","sub_path":"Spider/day08/GuaziUpgrade/Guazi/spiders/guazi2.py","file_name":"guazi2.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"67715187","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom datetime import date\nfrom forms.models import *\nfrom django.shortcuts import (\n    get_object_or_404,\n    render,\n    redirect,\n    HttpResponseRedirect,\n)\nimport random\nfrom django.http import Http404\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nimport json\nfrom django.core.files.storage import FileSystemStorage\nimport os\nfrom django.views.decorators.http import require_POST\nfrom django.core.files.storage import default_storage\nfrom django.core.files.base import ContentFile\nfrom django.conf import settings\nimport openpyxl\nfrom openpyxl import * \nfrom openpyxl.drawing.image import Image\nimport csv\nfrom .models import *\nfrom 
shutil import copyfile\n\n# Create your views here.\n\n@login_required\ndef CreateExtensionsForm(request):\n\n if request.session.get(\"company\"):\n company = request.session.get(\"company\")\n print(company)\n else:\n raise Http404\n\n try:\n Exts = Extention.objects.filter(company_id=company)\n except Extention.DoesNotExist:\n raise Http404\n\n tempComp = Company.objects.get(id=company)\n save_path = os.path.join(settings.MEDIA_ROOT, \"Reports\", str(tempComp.order) + \"_Extensions.csv\")\n toForm = os.path.join(\"Reports\", str(tempComp.order) + \"_Extensions.csv\")\n\n with open(save_path, 'w', newline='') as csvfile:\n\n fieldnames = ['Extension Number','Extension Name','CIDName','CIDNum','E911CIDNum','DirectorySearchable','DefaultNPA','Wrap Time','Record Call','FwdTime','Record Notify Email','Roaming Passcode','VM Extension Number','VM Forward Email','VM to Email Only','VM Passcode','SIP Username','SIP Password','Extension Active']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n\n for Ext in Exts:\n number = Ext.ext\n name = Ext.name\n cidName = Ext.caller_id_name\n\n try:\n cidNumber = Numbers.objects.get(id=Ext.caller_id_number).number\n except Numbers.DoesNotExist:\n cidNumber = Ext.caller_id_number\n\n E911Number = cidNumber\n directorySearch = 1\n defaultNPA = \"\" \n wrapTime = 0\n recordCall = 2\n forwardTime = 25\n recordNotify = \"\"\n roamingPass = \"\"\n VMExtNumber = 0\n VMPasscode = \"\"\n if (Ext.voicemail == True):\n VMExtNumber = str(1) + str(number)\n VMPasscode = 12345\n VMtoEmail = 0\n VMEmail = \"\"\n if (Ext.voicemail_toEmail == True):\n VMEmail = Ext.voicemail_email\n VMtoEmail = 1\n ExtActive = 1\n\n writer.writerow({\n 'Extension Number': number,\n 'Extension Name': name,\n 'CIDName': cidName,\n 'CIDNum': cidNumber,\n 'E911CIDNum': E911Number,\n 'DirectorySearchable': directorySearch,\n 'DefaultNPA': defaultNPA,\n 'Wrap Time': wrapTime,\n 'Record Call': recordCall,\n 'FwdTime': forwardTime,\n 'Record Notify Email': recordNotify,\n 'Roaming Passcode': roamingPass,\n 'VM Extension Number': VMExtNumber,\n 'VM Forward Email': VMEmail,\n 'VM to Email Only': VMtoEmail,\n 'VM Passcode': VMPasscode,\n 'SIP Username': \"\",\n 'SIP Password': \"\",\n 'Extension Active': ExtActive\n })\n \n try:\n tempDel = Reports.objects.filter(company=tempComp, type='Extensions')\n for each in tempDel:\n each.delete()\n \n except Reports.DoesNotExist:\n print()\n \n x = Reports.objects.create(company=tempComp, document=toForm, type=\"Extensions\")\n x.save()\n\n return HttpResponse(\"Done Report\" + str(company))\n\n\n@login_required\ndef CreatePortingForm(request):\n\n if request.session.get(\"company\"):\n company = request.session.get(\"company\")\n print(company)\n else:\n raise Http404\n\n template = os.path.join(settings.MEDIA_ROOT, \"Reports\", \"Template_Porting.xlsx\")\n\n tempComp = Company.objects.get(id=company)\n save_path = os.path.join(settings.MEDIA_ROOT, \"Reports\", str(tempComp.order) + \"_Porting.xlsx\")\n toForm = os.path.join(\"Reports\", str(tempComp.order) + \"_Porting.xlsx\")\n\n copyfile(template, save_path)\n\n wb = load_workbook(save_path)\n ws = wb.active\n\n #First Part\n ws['E9'] = tempComp.company_name\n tempAddress = Address.objects.get(id=tempComp.site_address_id)\n if tempAddress.Suite == \"\":\n suiteHandler = \"\"\n else:\n suiteHandler = str(tempAddress.Suite) + \" - \"\n ws['E10'] = suiteHandler + tempAddress.StreetAddress\n ws['E11'] = tempComp.type\n ws['E13'] = tempComp.currentProvider\n\n\n ws['E16'] = 
tempComp.listing_name\n ws['E17'] = tempComp.type\n ws['E18'] = tempComp.category_listing\n ws['E19'] = tempComp.listing_phone\n\n tempAddress = Address.objects.get(id=tempComp.listing_address_id)\n res = tempAddress.StreetAddress.split(\", \");\n if tempAddress.Suite == \"\":\n suiteHandler = \"\"\n else:\n suiteHandler = str(tempAddress.Suite) + \" - \"\n \n ws['E20'] = suiteHandler + res[0]\n ws['E23'] = res[1]\n ws['E24'] = res[2]\n ws['E25'] = tempAddress.Postal\n\n ws['A29'] = \"Phone number(s) to be ported (If more than the below fields, please attach EXCEL SHEET with all the TNs)\"\n counter = 30\n\n tempPort = Numbers.objects.filter(Type=1, company_id=tempComp)\n for each in tempPort:\n ws['A'+ str(counter)] = each.number\n counter = counter + 1\n \n counter = counter + 1\n ws['A'+ str(counter)] = \"Phone number(s) to be disconnect (if applicable) (If more than the below fields, please attach EXCEL SHEET with all the TNs)\"\n counter = counter + 1\n\n tempPort = Numbers.objects.filter(Type=0, company_id=tempComp)\n for each in tempPort:\n ws['A'+ str(counter)] = each.number\n counter = counter + 1\n \n \n counter = counter + 1\n ws['A'+ str(counter)] = \"Authorized Customer Signiture\"\n counter = counter + 1\n\n \n tempUpload = Uploads.objects.get(company_id=tempComp, type='signiture')\n img = openpyxl.drawing.image.Image(tempUpload.document)\n \n img.anchor = 'A' + str(counter)\n ws.add_image(img)\n \n counter = counter + 8\n #Printed Name\n printed_name = request.POST['input_name']\n ws['A'+ str(counter)] = \"Authorized Printed Name (as per above signiture)\"\n counter = counter + 1\n ws['A'+ str(counter)] = printed_name.upper()\n\n counter = counter + 2\n ws['A'+ str(counter)] = \"Date\"\n counter = counter + 1\n\n today = date.today()\n d1 = today.strftime(\"%d/%m/%Y\")\n ws['A'+ str(counter)] = d1\n\n try:\n tempDel = Reports.objects.filter(company=tempComp, type='PortingForm')\n for each in tempDel:\n each.delete()\n \n except Reports.DoesNotExist:\n print()\n \n x = Reports.objects.create(company=tempComp, document=toForm, type=\"PortingForm\")\n x.save()\n wb.save(save_path)\n print(\"Porting\")\n return HttpResponse(\"Done\")\n\n@login_required\ndef CreatePBXForm(request):\n\n if request.session.get(\"company\"):\n company = request.session.get(\"company\")\n print(company)\n else:\n raise Http404\n\n template = os.path.join(settings.MEDIA_ROOT, \"Reports\", \"Template_PBX.xlsx\")\n\n tempComp = Company.objects.get(id=company)\n save_path = os.path.join(settings.MEDIA_ROOT, \"Reports\", str(tempComp.order) + \"_PBX.xlsx\")\n toForm = os.path.join(\"Reports\", str(tempComp.order) + \"_PBX.xlsx\")\n\n copyfile(template, save_path)\n\n wb = load_workbook(save_path)\n ws = wb.active\n\n ws['B3'] = tempComp.company_name\n tempAddress = Address.objects.get(id=tempComp.site_address_id)\n res = tempAddress.StreetAddress.split(\", \");\n\n if tempAddress.Suite == \"\":\n suiteHandler = \"\"\n else:\n suiteHandler = str(tempAddress.Suite) + \" - \"\n\n ws['B4'] = suiteHandler + res[0]\n ws['B5'] = res[1]\n ws['B6'] = res[2]\n ws['B7'] = tempAddress.Postal\n ws['B8'] = tempComp.listing_phone\n\n #Contact Name\n ws['B10'] = str(request.user.first_name) + \" \" + str(request.user.last_name)\n #Contact Email\n ws['B11'] = request.user.email\n #Contact Phone\n ws['B12'] = request.user.phone_number\n\n\n tempNumbers = Numbers.objects.filter(Type=1, company_id=tempComp)\n counter = 15\n for each in tempNumbers:\n \n ws['A' + str(counter + 1)] = \"DID #\"\n ws['A' + str(counter + 2)] = 
\"Address\"\n ws['A' + str(counter + 3)] = \"City\"\n ws['A' + str(counter + 4)] = \"Province\"\n ws['A' + str(counter + 5)] = \"Postal Code\"\n\n tempAddress = Address.objects.get(id=each.Address_911_id)\n res = tempAddress.StreetAddress.split(\", \");\n\n if tempAddress.Suite == \"\":\n suiteHandler = \"\"\n else:\n suiteHandler = str(tempAddress.Suite) + \" - \"\n\n ws['B' + str(counter + 1)] = each.number\n ws['B' + str(counter + 2)] = suiteHandler + res[0]\n ws['B' + str(counter + 3)] = res[1]\n ws['B' + str(counter + 4)] = res[2]\n ws['B' + str(counter + 5)] = tempAddress.Postal\n\n counter = counter + 6\n \n try:\n tempDel = Reports.objects.filter(company=tempComp, type='PBX')\n for each in tempDel:\n each.delete()\n \n except Reports.DoesNotExist:\n print()\n \n\n x = Reports.objects.create(company=tempComp, document=toForm, type=\"PBX\")\n x.save()\n wb.save(save_path)\n\n print(\"PBX\")\n return HttpResponse(\"Done\")\n","sub_path":"Portal/reports/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"557680373","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n\n#from django.contrib import admin\n#admin.autodiscover()\n\nurlpatterns = patterns('swift_meta.views',\n # Examples:\n # url(r'^$', 'test.views.home', name='home'),\n # url(r'^test/', include('test.ui.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n\n # Uncomment the next line to enable the admin:\n #url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'index'),\n url(r'index', 'index'),\n url(r'show','display_meta'),\n)\n","sub_path":"tutsite/swift_meta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"263513097","text":"import math\n\n\nclass rcma:\n def check(self,beta,eta,any,timeRange):\n L = []\n\n avg = timeRange / 10\n for i in range(1,11):\n L.append(round(avg * i,4))\n print(L)\n # 失效率\n failureRates = []\n # 可靠度\n reliabilitys = []\n # 故障概率\n failureProbabilitys = []\n # 概率密度\n probabilityDensitys = []\n for i in L:\n if beta == 1:\n failureRate = (1 / eta) * 1000000\n else:\n failureRate = (beta / eta) * math.pow(i / eta, beta - 1) * 1000000\n\n failureRates.append(failureRate)\n reliability = 1 / math.pow(math.e, math.pow(i / eta, beta))\n reliabilitys.append(reliability)\n\n failureProbability = 1 - reliability\n failureProbabilitys.append(failureProbability)\n\n probabilityDensity = (failureRate / 1000000) * reliability\n probabilityDensitys.append(probabilityDensity)\n print(failureRates)\n print(reliabilitys)\n print(failureProbabilitys)\n print(probabilityDensitys)\nrcma().check(0.3,0.2,3,200)","sub_path":"new_darams_selenium/model_check/rcma_check.py","file_name":"rcma_check.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"294324810","text":"from django.contrib import admin\nfrom django.conf import settings\n\nfrom snippets.models import *\n\nclass SnippetAdmin(admin.ModelAdmin):\n if 'tagging' in settings.INSTALLED_APPS:\n list_display = ('__unicode__', 'lang', 'tag_str', 'active', 'date')\n # this will work in 1.3\n #list_filter = ('rel_tags__tag__name', )\n else:\n list_display = ('__unicode__', 'lang', 'active', 'date')\n\n list_filter = ('lang', 
'categories') #, 'parent'\n list_editable = ('active',)\n\n prepopulated_fields = {'slug': ('title',)}\n\n def save_model(self, request, obj, form, change):\n if not change:\n obj.creator = request.user\n\n # always save model! this is where it is done\n obj.save()\n\n\nadmin.site.register(Snippet, SnippetAdmin)\nadmin.site.register(Category)\n\n","sub_path":"src/snippets/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"215518294","text":"from datetime import datetime\nfrom django.conf.urls.defaults import *\nfrom newscenter.feeds import NewsroomFeed\nfrom django.views.generic.list import ListView\n\nfrom newscenter import models \n\n##Object List\nurlpatterns = patterns('',\n url(r'^$', ListView.as_view(queryset=models.Newsroom.objects.all()), name='newscenter_index'),\n url(r'^categories/$', ListView.as_view(queryset=models.Newsroom.objects.all()), name='category_index')\n)\n\n##Custom \nurlpatterns += patterns('newscenter.views',\n (r'^(?P[\\-\\d\\w]+)/$',\n 'newsroom_detail', None, 'news_newsroom_detail'),\n (r'^categories/(?P[\\-\\d\\w]+)/$',\n 'category_detail', None, 'news_category_detail'),\n (r'^(?P[\\-\\d\\w]+)/(?P\\d{4})/(?P[a-z]{3})/(?P[\\-\\d\\w]+)/$',\n 'article_detail', None, 'news_article_detail'),\n (r'^(?P[\\-\\d\\w]+)/(?P\\d{4})/(?P[a-z]{3})/$',\n 'archive_month', None, 'news_archive_month',),\n (r'^(?P[\\-\\d\\w]+)/(?P\\d{4})/$', \n 'archive_year', None, 'news_archive_year',)\n)\n\n##Feeds\nurlpatterns += patterns('',\n (r'^(?P[\\-\\d\\w]+)/rss/$', NewsroomFeed(), None, 'newsroom_feed'),\n)\n","sub_path":"newscenter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"174327217","text":"#!/usr/bin/env python\n# ***********************************************************************************************************\n#\n# Starfish Storage Corporation (\"COMPANY\") CONFIDENTIAL\n# Unpublished Copyright (c) 2011-2017 Starfish Storage Corporation, All Rights Reserved.\n#\n# NOTICE: All information contained herein is, and remains the property of COMPANY. The intellectual and\n# technical concepts contained herein are proprietary to COMPANY and may be covered by U.S. and Foreign\n# Patents, patents in process, and are protected by trade secret or copyright law. Dissemination of this\n# information or reproduction of this material is strictly forbidden unless prior written permission is\n# obtained from COMPANY. Access to the source code contained herein is hereby forbidden to anyone except\n# current COMPANY employees, managers or contractors who have executed Confidentiality and Non-disclosure\n# agreements explicitly covering such access.\n#\n# ANY REPRODUCTION, COPYING, MODIFICATION, DISTRIBUTION, PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR\n# THROUGH USE OF THIS SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS STRICTLY PROHIBITED,\n# AND IN VIOLATION OF APPLICABLE LAWS AND INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE\n# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS TO REPRODUCE, DISCLOSE OR DISTRIBUTE\n# ITS CONTENTS, OR TO MANUFACTURE, USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.\n#\n# FOR U.S. 
GOVERNMENT CUSTOMERS REGARDING THIS DOCUMENTATION/SOFTWARE\n# These notices shall be marked on any reproduction of this data, in whole or in part.\n# NOTICE: Notwithstanding any other lease or license that may pertain to, or accompany the delivery of,\n# this computer software, the rights of the Government regarding its use, reproduction and disclosure are\n# as set forth in Section 52.227-19 of the FARS Computer Software-Restricted Rights clause.\n# RESTRICTED RIGHTS NOTICE: Use, duplication, or disclosure by the Government is subject to the\n# restrictions as set forth in subparagraph (c)(1)(ii) of the Rights in Technical Data and Computer\n# Software clause at DFARS 52.227-7013.\n#\n# ***********************************************************************************************************\n# -*- coding: utf-8 -*-\n\"\"\"\n Part of the distribution\n\"\"\"\n# make Event Entries out of eventTuple(s)\n#\n# these are independent methods, so no need to put them in a class\n#\n#\n# ADD/CREATE\n# REMOVE\n# MOVE/RENAME\n# CHANGE/ATTRIBUTE (several variations)\n#\n\nimport logging\nimport os\nimport stat\nimport sys\nimport time\n\nfrom sf_gpfs import event_context\nfrom sf_em_common.switch import switch\n\nfrom sfutils.scan_entry import ScanEntry\n\nEvent = ScanEntry.Event\n\nentrylogger = logging.getLogger(\"Make entries\")\n\n#---------------------------------------------------------------------------------------------------\n#---------------------------------------------------------------------------------------------------\ndef generic_entry_format(filesystem,event_type,event_tuple_list):\n \"\"\"\n when sfutils not present, just use this interface to record events\n \"\"\"\n entryL = []\n for e in event_tuple_list:\n full_path = e[1]\n st = e[2]\n attrs = e[3]\n entry = {\n 'event': event_type,\n 'full_path': os.path.relpath(full_path, filesystem),\n 'inode': st.st_ino,\n 'st_mode': st.st_mode,\n 'xattrs': attrs\n }\n if st:\n entry['uid'] = st.st_uid\n entry['gid'] = st.st_gid\n entry['size'] = st.st_size\n entry['blocks'] = st.st_blocks\n entry['atime'] = st.st_atime\n entry['mtime'] = st.st_mtime\n entry['ctime'] = st.st_ctime\n if event_type== 'removed':\n entry['parent_inode'] = st.st_ino\n entry['parent_path'] = os.path.dirname(full_path)\n entryL.append(entry)\n return entryL\n\n#\n\"\"\"\nhttps://github.com/StarfishStorage/starfish/blob/master/starfish/src/sfutils/scan_entry.py\n EVENT = 'event'\n SELECTOR = 'selector'\n INODE = 'inode'\n PARENT_INODE = 'parent_inode'\n NEW_PARENT_INODE = 'new_parent_inode' # new parent inode, used by move event\n DEPTH = 'depth' # deprecated, now calculated by loader from path\n FULL_PATH = 'full_path'\n NEW_BASENAME = 'new_bname' # new basename of entry, used by move event\n UID = 'uid'\n GID = 'gid'\n SIZE = 'size'\n BLOCKS = 'blocks'\n # FIXME: why mode has st_ prefix?\n MODE = 'st_mode' # deprecated, use TYPE and PERMISSIONS\n TYPE = 'type'\n PERMISSIONS = 'perm'\n ATIME = 'atime'\n MTIME = 'mtime'\n CTIME = 'ctime'\n AGGREGATES = 'aggregates'\n OUT_OF_SYNC = 'out_of_sync'\n SYNC_TIME = 'sync_time'\n ATTRS = 'attrs'\n OLD_INODE = 'old_inode'\n NEW_FULL_PATH = 'new_full_path'\n RECURSIVE = 'recursive'\n BASENAME = 'basename' # used only in selector\n TARGET = 'target'\n\"\"\"\n#---------------------------------------------------------------------------------------------------\ndef add_stat_to_entry(entry,st):\n \"\"\"\n add stat() data to entry dict()\n st assumed to have real stat() info on entry (do checking/logging in callers)\n \"\"\"\n 
entry[ScanEntry.UID] = st.st_uid\n entry[ScanEntry.GID] = st.st_gid\n entry[ScanEntry.SIZE] = st.st_size\n entry[ScanEntry.BLOCKS] = st.st_blocks # no built-in indexing for blocks\n entry[ScanEntry.PERMISSIONS] = stat.S_IMODE(st.st_mode)\n entry[ScanEntry.ATIME] = st.st_atime\n entry[ScanEntry.MTIME] = st.st_mtime\n entry[ScanEntry.CTIME] = st.st_ctime\n return entry\n#---------------------------------------------------------------------------------------------------\n#---------------------------------------------------------------------------------------------------\n# these should be another module/class\n#\n# reformat events for passing to SF\n# use selector when possible to avoid stat() call\n#---------------------------------------------------------------------------------------------------\ndef sf_heartbeat_entry():\n \"\"\"\n MONITOR_SYNC_TIME event\n \"\"\"\n\n entryL = []\n entry = {\n ScanEntry.EVENT : Event.MONITOR_SYNC_TIME,\n ScanEntry.SYNC_TIME: time.time(),\n }\n entryL.append(entry)\n return entryL\n#---------------------------------------------------------------------------------------------------\ndef sf_attribute_entry(eventDict,st,attrs):\n \"\"\"\n Create ATTRIBUTE event entry\n this is a CHANGED event\n Use selector\n Required ScanEntry fields:\n * 'inode' - entity inode number\n * 'attrs' - custom attributes.\n Optional:\n * 'file_path' - path relative to the volume\n \"\"\"\n file_path = eventDict['filepath']\n inode = int(eventDict['src_ino'])\n basename = eventDict['basename']\n entry = {\n ScanEntry.SELECTOR: {\n ScanEntry.INODE: inode,\n ScanEntry.TYPE : stat.S_IFMT(st.st_mode),\n ScanEntry.BASENAME: basename,\n },\n ScanEntry.SYNC_TIME : time.time(),\n ScanEntry.CUSTOM_FS_ATTRS: attrs,\n }\n entry[ScanEntry.EVENT] = Event.CHANGED\n entry = add_stat_to_entry(entry,st)\n if (file_path):\n entry[ScanEntry.SELECTOR][ScanEntry.FULL_PATH] = file_path\n return [entry]\n#---------------------------------------------------------------------------------------------------\ndef sf_cr_entry(eventDict,st,attrs):\n \"\"\"\n Create CREATE/MKDIR event entry with custom attributes.\n this is a ADDED event\n Use selector\n Required ScanEntry fields:\n * 'inode' - entity inode number\n * 'attrs' - custom attributes.\n Optional (if exist):\n * 'file_path' - path relative to the volume\n * 'pinode' - parent dir entity inode number\n * 'basename' - file basename\n\n \"\"\"\n targetdir_entry = {}\n file_path = eventDict['filepath']\n inode = int(eventDict['target_ino'])\n pinode = int(eventDict['target_parent_ino'])\n basename = eventDict['basename']\n\n entry = {\n ScanEntry.INODE: inode,\n ScanEntry.CUSTOM_FS_ATTRS: attrs,\n ScanEntry.PARENT_INODE: pinode,\n ScanEntry.TYPE : stat.S_IFMT(st.st_mode),\n ScanEntry.FULL_PATH: file_path,\n ScanEntry.SYNC_TIME : time.time(),\n }\n entry[ScanEntry.BASENAME] = basename\n entry[ScanEntry.EVENT] = Event.ADDED\n entry = add_stat_to_entry(entry,st)\n\n \"\"\"\n # REFACTOR\n targetdir_entry = {\n ScanEntry.EVENT : Event.CHANGED,\n ScanEntry.SELECTOR: {\n ScanEntry.INODE: pinode,\n ScanEntry.TYPE : stat.S_IFMT(stat.S_IFDIR),\n },\n ScanEntry.MTIME: st.st_mtime,\n ScanEntry.CTIME: st.st_ctime,\n ScanEntry.SYNC_TIME : time.time(),\n ScanEntry.CUSTOM_FS_ATTRS: {},\n }\n return [entry] + [targetdir_entry]\n \"\"\"\n return [entry]\n#---------------------------------------------------------------------------------------------------\ndef sf_rm_entry(eventDict,dir_st,attrs):\n \"\"\"\n Create UNLINK/RMDIR event entry with custom attributes.\n this is a 
REMOVED event\n\n Use selector\n Required ScanEntry fields:\n * 'inode' - entity inode number\n * 'type' - file type (reg or dir)\n * 'attrs' - custom attributes.\n Optional:\n * 'file_path' - path relative to the volume\n * 'pino' - parent inode (should still exist)\n\n dir_st is the parent dir stat() metadata - currently ignored\n only useful if targetdir_entry is created\n \"\"\"\n file_path = eventDict['filepath']\n pinode = int(eventDict['parent_ino'])\n basename = eventDict['basename']\n st_mode = int(eventDict['mode'])\n entry = {\n ScanEntry.SELECTOR: {\n ScanEntry.PARENT_INODE : pinode,\n ScanEntry.TYPE : stat.S_IFMT(st_mode),\n ScanEntry.BASENAME: basename,\n },\n ScanEntry.SYNC_TIME : time.time(),\n ScanEntry.CUSTOM_FS_ATTRS: {},\n }\n entry[ScanEntry.EVENT] = Event.REMOVED\n if (file_path):\n entry[ScanEntry.SELECTOR][ScanEntry.FULL_PATH] = file_path\n entry[ScanEntry.PERMISSIONS] = stat.S_IMODE(st_mode)\n\n \"\"\"\n # REFACTOR\n # this may not be processed properly by SF - anyway, it is very inefficient (entry bloat) if several files in the same directory are\n # removed at the same time - better to do windowing with event coalescing\n # \n targetdir_entry = {\n ScanEntry.EVENT : Event.CHANGED,\n ScanEntry.SELECTOR: {\n ScanEntry.INODE: pinode,\n ScanEntry.TYPE : stat.S_IFMT(stat.S_IFDIR),\n },\n ScanEntry.CUSTOM_FS_ATTRS: {},\n }\n targetdir_entry = add_stat_to_entry(targetdir_entry,dir_st)\n return [entry] + [targetdir_entry]\n \"\"\"\n return [entry]\n#---------------------------------------------------------------------------------------------------\ndef sf_link_entry(eventDict,st,attrs):\n \"\"\"\n Create hard LINK event entry with custom attributes.\n this is a ADDED event\n Available ScanEntry selector fields:\n * 'inode' - entity inode number\n * 'basename' - file basename\n * 'pinode' - parent dir entity inode number\n * 'attrs' - custom attributes.\n Optional (if exist):\n * 'file_path' - path relative to the volume\n\n \"\"\"\n targetdir_entry = {}\n file_path = eventDict['filepath']\n inode = int(eventDict['target_ino'])\n pinode = int(eventDict['target_parent_ino'])\n basename = eventDict['basename']\n\n entry = {\n ScanEntry.INODE: inode,\n ScanEntry.CUSTOM_FS_ATTRS: attrs,\n ScanEntry.PARENT_INODE: pinode,\n ScanEntry.TYPE : stat.S_IFMT(st.st_mode),\n ScanEntry.FULL_PATH: file_path,\n ScanEntry.SYNC_TIME : time.time(),\n }\n entry[ScanEntry.BASENAME] = basename\n entry[ScanEntry.EVENT] = Event.ADDED\n entry = add_stat_to_entry(entry,st)\n\n \"\"\"\n # REFACTOR\n targetdir_entry = {\n ScanEntry.EVENT : Event.CHANGED,\n ScanEntry.SELECTOR: {\n ScanEntry.INODE: pinode,\n ScanEntry.TYPE : stat.S_IFMT(stat.S_IFDIR),\n },\n ScanEntry.MTIME: st.st_mtime,\n ScanEntry.CTIME: st.st_ctime,\n ScanEntry.SYNC_TIME : time.time(),\n ScanEntry.CUSTOM_FS_ATTRS: {},\n }\n return [entry] + [targetdir_entry]\n \"\"\"\n return [entry]\n#---------------------------------------------------------------------------------------------------\ndef sf_symlink_entry(eventDict,entryL):\n \"\"\"\n Modify CREATE event entry with symlink information\n this is an ADDED event\n\n Only the first entry in entryL (for the symlink, not the parent dir) has to be modified\n Assume symlink target is relative path to mount point\n \"\"\"\n if not entryL:\n return []\n entry = entryL[0]\n entry[ScanEntry.SYMLINK_TARGET] = eventDict['symlink_target']\n\n return entryL\n\n#---------------------------------------------------------------------------------------------------\ndef 
sf_rename_entry(eventDict,st,attrs):\n \"\"\"\n Create MOVED event entries with custom attributes.\n Use selector\n Required ScanEntry fields:\n * SOURCE PARENT INODE - inode number\n * NEW PARENT INODE - inode number\n * SOURCE BASENAME - \n * NEW BASENAME - \n Optional:\n * 'NEW_FULL_PATH' - path relative to the volume\n * 'FULL_PATH' - path relative to the volume\n\n 3 possible entries:\n file - MOVED\n if target dir is not root path of FS:\n target dir - CHANGED\n if src dir is not root path of FS and if src dir != target dir:\n src dir - CHANGED\n\n \"\"\"\n # REFACTOR\n dir_list = []\n inode = int(eventDict['target_ino'])\n basename = eventDict['basename']\n new_bname = eventDict['new_bname']\n src_pinode = int(eventDict['src_parent_ino'])\n target_pinode = int(eventDict['target_parent_ino'])\n \"\"\"\n selector needs inode, parent inode, basename to distinguish between\n hlinked files in the same directory with diff names and\n hlinked files in diff directories with the same names\n to know which entry is being moved\n \"\"\"\n file_entry = {\n ScanEntry.EVENT : Event.MOVED,\n ScanEntry.NEW_PARENT_INODE: target_pinode,\n ScanEntry.SYNC_TIME : time.time(),\n ScanEntry.SELECTOR: {\n ScanEntry.INODE: inode,\n ScanEntry.BASENAME: basename,\n ScanEntry.PARENT_INODE: src_pinode,\n },\n ScanEntry.CUSTOM_FS_ATTRS: attrs,\n }\n if 'target_filepath' in eventDict:\n target_filepath = eventDict['target_filepath']\n file_entry[ScanEntry.NEW_FULL_PATH] = target_filepath\n if 'src_filepath' in eventDict:\n src_filepath = eventDict['src_filepath']\n file_entry[ScanEntry.SELECTOR][ScanEntry.FULL_PATH] = src_filepath\n file_entry[ScanEntry.NEW_BASENAME] = new_bname\n\n entry_list = [ file_entry ]\n\n \"\"\"vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n targetdir_filepath = os.path.dirname(target_filepath)\n if targetdir_filepath != \"\":\n targetdir_entry = {\n ScanEntry.EVENT : Event.CHANGED,\n ScanEntry.SELECTOR: {\n ScanEntry.INODE: target_pinode,\n ScanEntry.TYPE : stat.S_IFMT(stat.S_IFDIR),\n ScanEntry.BASENAME : os.path.basename(targetdir_filepath),\n },\n ScanEntry.MTIME: st.st_mtime,\n ScanEntry.CTIME: st.st_ctime,\n ScanEntry.SYNC_TIME : time.time(),\n ScanEntry.CUSTOM_FS_ATTRS: {},\n }\n dir_list = [targetdir_entry]\n\n srcdir_filepath = os.path.dirname(src_filepath)\n if (srcdir_filepath != \"\") and (src_pinode != target_pinode):\n srcdir_entry = {\n ScanEntry.EVENT : Event.CHANGED,\n ScanEntry.SELECTOR: {\n ScanEntry.INODE: src_pinode,\n ScanEntry.TYPE : stat.S_IFMT(stat.S_IFDIR),\n ScanEntry.BASENAME : os.path.basename(srcdir_filepath),\n },\n ScanEntry.MTIME: st.st_mtime,\n ScanEntry.CTIME: st.st_ctime,\n ScanEntry.SYNC_TIME : time.time(),\n ScanEntry.CUSTOM_FS_ATTRS: {},\n }\n dir_list += [srcdir_entry]\n entry_list.extend(dir_list)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n \"\"\"\n return entry_list\n#---------------------------------------------------------------------------------------------------\ndef sf_entry_format(event_tuple_list):\n \"\"\"\n reformat events for passing to SF\n Create new event entry with custom attributes.\n\n By this time, basename is known to be valid UTF-8, but should still test filepath\n\n REFACTOR: doesn't include synchronous events - create, remove, rename, prepermchange - not generated\n would want to handle synchronous events very differently\n\n \"\"\"\n sf_entryL = []\n for e in event_tuple_list:\n entryL =[]\n eventDict, st, attrs 
= e # unpack event tuple\n event_type = eventDict['event']\n for case in switch(event_type):\n if case('postcreate'):\n entryL = sf_cr_entry(eventDict,st,attrs)\n break\n if case('postlink'):\n entryL = sf_link_entry(eventDict,st,attrs)\n break\n if case('postsymlink'):\n entryL = sf_cr_entry(eventDict,st,attrs)\n entryL = sf_symlink_entry(eventDict,entryL)\n break\n if case('postremove'):\n \"\"\"\n In this case, st is stat() for parent directory\n \"\"\"\n entryL = sf_rm_entry(eventDict,st,attrs)\n break\n# easier to just pass whole eventDict than to try to get args right\n if case('postrename') :\n entryL = sf_rename_entry(eventDict,st,attrs)\n break\n if case('postpermchange') :\n entryL = sf_attribute_entry(eventDict,st,attrs)\n break\n if case('attribute'):\n entryL = sf_attribute_entry(eventDict,st,attrs)\n break\n if entryL:\n sf_entryL.extend(entryL)\n return sf_entryL\n\n#---------------------------------------------------------------------------------------------------\n","sub_path":"sf-gpfs/gpfsmonitor/src/sf_gpfs/make_entries.py","file_name":"make_entries.py","file_ext":"py","file_size_in_byte":18639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"3743131","text":"class LRUCache:\n def __init__(self, capacity: int):\n self.cap=capacity\n self.dict={}\n self.frq=[]\n \n def get(self, key: int) -> int:\n if key in self.dict:\n self.frq.remove(key)\n self.frq.append(key)\n return self.dict.get(key,-1) \n\n def put(self, key: int, value: int) -> None:\n if key not in self.dict:\n if len(self.dict)+1>self.cap:\n self.dict.pop(self.frq.pop(0))\n else:\n self.frq.remove(key)\n self.frq.append(key)\n self.dict[key]=value","sub_path":"lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"626517901","text":"#!/usr/bin/env python\n#import PCF8591 as ADC\nimport RPi.GPIO as GPIO\nimport time\nimport math\nfrom smbus2 import SMBus\n\n\nbus = SMBus(1)\n\nDO = 17\nBuzz = 18\nGPIO.setmode(GPIO.BCM)\n\n\ndef setup():\n # ADC.setup(0x48)\n GPIO.setup(DO, \tGPIO.IN)\n GPIO.setup(Buzz, \tGPIO.OUT)\n GPIO.output(Buzz,\t1)\n\n\ndef Print(x):\n if x == 1:\n print('')\n print(' *********')\n print(' * Safe~ *')\n print(' *********')\n print('')\n if x == 0:\n print('')\n print(' ***************')\n print(' * Danger Gas! 
*')\n print(' ***************')\n print('')\n\n\ndef BoardRes(raw_adc):\n return float(5*(1023.0-raw_adc)/float(raw_adc))\n\n\ndef loop():\n status = 1\n count = 0\n\n while True:\n\n bus.write_byte(0x48, 0x40)\n adcval = bus.read_byte(0x48)\n print(adcval)\n print(BoardRes(adcval))\n\n tmp = GPIO.input(DO)\n if tmp != status:\n Print(tmp)\n status = tmp\n if status == 0:\n count += 1\n if count % 2 == 0:\n GPIO.output(Buzz, 1)\n else:\n GPIO.output(Buzz, 0)\n else:\n GPIO.output(Buzz, 1)\n count = 0\n\n time.sleep(0.2)\n\n\ndef destroy():\n GPIO.output(Buzz, 1)\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n try:\n setup()\n loop()\n except KeyboardInterrupt:\n destroy()\n","sub_path":"hardware/test/dac.py","file_name":"dac.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"62720426","text":"import re\n\nfrom django.shortcuts import render, redirect\nfrom .models import BankAccount, TransactionHistory, OPERATION, MAX_INCORRECT_PIN, ValidationError\n\nenter_credential = 'Enter credentials'\n\n\ndef check_card_id(request):\n card_id = request.POST.get('card_id')\n if not card_id:\n return render(request, 'check_card_id.html')\n try:\n account = BankAccount.objects.get(card_id=card_id)\n if not account.is_blocked_card:\n request.session['card_id'] = card_id\n return render(request, 'check_pin.html', {'msg': 'Enter PIN'})\n else:\n return render(\n request, 'message.html', {\n 'msg': 'Account has been blocked'})\n except BankAccount.DoesNotExist:\n return render(request, 'message.html', {'msg': 'No such account'})\n\n\ndef check_pin(request):\n try:\n card_id = get_account(request)\n except KeyError:\n return render(request, 'message.html',\n {'msg': enter_credential})\n user_pin = request.POST.get('pin')\n account = BankAccount.objects.get(card_id=card_id)\n if not account.is_blocked_pin and account.pin == user_pin:\n request.session['pin'] = account.pin\n return redirect('/transactions')\n\n elif not account.is_blocked_pin or account.incorrect_pin < MAX_INCORRECT_PIN:\n account.incorrect_pin += 1\n account.save()\n if account.incorrect_pin == MAX_INCORRECT_PIN:\n return render(\n request, 'message.html', {\n 'msg': 'Pin has been blocked'})\n\n return render(\n request, 'check_pin.html', {\n 'msg': 'Incorrect pin: ' + str(\n MAX_INCORRECT_PIN - account.incorrect_pin) + ' chance(s) left'})\n else:\n return render(request, 'message.html', {'msg': 'Pin is blocked'})\n\n\ndef balance(request):\n if not credential(request):\n return render(request, 'message.html', {'msg': enter_credential})\n card_id = get_account(request)\n account = BankAccount.objects.get(card_id=card_id)\n transaction = TransactionHistory(\n card_id=account,\n balance=account.balance,\n operation=OPERATION[0][0])\n transaction.save()\n return render(request, 'message.html', {\n 'balance': BankAccount.objects.get(card_id=card_id).balance,\n 'card_id': add_separator(card_id),\n })\n\n\ndef transactions(request):\n if not credential(request):\n return render(request, 'message.html', {'msg': enter_credential})\n\n card_id = get_account(request)\n account = BankAccount.objects.get(card_id=card_id)\n return render(request, 'transactions.html', {\n 'transactions': account.transactionhistory_set.order_by('-date'),\n 'card_id': add_separator(card_id),\n 'balance': account.balance\n })\n\n\ndef money_withdrawal(request):\n if not credential(request):\n return render(request, 'message.html', {'msg': enter_credential})\n try:\n card_id = 
get_account(request)\n account = BankAccount.objects.get(card_id=card_id)\n withdrawal = request.POST.get('amount')\n if not withdrawal:\n return render(request, 'money_withdrawal.html', {\n 'balance': account.balance})\n try:\n transaction = TransactionHistory(\n card_id=account,\n operation=OPERATION[1][0],\n money_withdrawal=int(withdrawal))\n transaction.save()\n return render(request, 'message.html', {\n 'card_id': add_separator(card_id),\n 'balance': account.balance,\n 'msg': 'Withdrawal: {}'.format(withdrawal),\n })\n except ValidationError:\n return render(request, 'message.html', {\n 'card_id': add_separator(card_id),\n 'balance': account.balance,\n 'msg': 'Amount exceeds the balance'\n })\n except ValueError:\n return render(\n request, 'money_withdrawal.html', {\n 'msg': 'Only digits allowed'})\n\n\ndef get_account(request):\n return request.session['card_id']\n\n\ndef logout(request):\n request.session.flush()\n return redirect('/check_card_id')\n\n\ndef credential(request):\n return bool(request.session.get('card_id') and request.session.get('pin'))\n\n\ndef add_separator(string, sep='-'):\n return sep.join(re.findall('....', string))\n","sub_path":"bank_account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"113995897","text":"# Time: O(n)\n# Space: O(1)\n\n# Given an array nums, write a function to move all 0's\n# to the end of it while maintaining the relative order\n# of the non-zero elements.\n#\n# For example, given nums = [0, 1, 0, 3, 12], after\n# calling your function, nums should be [1, 3, 12, 0, 0].\n#\n# Note:\n# You must do this in-place without making a copy of the array.\n# Minimize the total number of operations.\n\ntry:\n xrange\nexcept NameError:\n xrange = range\n\nclass Solution(object):\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n pos = 0\n for i in xrange(len(nums)):\n if nums[i]:\n nums[pos], nums[i] = nums[i], nums[pos]\n # wrong for nums = [1], cannot just set item being swapped to 0\n # nums[pos], nums[i] = nums[i], 0\n pos += 1\n\n def moveZeroes2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n # cmp not exist in Python3\n # def cmp(a, b):\n # return (a > b) - (a < b)\n nums.sort(cmp=lambda a, b: 0 if b else -1)\n\n\nclass Solution2(object):\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n pos = 0\n for i in xrange(len(nums)):\n if nums[i]:\n nums[pos] = nums[i]\n pos += 1\n\n for i in xrange(pos, len(nums)):\n nums[i] = 0\n\n\na = [0, 1, 0, 13, 12]\nSolution().moveZeroes2(a)\nprint(a)\n","sub_path":"Python/move-zeroes.py","file_name":"move-zeroes.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"498625834","text":"# -*- coding: utf-8 -*-\r\nimport subprocess\r\nfrom multiprocessing import Pool\r\nimport itertools\r\nimport os\r\n\r\n'''\r\nこのプログラムは実行スクリプトと同じディレクトリに入れてください.\r\n実行されるコマンドは\r\njava SCRIPT USERNUM REGULARIZE RANK STEPSIZE\r\nです.\r\n標準出は\r\nresult_USERNUM_REGULARIZE_RANK_STEPSIZE.txt\r\nに出力されます.気に入らない場合は書き換えてください.\r\n'''\r\n\r\nfilter_width = [2, 4, 5, 8]\r\n\r\nfilter_count = [7, 25, 49, 98, 124, 145, 186]\r\n\r\nfstride_x = [1]\r\n\r\nfstride_y 
= [2, 4]\r\n\r\nframe_size = [30, 40, 80, 100]\r\n\r\nframe_stride = [10, 20, 40, 50]\r\n\r\nmfcc_bins = [10, 12, 16, 20, 36, 40]\r\n\r\n\r\n# 実行スクリプト\r\nfile_name = 'train.py'\r\n\r\ndef f(args):\r\n    str_args = list(map(str, args))\r\n    if not os.path.exists('result_{}.txt'.format('_'.join(str_args))):\r\n        with open('result_{}.txt'.format('_'.join(str_args)), 'w') as f:\r\n            subprocess.call(['python', file_name, \"--model_architecture\", \"low_latency_conv_tune\", \"--model_size_info\"]+str_args, stdout=f)\r\n    return True\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    # 引数の順番でおねがいします\r\n    params = itertools.product(filter_width, filter_count, fstride_x, fstride_y, frame_size, frame_stride, mfcc_bins)\r\n\r\n    # デフォルト並列数は4に設定しています\r\n    p = Pool(1)\r\n    p.map(f, params)\r\n","sub_path":"executer.py","file_name":"executer.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"128671387","text":"import random as r\n\narray = [r.randint(1, 50) for i in range(30)]\nprint(array)\nfor i in range(1, len(array)):\n    tmp = array[i]\n    j = i\n    while j > 0 and tmp < array[j - 1]:\n        array[j] = array[j - 1]\n        j -= 1\n    array[j] = tmp\nprint(array)\n","sub_path":"Python_examples/sorts/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"286777196","text":"# -*- coding: UTF-8 -*-\n\nfrom Ui_ReplayWidget import *\nfrom testdata import *\n\n\nif __name__==\"__main__\":\n    app = QtGui.QApplication(sys.argv)\n    scene = QtGui.QGraphicsScene()\n    view = Ui_2DReplayWidget(scene)\n    view.setBackgroundBrush(QtGui.QColor(0, 0, 0))\n    view.Initialize(iniInfo, begInfo0)\n    view.UpdateEndData(cmd0, endInfo0)#bug(OK)\n    view.UpdateBeginData(begInfo1)\n    view.UpdateEndData(cmd1, endInfo1)\n    view.GoToRound(1, 0)\n    cursor = Ui_TargetCursor()\n    scene.addItem(cursor)\n    cursor.setPos(QtCore.QPointF(0, 0))\n    \n    #view.ShowMoveAnimation()#bug\n    view.show()\n    sys.exit(app.exec_())\n","sub_path":"Ui_2DReplay/testfile.py","file_name":"testfile.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"271627750","text":"from datetime import datetime\nimport socket\n\nsock = socket.socket(\n    socket.AF_INET,\n    socket.SOCK_STREAM,\n    socket.IPPROTO_TCP)\n\nPORT = 3000\n\naddress = ('127.0.0.1', PORT)\n\nsock.bind(address)\n\ntry: \n    sock.listen(1)\n    print('--- Starting server on port {} at {} ---'.format(PORT, datetime.now().strftime('%H:%M:%S %d-%m-%y')))\n    conn, addr = sock.accept()\n\n    buffer_length = 8\n\n    message_complete = False\n\n    message = b''\n\n    while not message_complete:\n        part = conn.recv(buffer_length)\n        message += part\n        if len(part) < buffer_length:\n            break\n\n    message = message.decode('utf8')\n    print('{} Echoed: {}'.format(\n        datetime.now().strftime('%H:%M:%S %d-%m-%y'), message))\n    \n    conn.sendall(message.encode('utf8'))\n\nexcept KeyboardInterrupt:\n    try:\n        conn.close()\n    except NameError:\n        pass\n\n    sock.close()\n    print('--- Stopping server on port {} at {} ---'.format(PORT,\n        datetime.now().strftime('%H:%M:%S %d-%m-%y')))\n\n\nconn.close()\nsock.close()\nprint('--- Stopping server on port {} at {} ---'.format(PORT,\n    datetime.now().strftime('%H:%M:%S 
%d-%m-%y')))\n\n","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"421153236","text":"import pandas as pd\nfrom datetime import datetime\nfrom functools import reduce\ncurrentMonth = datetime.now().month\nfrom datetime import date\nlastyear_lastday = date(date.today().year-1, 12, 31)\n\n\ncurrent_month_file_path = 'Y:/款项登记/18年回款登记/2018-{}月回款/2018-{}月回款.xlsx'.format(currentMonth, currentMonth)\ndf_currentMonth = pd.read_excel(current_month_file_path, sheet_name='2018年{}月'.format(currentMonth))\ndf_currentMonth['收款日期'] = pd.to_datetime(df_currentMonth['收款日期'], format='%Y-%m-%d')\ndf_previous = pd.read_excel('Y:/款项登记/好视通客户成交管理-总表 .xlsx', sheet_name='2018年')\n# budget\nbudget = pd.read_excel('Y:/款项登记/18年回款登记/业绩目标.xlsx', sheet_name=0)\n# 离职人员名单\nexit_employees = pd.read_excel('Y:/款项登记/18年回款登记/2018年入职登记表.xlsx', sheet_name='离职登记')\nexit_employees = exit_employees[['离职日期','部门','姓名']]\nexit_employees.离职日期 = pd.to_datetime(exit_employees['离职日期'], format='%Y-%m-%d')\nexit_employees = exit_employees[(exit_employees['离职日期'].dt.month == currentMonth-1)]\nexit_employee_department = set(exit_employees.部门.unique())\nexit_employee_name = set(exit_employees.姓名.unique())\n\n\n# helper function: return report columns\ndef make_column(targetColumn, columnName, data):\n df_target = data[['所属部门', targetColumn]].groupby('所属部门').sum()/10000\n df_target = df_target.reset_index()\n df_target = df_target.rename(index=str, columns={targetColumn: columnName})\n df_target = df_target.round(2)\n return df_target\n\n\ndef get_time_series():\n df_currentMonth = pd.read_excel(current_month_file_path, sheet_name='2018年{}月'.format(currentMonth))\n df_previous = pd.read_excel('Y:/款项登记/好视通客户成交管理-总表 .xlsx', sheet_name='2018年')\n df_ytd = df_previous.append(df_currentMonth)\n df_ytd['收款日期'] = pd.to_datetime(df_ytd['收款日期'], format='%Y-%m-%d')\n df_ytd.index = df_ytd['收款日期']\n df_ytd = df_ytd[(df_ytd['收款日期'] > lastyear_lastday)]\n\n # df_ytd['month'] = df_ytd['收款日期'].dt.strftime('%m')\n df_ytd['week'] = df_ytd['收款日期'].dt.strftime('%W')\n # df_ytd['day'] = df_ytd['收款日期'].dt.strftime('%d')\n\n df_ytd_week = df_ytd.groupby(['week']).sum()\n df_ytd_week = df_ytd_week.reset_index()\n df_ytd_week = df_ytd_week[['week','净现金业绩','本期收款']]\n\n return df_ytd_week\n\ndf_ytd_week = get_time_series()\n\n\n\ndef get_汇总表():\n # 本月收款\n 本月收款 = make_column('本期收款', '本月收款', df_currentMonth)\n # 软件金额\n 软件金额 = make_column('净现金业绩', '软件金额', df_currentMonth)\n # 单数_新签代理\n 单数_新签代理 = df_currentMonth[['所属部门', '新单数']].groupby('所属部门').sum()\n 单数_新签代理 = 单数_新签代理.reset_index()\n # 当日业绩\n df_currentMonth['收款日期'] = pd.to_datetime(df_currentMonth['收款日期'], format='%Y-%m-%d')\n df_today = df_currentMonth.loc[(df_currentMonth.收款日期 == datetime.today().strftime(\"%Y-%m-%d\"))]\n 当日业绩 = make_column('净现金业绩', '当日业绩', df_today)\n\n currentMonth_column_name = '{}月'.format(currentMonth)\n budget_currentMonth = budget[['部门', currentMonth_column_name, '2018年预测']]\n budget_currentMonth = budget_currentMonth.rename(index=str,\n columns={'部门': '所属部门', budget_currentMonth.columns[1]: '月度任务'})\n # 之前月份累计收款\n 之前月总收款 = make_column('本期收款', '之前月总收款', df_previous)\n # 之前月总业绩\n 之前月总业绩 = make_column('净现金业绩', '之前月总业绩', df_previous)\n # 合并表\n dfs = [软件金额, budget_currentMonth, 单数_新签代理, 本月收款, 当日业绩, 之前月总收款, 之前月总业绩]\n 合并表 = reduce(lambda left, right: pd.merge(left, right, on='所属部门', how='left'), dfs)\n 合并表 = 合并表.fillna(0)\n\n # 年度累计收款\n 合并表['年度累计收款'] = 
合并表['本月收款'] + 合并表['之前月总收款']\n # annual cumulative performance\n 合并表['年度累计业绩'] = 合并表['软件金额'] + 合并表['之前月总业绩']\n 汇总 = 合并表.round(2)\n 汇总 = 汇总.append(汇总.sum(numeric_only=True), ignore_index=True)\n 汇总.iloc[-1, 0] = '汇总'\n 汇总['月度完成率(%)'] = 汇总['软件金额'].div(汇总['月度任务']).multiply(100).round(2)\n 汇总['年度达成率(%)'] = 汇总['年度累计业绩'].div(汇总['2018年预测']).multiply(100).round(2)\n 汇总 = 汇总.round(2)\n 汇总 = 汇总.astype(str)\n 汇总 = 汇总.replace('inf',0)\n 汇总 = 汇总.replace('nan', 0)\n 汇总 = 汇总.replace('0.0', 0)\n 汇总['月度完成率(%)'] = 汇总['月度完成率(%)'].astype(float)\n return 汇总\n\n汇总 = get_汇总表()\n\n\n\ndef get_个人业绩YTD():\n # df_currentMonth = pd.read_excel(current_month_file_path, sheet_name='2018年{}月'.format(currentMonth))\n 当月业绩 = df_currentMonth[['销售人员', '净现金业绩', '所属部门', '新单数']].groupby(['所属部门', '销售人员']).sum()\n 当月业绩 = 当月业绩.reset_index()\n 当月业绩 = 当月业绩.rename(index=str, columns={'净现金业绩': '{}月净现金业绩'.format(currentMonth)})\n\n # build the per-person performance frames for each preceding month (for the individual ranking)\n i = 1\n df_list = []\n while i < currentMonth:\n previous_month_file_path = 'Y:/款项登记/18年回款登记/2018-{}月回款/2018-{}月回款.xlsx'.format(currentMonth - i,\n currentMonth - i)\n last_month_df = pd.read_excel(previous_month_file_path, sheet_name='2018年{}月'.format(currentMonth - i))\n last_month_df = last_month_df[['所属部门', '销售人员', '净现金业绩']].groupby(['所属部门', '销售人员']).sum()\n last_month_df = last_month_df.reset_index()\n last_month_df = last_month_df.rename(index=str, columns={'净现金业绩': '{}月净现金业绩'.format(currentMonth - i)})\n # last_month_df = last_month_df.reset_index()\n df_list.append(last_month_df)\n i += 1\n\n 前单月业绩合并表 = reduce(lambda left, right: pd.merge(left, right, left_on=['所属部门', '销售人员'],\n right_on=['所属部门', '销售人员'], how='outer'), df_list)\n 个人业绩YTD = pd.merge(当月业绩, 前单月业绩合并表, left_on=['所属部门', '销售人员'],\n right_on=['所属部门', '销售人员'], how='outer')\n 个人业绩YTD['1-{}月净现金业绩'.format(currentMonth)] = 个人业绩YTD.sum(numeric_only=True, axis=1)\n 个人业绩YTD = 个人业绩YTD.reset_index()\n 个人业绩YTD = 个人业绩YTD.round(2)\n # 个人业绩YTD.iloc[:, 2:] = 个人业绩YTD.iloc[:, 2:].astype(float)\n 个人业绩YTD = 个人业绩YTD.fillna(0)\n 个人业绩YTD = 个人业绩YTD.drop(columns=['index'])\n 个人业绩YTD = 个人业绩YTD.sort_values(['所属部门','{}月净现金业绩'.format(currentMonth)], ascending=[True,False])\n 个人业绩YTD = 个人业绩YTD[~个人业绩YTD['销售人员'].isin(exit_employee_name) & ~个人业绩YTD['销售人员'].isin(exit_employee_department)]\n return 个人业绩YTD\n\n个人业绩YTD = get_个人业绩YTD()\n\n\n\ndef get_业绩排名表():\n x = 0\n df_list_2 = []\n 累计排名 = 个人业绩YTD[['所属部门','销售人员', '1-{}月净现金业绩'.format(currentMonth)]]\n 累计排名['YTD累计排名'] = 累计排名['1-{}月净现金业绩'.format(currentMonth)].rank(ascending=False)\n 累计排名 = 累计排名.drop(columns=['1-{}月净现金业绩'.format(currentMonth)])\n 累计排名.YTD累计排名 = 累计排名.YTD累计排名.fillna(累计排名.YTD累计排名.max() + 2)\n\n df_list_2.append(累计排名)\n while x < currentMonth:\n previous_month_file_path='Y:/款项登记/18年回款登记/2018-{}月回款/2018-{}月回款.xlsx'.format(currentMonth-x,\n currentMonth - x)\n last_month_df = pd.read_excel(previous_month_file_path, sheet_name='2018年{}月'.format(currentMonth - x))\n last_month_df = last_month_df[['销售人员', '净现金业绩']].groupby(['销售人员']).sum()\n last_month_df['{}月排名'.format(currentMonth - x)] = last_month_df['净现金业绩'].rank(ascending=False)\n last_month_df = last_month_df.drop(columns=['净现金业绩'])\n last_month_df = last_month_df.reset_index()\n df_list_2.append(last_month_df)\n x += 1\n\n 业绩排名表 = reduce(lambda left, right: pd.merge(left, right, on='销售人员', how='outer' ), df_list_2)\n 业绩排名表 = 业绩排名表.loc[业绩排名表['所属部门'] != '售后部']\n 业绩排名表 = 业绩排名表.fillna(0)\n 业绩排名表 = 业绩排名表.sort_values(['所属部门','{}月排名'.format(currentMonth)], ascending=[True,True])\n for column in 业绩排名表.iloc[:, 2:]: # .ix was removed from pandas; .iloc takes the same positional column slice\n 业绩排名表[column] = 
业绩排名表[column].fillna(int(业绩排名表[column].max()) + int(1))\n\n 业绩排名表 = 业绩排名表[~业绩排名表['销售人员'].isin(exit_employee_name) & ~业绩排名表['销售人员'].isin(exit_employee_department)]\n return 业绩排名表\n\n业绩排名表 = get_业绩排名表()\n\n\n\ndef get_办事处汇总表():\n # sales_by_team = 个人业绩YTD[['所属部门','{}月净现金业绩'.format(currentMonth),'1-{}月净现金业绩'.format(currentMonth)]]\n # sales_by_team = sales_by_team.groupby(['所属部门']).sum()/10000\n # sales_by_team = sales_by_team.reset_index()\n subset_汇总 = 汇总[['所属部门','软件金额','之前月总业绩','月度任务','月度完成率(%)']]\n subset_汇总 = subset_汇总.rename(index=str, columns={'软件金额': '{}月净现金业绩'.format(currentMonth)})\n # sales_by_team = pd.merge(sales_by_team, subset_汇总, on=['所属部门'])\n sales_by_team = subset_汇总.round(2)\n return sales_by_team\n\nsales_by_team = get_办事处汇总表()\n\n# export data files\n\nencoding='gbk'\nsep=','\nexport_path = 'C:/Users/Administrator/Desktop/db2/'\n\ndf_ytd_week.to_csv(export_path+'df_ytd_week.csv',encoding=encoding,index=False)\n汇总.to_csv(export_path+'total.csv', encoding=encoding, sep=',', index=False)\n个人业绩YTD.to_csv(export_path+'sales_figures.csv', encoding=encoding, index=False)\n业绩排名表.to_csv(export_path+'sales_ranking.csv',encoding=encoding,index=False)\nsales_by_team.to_csv(export_path+'sales_by_team.csv',encoding=encoding,index=False)","sub_path":"process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":11002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"576063641","text":"import mxnet as mx\nimport numpy as np\nimport bvh_reader as br\nimport bvh_writer as bw\nimport customizedRNN as RNN # JG CustomizedRNN.py\nimport os\nimport time\nfrom collections import OrderedDict\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\nprint(\"<<>>\")\n\n\ndef encoder(layer_number=1, hidden_number=500, Dropout_rate=0.2, Zoneout_rate=0.0, cell='gru'):\n print(\"<<>>\")\n cell_type = cell\n Muilti_cell = mx.rnn.SequentialRNNCell()\n\n for i in range(layer_number):\n\n if cell_type == 'gru' or cell_type == 'GRU' or cell_type == 'Gru':\n\n if Zoneout_rate > 0:\n print(\"zoneoutcell applied-{}\".format(i))\n Zoneout_cell = mx.rnn.ZoneoutCell(\n mx.rnn.GRUCell(num_hidden=hidden_number, prefix=\"gru_encoder_{}\".format(i)),\n zoneout_outputs=Zoneout_rate, zoneout_states=Zoneout_rate)\n Muilti_cell.add(Zoneout_cell)\n\n else:\n Muilti_cell.add(mx.rnn.GRUCell(num_hidden=hidden_number, prefix=\"gru_encoder_{}\".format(i)))\n\n print(\"stack {}-'{}'- decoder cell\".format(i, cell_type))\n\n elif cell_type == 'lstm' or cell_type == 'LSTM' or cell_type == 'Lstm':\n\n if Zoneout_rate > 0:\n print(\"zoneoutcell applied-{}\".format(i))\n Zoneout_cell = mx.rnn.ZoneoutCell(\n mx.rnn.LSTMCell(num_hidden=hidden_number, prefix=\"lstm_encoder_{}\".format(i)),\n zoneout_outputs=Zoneout_rate, zoneout_states=Zoneout_rate)\n Muilti_cell.add(Zoneout_cell)\n\n else:\n Muilti_cell.add(mx.rnn.LSTMCell(num_hidden=hidden_number, prefix=\"lstm_encoder_{}\".format(i)))\n\n print(\"stack {}-'{}'- decoder cell\".format(i, cell_type))\n\n else:\n\n if Zoneout_rate > 0:\n print(\"zoneoutcell applied-{}\".format(i))\n Zoneout_cell = mx.rnn.ZoneoutCell(\n mx.rnn.RNNCell(num_hidden=hidden_number, prefix=\"rnn_encoder_{}\".format(i)),\n zoneout_outputs=Zoneout_rate, zoneout_states=Zoneout_rate)\n Muilti_cell.add(Zoneout_cell)\n\n else:\n Muilti_cell.add(mx.rnn.RNNCell(num_hidden=hidden_number, prefix=\"rnn_encoder_{}\".format(i)))\n\n print(\"stack {}-'{}'- encoder cell\".format(i, cell_type))\n\n if Dropout_rate > 0 and (layer_number - 1) > i:\n 
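# note: dropout cells are stacked between encoder layers only, so the top layer's output reaches the decoder undropped\n 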
Muilti_cell.add(mx.rnn.DropoutCell(Dropout_rate, prefix=\"dropout_encoder_{}\".format(i)))\n print(\"stack {}-'{}'- encoder dropout cell\".format(i, cell_type))\n\n print(\"\\n\")\n return Muilti_cell\n\n\n'''Decoder'''\n\n\ndef decoder(layer_number=1, hidden_number=500, output_number=100, Dropout_rate=0.2, Zoneout_rate=0.0, Residual=True,cell='gru'):\n print(\"<<>>\")\n cell_type = cell\n Muilti_cell = RNN.SequentialRNNCell()\n\n for i in range(layer_number):\n\n if cell_type == 'gru' or cell_type == 'GRU' or cell_type == 'Gru':\n\n if Residual == True and Zoneout_rate > 0:\n print(\"residualcell applied_{}\".format(i))\n Residual_cell = RNN.ResidualCell(\n RNN.GRUCell(num_hidden=hidden_number, num_output=output_number, prefix=\"gru_decoder_{}\".format(i)))\n print(\"zoneoutcell applied-{}\".format(i))\n Zoneout_cell = RNN.ZoneoutCell(Residual_cell, zoneout_outputs=Zoneout_rate, zoneout_states=Zoneout_rate)\n Muilti_cell.add(Zoneout_cell)\n\n elif Residual == True and Zoneout_rate == 0:\n print(\"residualcell applied_{}\".format(i))\n Residual_cell = RNN.ResidualCell(\n RNN.GRUCell(num_hidden=hidden_number, num_output=output_number, prefix=\"gru_decoder_{}\".format(i)))\n Muilti_cell.add(Residual_cell)\n\n elif Residual == False and Zoneout_rate > 0:\n print(\"zoneoutcell applied-{}\".format(i))\n Zoneout_cell = RNN.ZoneoutCell(\n RNN.GRUCell(num_hidden=hidden_number, num_output=output_number, prefix=\"gru_decoder_{}\".format(i)),\n zoneout_outputs=Zoneout_rate, zoneout_states=Zoneout_rate)\n Muilti_cell.add(Zoneout_cell)\n\n else:\n Muilti_cell.add(\n RNN.GRUCell(num_hidden=hidden_number, num_output=output_number, prefix=\"gru_decoder_{}\".format(i)))\n\n print(\"stack {}-'{}'- decoder cell\".format(i, cell_type))\n\n elif cell_type == 'lstm' or cell_type == 'LSTM' or cell_type == 'Lstm':\n\n if Residual == True and Zoneout_rate > 0:\n print(\"residualcell applied_{}\".format(i))\n Residual_cell = RNN.ResidualCell(RNN.LSTMCell(num_hidden=hidden_number, num_output=output_number,\n prefix=\"lstm_decoder_{}\".format(i)))\n print(\"zoneoutcell applied-{}\".format(i))\n Zoneout_cell = RNN.ZoneoutCell(Residual_cell, zoneout_outputs=Zoneout_rate, zoneout_states=Zoneout_rate)\n Muilti_cell.add(Zoneout_cell)\n\n elif Residual == True and Zoneout_rate == 0:\n print(\"residualcell applied_{}\".format(i))\n Residual_cell = RNN.ResidualCell(RNN.LSTMCell(num_hidden=hidden_number, num_output=output_number,\n prefix=\"lstm_decoder_{}\".format(i)))\n Muilti_cell.add(Residual_cell)\n\n elif Residual == False and Zoneout_rate > 0:\n print(\"zoneoutcell applied-{}\".format(i))\n Zoneout_cell = RNN.ZoneoutCell(RNN.LSTMCell(num_hidden=hidden_number, num_output=output_number,\n prefix=\"lstm_decoder_{}\".format(i)),\n zoneout_outputs=Zoneout_rate, zoneout_states=Zoneout_rate)\n Muilti_cell.add(Zoneout_cell)\n\n else:\n Muilti_cell.add(RNN.LSTMCell(num_hidden=hidden_number, num_output=output_number,\n prefix=\"lstm_decoder_{}\".format(i)))\n\n print(\"stack {}-'{}'- decoder cell\".format(i, cell_type))\n\n else:\n\n if Residual == True and Zoneout_rate > 0:\n print(\"residualcell applied_{}\".format(i))\n Residual_cell = RNN.ResidualCell(\n RNN.RNNCell(num_hidden=hidden_number, num_output=output_number, activation='relu',\n prefix=\"rnn_decoder_{}\".format(i)))\n print(\"zoneoutcell applied-{}\".format(i))\n Zoneout_cell = RNN.ZoneoutCell(Residual_cell, zoneout_outputs=Zoneout_rate, zoneout_states=Zoneout_rate)\n Muilti_cell.add(Zoneout_cell)\n\n elif Residual == True and Zoneout_rate == 0:\n 
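# residual-only branch: ResidualCell adds the cell's input to its output; no zoneout is applied\n 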
print(\"residualcell applied_{}\".format(i))\n Residual_cell = RNN.ResidualCell(\n RNN.RNNCell(num_hidden=hidden_number, num_output=output_number, activation='relu',\n prefix=\"rnn_decoder_{}\".format(i)))\n Muilti_cell.add(Residual_cell)\n\n elif Residual == False and Zoneout_rate > 0:\n print(\"zoneoutcell applied-{}\".format(i))\n Zoneout_cell = RNN.ZoneoutCell(\n RNN.RNNCell(num_hidden=hidden_number, num_output=output_number, activation='relu',\n prefix=\"rnn_decoder_{}\".format(i)), zoneout_outputs=Zoneout_rate,\n zoneout_states=Zoneout_rate)\n Muilti_cell.add(Zoneout_cell)\n\n else:\n Muilti_cell.add(RNN.RNNCell(num_hidden=hidden_number, num_output=output_number, activation='relu',\n prefix=\"rnn_decoder_{}\".format(i)))\n\n print(\"stack {}-'{}'- decoder cell\".format(i, cell_type))\n\n if Dropout_rate > 0 and (layer_number - 1) > i:\n Muilti_cell.add(RNN.DropoutCell(Dropout_rate, prefix=\"dropout_decoder_{}\".format(i)))\n print(\"stack {}-'{}'- decoder dropout cell\".format(i, cell_type))\n\n print(\"\\n\")\n return Muilti_cell\n\ndef MotionNet(epoch=None , batch_size=None , save_period=None , cost_limit=None ,\n optimizer=None, learning_rate=None , lr_step=None , lr_factor=None , stop_factor_lr=None , use_gpu=True ,\n TEST=None , num_layer=None , cell=None, hidden_unit=None ,time_step = None , seed_timestep = None , batch_Frame= None , frame_time=None, graphviz=None):\n\n print(\"-------------------Motion Net-------------------\")\n\n '''1. Data_Loading - bvh_reader'''\n Normalization_factor, train_motion, train_label_motion , seed_timestep, pre_timestep, column, file_directory = br.Motion_Data_Preprocessing(time_step , seed_timestep , batch_Frame , TEST)\n\n if TEST==True:\n print(\"\")\n data = OrderedDict()\n data['seed_motion'] = train_motion\n label = {'label_motion': train_label_motion}\n\n test_iter = mx.io.NDArrayIter(data=data, label=label)\n\n else:\n print(\"\")\n data = OrderedDict()\n data['seed_motion'] = train_motion\n label = {'label_motion': train_label_motion}\n\n train_iter = mx.io.NDArrayIter(data=data, label=label, batch_size=batch_size, shuffle=False, last_batch_handle='pad') # Motion data is complex and requires sequential learning to learn from easy examples. So shuffle = False ->In here, not using sequential learning\n\n if use_gpu:\n ctx = mx.gpu(0)\n else:\n ctx = mx.cpu(0)\n\n '''2. 
Network'''\n all_motion = mx.sym.Variable('seed_motion')\n label_motion = mx.sym.Variable('label_motion')\n\n seed_motion = mx.sym.slice_axis(data=all_motion , axis=1 , begin = 0 , end = seed_timestep) # (batch , time , column)\n\n if TEST == True:\n pre_motion = mx.sym.reshape(mx.sym.slice_axis(data=all_motion, axis=1, begin=seed_timestep, end=seed_timestep + 1),shape=(1, -1)) # (batch=1,column) - first frame\n else:\n pre_motion = mx.sym.slice_axis(data=all_motion, axis=1, begin=seed_timestep-1, end=-1)\n\n print(\"-------------------Network Shape--------------------\")\n e_cell = encoder(layer_number= num_layer , hidden_number=hidden_unit , Dropout_rate=0.0 , Zoneout_rate=0.0 , cell=cell)\n d_cell = decoder(layer_number= num_layer , hidden_number=hidden_unit , output_number=column , Dropout_rate=0.0 , Zoneout_rate=0.0 , Residual = True , cell=cell)\n print(\"\\n\")\n\n _ , e_state = e_cell.unroll(length=seed_timestep , inputs=seed_motion , merge_outputs=True , layout='NTC')\n\n #seq2seq in test\n if TEST==True:\n # customized by JG\n if num_layer == 1:\n d_output, _ = d_cell.SingleLayer_feed_previous_unroll(length=pre_timestep, begin_state=e_state,inputs=pre_motion, merge_outputs=True,layout='NTC') # MultiLayer_feed_previous_unroll is also possible.\n else:\n d_output, _ = d_cell.MultiLayer_feed_previous_unroll(length=pre_timestep, begin_state=e_state,inputs=pre_motion, merge_outputs=True,layout='NTC')\n\n #seq2seq in training\n else:\n d_output , _ = d_cell.unroll(length=pre_timestep , begin_state=e_state , inputs = pre_motion, merge_outputs=True, layout='NTC')\n\n output = mx.sym.LinearRegressionOutput(data = d_output , label=label_motion)\n\n digraph=mx.viz.plot_network(symbol=output,hide_weights=True)\n\n #why? batch_Frame>=10 ? -> graph 'plot' size too small for label\n if graphviz==True and TEST == True and batch_Frame>=10:\n digraph.view(\"{}_batch_Frame_TEST_Seq2Seq\".format(batch_Frame)) #show graph\n elif graphviz==True and TEST == False and batch_Frame>=10:\n digraph.view(\"{}_batch_Frame_Training_Seq2Seq\".format(batch_Frame)) #show graph\n\n print(\"-------------------Network Learning Parameter--------------------\")\n print(output.list_arguments())\n print(\"\\n\")\n\n if TEST == False:\n mod = mx.module.Module(symbol=output, data_names=['seed_motion'], label_names=['label_motion'], context=ctx)\n print(\"-------------------Network Data Name--------------------\")\n print(mod.data_names)\n print(mod.label_names)\n print(\"\\n\")\n else:\n # test mod\n test_mod = mx.mod.Module(symbol=output , data_names=['seed_motion'] , label_names=['label_motion'] , context=ctx)\n print(\"-------------------Network Data Name--------------------\")\n print(test_mod.data_names)\n print(test_mod.label_names)\n print(\"\\n\")\n\n\n print(\"-------------------Network Data Shape--------------------\")\n if TEST==False:\n print(train_iter.provide_data)\n print(train_iter.provide_label)\n else:\n print(test_iter.provide_data)\n print(test_iter.provide_label)\n print(\"\\n\")\n '''\n grad_req (str, list of str, dict of str to str) – \n Requirement for gradient accumulation. Can be ‘write’, ‘add’, or ‘null’ \n (default to ‘write’). 
Can be specified globally (str) or for each argument (list, dict).\n '''\n if TEST == False:\n\n mod.bind(data_shapes=train_iter.provide_data , label_shapes=train_iter.provide_label , for_training=True , shared_module=None , inputs_need_grad=False , force_rebind=False , grad_req='write')\n # weights load\n weights_path = 'weights/MotionNet-{}.params'.format(save_period)\n\n if os.path.exists(weights_path):\n mod.load_params(weights_path)\n else:\n mod.init_params(initializer=mx.initializer.Xavier(rnd_type='gaussian', factor_type='avg', magnitude=1))\n\n start_time=time.time()\n print(\"-------------------Learning Start--------------------\")\n\n if optimizer=='sgd':\n lr_sch = mx.lr_scheduler.FactorScheduler(step = lr_step, factor = lr_factor , stop_factor_lr = stop_factor_lr)\n mod.init_optimizer(optimizer=optimizer, optimizer_params={'learning_rate': learning_rate , 'lr_scheduler': lr_sch})\n else:\n mod.init_optimizer(optimizer=optimizer, optimizer_params={'learning_rate': learning_rate})\n\n metric = mx.metric.create(['mse'])\n\n for epoch in range(1, epoch + 1, 1):\n train_iter.reset()\n metric.reset()\n for batch in train_iter:\n mod.forward(batch, is_train=True)\n\n #Data Order Transform (N,T,C)\n mod.update_metric(metric,batch.label)\n\n mod.backward()\n mod.update()\n\n #print('epoch : {} , MSE : {}'.format(epoch,metric.get()))\n if epoch % 100 == 0:\n end_time=time.time()\n print(\"-------------------------------------------------------\")\n print(\"{}_learning time : {}\".format(epoch,end_time-start_time))\n print(\"-------------------------------------------------------\")\n\n if epoch % 10000 == 0:\n if not os.path.exists(\"weights\"):\n os.makedirs(\"weights\")\n\n print('Saving weights')\n mod.save_params(\"weights/MotionNet-{}.params\".format(epoch))\n\n cal = mod.predict(eval_data=train_iter , merge_batches=True , reset=True, always_output_list=False).asnumpy()\n cost = cal - train_label_motion\n cost=(cost**2)/2\n cost=np.mean(cost)\n print('{} epoch '.format(epoch), end='')\n print(\"Joint Angle Square Error : {}\".format(cost))\n\n if cost < cost_limit:\n\n if not os.path.exists(\"weights\"):\n os.makedirs(\"weights\")\n\n print('Saving weights')\n mod.save_params(\"weights/MotionNet-{}.params\".format(epoch))\n\n print(\"############################################################################################\")\n print(\"End the learning.\")\n print(\"############################################################################################\")\n\n #order : (N , T(all_time) , C)\n seed = train_iter.data[0][1].asnumpy()\n #order : (N , T(predict time) , C)\n prediction_motion = mod.predict(eval_data=train_iter, merge_batches=True, reset=True, always_output_list=False).asnumpy() / Normalization_factor\n\n '''Creating a bvh file with predicted values -bvh_writer'''\n bw.Motion_Data_Making(seed[:,:seed_timestep] / Normalization_factor, prediction_motion,\n seed_timestep, pre_timestep, batch_Frame, frame_time, file_directory,\n TEST)\n\n return \"optimization completed\"\n\n print(\"\\n\")\n\n print(\"-------------------Network Information--------------------\")\n print(mod.data_shapes)\n print(mod.label_shapes)\n print(mod.output_shapes)\n print(mod.get_params())\n print(mod.get_outputs())\n print(\"{} learning optimization completed\".format(epoch))\n print(\"\\n\")\n\n if TEST==True:\n\n test_mod.bind(data_shapes=test_iter.provide_data , label_shapes=test_iter.provide_label , for_training=False)\n\n # weights load\n weights_path = 
'weights/MotionNet-{}.params'.format(save_period)\n if os.path.exists(weights_path):\n test_mod.load_params(weights_path)\n\n #order : (N , T(all time) , C)\n seed = test_iter.data[0][1].asnumpy()\n\n #order : (N , T(predict time) , C)\n prediction_motion = test_mod.predict(eval_data=test_iter , merge_batches=True , always_output_list=False).asnumpy()/Normalization_factor\n\n print(\"Test Prediction motion shape : {}\".format(np.shape(prediction_motion)))\n\n #test cost\n cost = prediction_motion - train_label_motion\n cost=(cost**2)/2\n cost=np.mean(cost)\n print(\"prediction error : {}\".format(cost))\n\n '''Creating a bvh file with predicted values -bvh_writer'''\n bw.Motion_Data_Making(seed[:,:seed_timestep] / Normalization_factor , prediction_motion , seed_timestep , pre_timestep , batch_Frame , frame_time , file_directory ,TEST)\n\n return \"learning completed\"\n\n else:\n\n print(\"Can not test\")\n\n\nif __name__ == \"__main__\":\n\n TEST=False\n\n #The following parameters must have the same value in 'training' and 'test' modes.\n num_layer=1 \n cell='lstm'\n hidden_unit=1000 \n time_step = 100 \n seed_timestep = 20 \n batch_Frame= 1 \n frame_time=30\n save_period=0\n\n '''Execution'''\n if TEST : \n\n completed=MotionNet(TEST=TEST , save_period=1 , num_layer=num_layer , cell=cell, hidden_unit=hidden_unit , time_step = time_step , seed_timestep = seed_timestep , batch_Frame= batch_Frame , frame_time=frame_time ,graphviz=True)\n print(completed)\n\n else:\n\n completed = MotionNet(epoch=300000 , batch_size=75 , save_period=save_period, cost_limit=0.1 ,\n optimizer='adam', learning_rate=0.001 , lr_step=5000, lr_factor=0.99, stop_factor_lr=1e-08 , use_gpu=True ,\n TEST=TEST , num_layer=num_layer , cell=cell , hidden_unit=hidden_unit , time_step = time_step , seed_timestep = seed_timestep , batch_Frame = batch_Frame , frame_time=frame_time , graphviz=True)\n print(completed)\n\nelse:\n print(\"MotionNet_imported\")","sub_path":"DeepHumanPrediction/Code/DeepHumanPrediction/Motion_Prediction_advanced_Seq2Seq_batchversion/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":19452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"649585471","text":"# This library handles queries over the Postgres database (connection and execution).\n\nimport psycopg2\nimport pandas as pd\nimport configuration\n\nclass DBInterface:\n def __init__(self, dataset):\n self.dataset = dataset\n\n def __enter__(self):\n self.conn = psycopg2.connect(\n # f\"host=localhost port=54321 dbname=intex user=adm_intex password=Dd7iKZUP3wYmE7pPvK\"\n f\"dbname={self.dataset}_reviews port=5432 user=behrooz password=212799\"\n )\n self.cur = self.conn.cursor()\n return self\n\n def __exit__(self, type, value, traceback):\n self.cur.close()\n self.conn.close()\n\n # Once the query string is determined, this function executes the query over the Postgres database.\n def execute_query(self, query_type, query_string, query_parameters=None):\n\n # In case the query is relevance retrival, the output should be a list.\n # In other cases, it should be a dictionary.\n results = {}\n\n self.cur.execute(query_string)\n query_results = self.cur.fetchall()\n\n # In case the query is rating retrieval, the outcome should be case to integer.\n if query_type in [\"get_ratings\"]:\n for query_result in query_results:\n results[query_result[0]] = int(query_result[1])\n\n # The case of key-value retrievals\n elif query_type in [\"get_reviews\", \"get_summaries\", 
\"topic_definition\"]:\n for query_result in query_results:\n results[query_result[0]] = query_result[1]\n\n # The case of list retrievals (whose list memebers should be seperated by a space)\n elif query_type in [\"get_topics\", \"get_sentiments\"]:\n for query_result in query_results:\n results[query_result[0]] = query_result[1].split(\" \")\n\n # All other normal cases\n else:\n for query_result in query_results:\n results = query_result\n\n # In an exceptional case, if the executed query yields no result, the function returns False.\n if len(results) == 0:\n return False\n\n return results\n\n # Most query executions in INTEX are \"select queries\". However, there exist a few manipulation\n # ... queries as well, i.e., \"insert\". These queries are handled by this function.\n def manipulation_query(self, topic_insert_query):\n\n self.cur.execute(topic_insert_query)\n self.conn.commit()\n return True\n\n # Each query is identified with a name, i.e., \"query_type\".\n # If some parameters are required to make the query specific, they are provided in \"parameters\" as a list.\n def get_query(self, query_type, parameters=None):\n\n query = \"\"\n\n # The query to retrieve elements relevant to an input element (in \"parameters[0]\"), order defined in \"parameters[1]\" and limit in \"parameters[2]\"\n if query_type == \"relevance\":\n query = \"select rev1, rev2 from review_sim where (rev1=\" + str(parameters[0]) + \" or rev2=\" + str(\n parameters[0]) + \") order by \"+parameters[1]+\" desc limit \"+str(parameters[2])+\";\"\n\n # The query to retrieve reviews ratings for a given set of elements (indicated in \"parameters[0]\")\n elif query_type == \"get_ratings\":\n query = \"select id, overall from reviews where in_sample = TRUE and id in (\" + \\\n parameters[0]+\")\"\n\n # The query to retrieve the textual content of reviews for a given set of elements (indicated in \"parameters[0]\")\n elif query_type == \"get_reviews\":\n query = \"select id, reviewtext from reviews where in_sample = TRUE and id in (\" + \\\n parameters[0]+\")\"\n\n # The query to retrieve summaries for a given set of elements (indicated in \"parameters[0]\")\n elif query_type == \"get_summaries\":\n query = \"select id, summary from reviews where in_sample = TRUE and id in (\" + \\\n parameters[0]+\")\"\n\n # The query to get item (product) information under review (given the review_id in \"parameters[0]\")\n elif query_type == \"product_info\":\n query = \"select t2.asin, t2.title, t2.category, t2.price from reviews t1, items t2 where t1.id = \" + \\\n str(parameters[0])+\" and t1.asin = t2.asin;\"\n\n # The query to get review information for the review whose id is indicated in \"parameters[0]\"\n elif query_type == \"reviews_info\":\n query = \"select asin, overall, summary, reviewtext from reviews where id = \" + \\\n str(parameters[0])\n\n # The query to get the sentiment values for the review whose id is indicated in \"parameters[0]\"\n elif query_type == \"sentiment\":\n query = \"select rev_sentiment from sentiment where rid = \" + \\\n str(parameters[0])\n\n # The query to get the topic values for the review whose id is indicated in \"parameters[0]\"\n elif query_type == \"topic\":\n query = \"select topics from topic where rid = \"+str(parameters[0])\n\n # The query to get the topic values of ALL reviews.\n elif query_type == \"get_topics\":\n query = \"select rid, topics from topic;\"\n\n # The query to get the sentiment values of ALL reviews.\n elif query_type == \"get_sentiments\":\n query = \"select rid, 
rev_sentiment from sentiment;\"\n\n elif query_type == \"topic_insert\":\n query = \"insert into topic (rid, topics) values (\" + \\\n str(parameters[0])+\", '\"+parameters[1]+\"');\"\n\n elif query_type == \"topic_similarity_insert\":\n query = \"update review_sim set topic_sim = \" + \\\n str(parameters[0])+\" where rev1 = \"+str(parameters[1]\n )+\" and rev2 = \"+str(parameters[2])+\";\"\n elif query_type == \"tag_insert\":\n query = \"insert into tag (rid, tags) values (\" + \\\n str(parameters[0])+\", '\"+parameters[1]+\"');\"\n elif query_type == \"sentiment_insert\":\n query = \"insert into sentiment (rid, rev_sentiment) values (\"+str(\n parameters[0])+\", '\"+parameters[1]+\"');\"\n\n elif query_type == \"sentiment_similarity_insert\":\n # column name aligned with the sentiment_score query below, which reads sentiment_sim\n query = \"update review_sim set sentiment_sim = \" + \\\n str(parameters[0])+\" where rev1 = \"+str(parameters[1]\n )+\" and rev2 = \"+str(parameters[2])+\";\"\n elif query_type == \"text_similarity_insert\" and parameters[3] == \"summary\":\n query = \"update review_sim set summary_sim = \" + \\\n str(parameters[0]) + \" where rev1 = \" + str(parameters[1]\n ) + \" and rev2 = \" + str(parameters[2]) + \";\"\n elif query_type == \"text_similarity_insert\" and parameters[3] == \"review\":\n query = \"insert into review_sim (rev1,rev2,sim) values (\"+str(\n parameters[1])+\",\"+str(parameters[2])+\",\"+str(parameters[0])+\");\"\n\n elif query_type == \"previous_sample_remove\":\n query = \"update reviews set in_sample = FALSE;\"\n elif query_type == \"sample_mark\":\n query = \"update reviews set in_sample = TRUE where id = \" + \\\n str(parameters[0])+\";\"\n elif query_type == \"keywords\":\n # repaired garbled query (was \"...elements as e inner join attributeswhere reviewtext like...\"); joins attributes the same way as get_item_id_from_keywords below\n query = \"select e.id from elements as e inner join attributes as a on e.id = a.element_id and a.name = 'text' where a.value like '\" + \\\n str(parameters[0])+\"' limit 1;\"\n elif query_type == \"random_review\":\n query = \"select id from reviews where in_sample = TRUE order by random() limit 1;\"\n elif query_type == \"session_existed\":\n query = \"select iterations_so_far from session where name = '\" + \\\n parameters[0]+\"';\"\n elif query_type == \"iteration_update\":\n query = \"update session set iterations_so_far = \" + \\\n str(parameters[0])+\" where name = '\"+parameters[1]+\"'\"\n elif query_type == \"session_create\":\n query = f\"\"\"insert into session (name, iterations_so_far, target) values \n ('{parameters[0]}',1,'{parameters[1]}');\"\"\"\n elif query_type == \"text_score\":\n query = \"select sim from review_sim where rev1 = \" + \\\n parameters[0]+\" and rev2 = \"+parameters[1]+\";\"\n elif query_type == \"summary_score\":\n query = \"select summary_sim from review_sim where rev1 = \" + \\\n parameters[0]+\" and rev2 = \"+parameters[1]+\";\"\n elif query_type == \"topic_score\":\n query = \"select topic_sim from review_sim where rev1 = \" + \\\n parameters[0]+\" and rev2 = \"+parameters[1]+\";\"\n elif query_type == \"sentiment_score\":\n query = \"select sentiment_sim from review_sim where rev1 = \" + \\\n parameters[0]+\" and rev2 = \"+parameters[1]+\";\"\n elif query_type == \"target_retrieve\":\n query = \"select target from session where name = '\" + \\\n parameters[0]+\"';\"\n elif query_type == \"topic_definition_insert\":\n query = \"insert into topic_definition (topic_id, words) values (\"+str(\n parameters[0])+\", '\"+str(parameters[1])+\"')\"\n elif query_type == \"topic_definition_truncate\":\n query = 
\"truncate table topic_definition;\"\n elif query_type == \"topic_truncate\":\n query = \"truncate table topic;\"\n elif query_type == \"topic_definition\":\n query = \"select topic_id, words from topic_definition;\"\n elif query_type == \"iteration_register\":\n query = f\"\"\"insert into exploration_iteration \n (session_name, iteration, feedback, guidance, terminate) \n values ('{parameters[0]}',{parameters[1]},{parameters[2]},ARRAY{parameters[3]},{parameters[4]});\"\"\"\n elif query_type == \"get_tag\":\n query = \"select tags from tag where rid = \"+str(parameters[0])\n\n return query\n\n def get_item_id_from_keywords(self, keywords):\n self.cur.execute(\n f\"\"\"select e.id \n from elements as e \n inner join attributes as a on e.id = a.element_id and a.name = 'text' \n where e.type = 'item' and a.value like ' {keywords}' \n limit 1;\"\"\")\n result = self.cur.fetchone()\n return result[0] if result != None else None\n\n def get_item_count(self):\n self.cur.execute(\n f\"\"\"select count(e.id) \n from elements as e \n where e.type = 'item';\"\"\")\n return self.cur.fetchone()[0]\n\n def get_random_element_id(self, element_type=None):\n self.cur.execute(\n f\"\"\"select e.id \n from elements as e \n order by random() limit 1;\"\"\")\n result = self.cur.fetchone()\n return result[0] if result != None else None\n\n def get_random_element(self, element_type=None):\n return pd.read_sql(\n f\"\"\"select e.*\n from elements as e \n order by random() limit 1;\"\"\", self.conn).iloc[0]\n\n def get_element(self, element_id):\n return pd.read_sql(\n f\"\"\"select e.* \n from elements as e \n where e.id = {element_id};\"\"\", self.conn).iloc[0]\n\n def get_elements(self, element_ids):\n element_ids = str(element_ids).replace(\n '[', '(').replace(']', ')')\n return pd.read_sql(\n f\"\"\"select e.* \n from elements as e \n where e.id in {element_ids};\"\"\", self.conn)\n\n def get_element_pair_similarity(self, element1_id, element2_id):\n return pd.read_sql(\n f\"\"\"select * \n from similarities\n where (element1_id = {element1_id} and element2_id = {element2_id})\n or (element1_id = {element2_id} and element2_id = {element1_id});\"\"\", self.conn)\n\n def get_element_to_element_list_similarities(self, exploration_element_ids, element_ids):\n element_ids = str(element_ids).replace(\n '[', '(').replace(']', ')')\n exploration_element_ids = str(exploration_element_ids).replace(\n '[', '(').replace(']', ')')\n return pd.read_sql(\n f\"\"\"select * \n from similarities\n where (element1_id in {exploration_element_ids} and element2_id in {element_ids})\n or (element1_id in {element_ids} and element2_id in {exploration_element_ids});\"\"\", self.conn)\n\n def get_relevant_elements(self, element, relevance_function, k_prime):\n query = f\"\"\"select e.*, s.{relevance_function} \n from similarities as s\n inner join elements as e on e.id != {element.id} and (e.id = s.element1_id or e.id = s.element2_id) \n where ( s.element1_id = {element.id} or s.element2_id = {element.id} ) \"\"\"\n # and e.type = '{element_type}' \"\"\"\n\n if element.type == \"item\":\n query += f\"and (e.type = 'item' or e.item_id = {element.id})\"\n\n query += f\"\"\"\n order by s.{relevance_function} desc, e.id \n limit {k_prime};\"\"\"\n return pd.read_sql(query, self.conn)\n\n def get_episode_start_elements(self, k):\n order_by = \"id\"\n if configuration.environment_configurations[\"start_random\"]:\n order_by = \"random()\"\n return pd.read_sql(f\"\"\"SELECT * FROM elements order by {order_by} limit {k+1}; \"\"\", 
self.conn)\n\n def get_related_items(self, related_item_ids):\n if len(related_item_ids) == 0:\n return pd.DataFrame()\n else:\n related_item_ids = str(related_item_ids).replace(\n '[', '(').replace(']', ')')\n return pd.read_sql(\n f\"\"\"select e.* \n from elements as e\n where e.id in {related_item_ids};\"\"\", self.conn)\n\n def get_max_text_size(self):\n self.cur.execute(\n f\"select max(length(text)) from elements;\")\n return self.cur.fetchone()[0]\n\n def get_max_tag_count(self):\n self.cur.execute(\n f\"select max(array_length(tags,1)) from elements;\")\n return self.cur.fetchone()[0]\n\n def get_target_item_ids(self, query): # core items to select for the experiments\n target_item_ids = pd.read_sql(query, self.conn).item_id.to_list()\n return target_item_ids\n\n def get_target_ids(self, query):\n target_ids = pd.read_sql(query, self.conn).id.to_list()\n if configuration.learning_configurations[\"target_limit\"]:\n limit = configuration.learning_configurations[\"target_size\"]\n return target_ids[:limit]\n return target_ids\n","sub_path":"data/db_interface.py","file_name":"db_interface.py","file_ext":"py","file_size_in_byte":14888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"251153313","text":"import cv2\r\nimport numpy as np\r\nimport sys\r\n\r\nclass ImageProcessing():\r\n\t\r\n\t# global reimg, img_with_bounding\r\n\t\r\n\tdef __init__(self, img, feature):\r\n\t\t\r\n\t\t# sharpening\r\n\t\tgray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\t\tblurred = cv2.bilateralFilter(gray_image, 9, 75, 75)\r\n\t\tgray_image = cv2.addWeighted(gray_image, 1.5, blurred, -0.5, 0)\r\n\t\tgray_image = cv2.bilateralFilter(gray_image, 9, 75, 75)\r\n\t\r\n\t\t# Extract ROI\t\t\r\n\t\tx,y,w,h = cv2.boundingRect(gray_image) # (x=0,y=0,w=1920,h=1080)\r\n\t\t#print(\"x,y,w,h: \", x,y,w,h)\r\n\t\tself.roi = img[y+int(h/2.5):y + h-int(h/2.5) , x+int(w/2.5):x + w-int(w/2.5)]\r\n\t\t\r\n\t\t# Draw a bounding of ROI\r\n\t\tself.img_with_bounding = img.copy()\r\n\t\tcv2.rectangle(self.img_with_bounding, (x+int(w/2.5), y+int(h/2.5)), (x + w-int(w/2.5), y + h-int(h/2.5)), (255, 0, 0), 2)\r\n\t\t\r\n\t\t# Find Needle position\r\n\t\tself.gray_roi = cv2.cvtColor(self.roi, cv2.COLOR_BGR2GRAY)\r\n\t\tx,y,w,h = cv2.boundingRect(self.gray_roi)\r\n\t\t#print(\"xr,yr,wr,hr: \", x,y,w,h)\r\n\t\tself.needle_pose = np.array([[w/2, h/2]]) \r\n\t\t\r\n\t\t# Otsu's thresholding\r\n\t\tret,self.th = cv2.threshold(self.gray_roi,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n\t\t\r\n\t\t# Otsu's thresholding after Gaussian filtering\r\n\t\tself.blur = cv2.GaussianBlur(self.gray_roi,(5,5),0)\r\n\t\tret2,self.th2 = cv2.threshold(self.blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n\t\t\r\n\t\t# Morphological filtering\r\n\t\tkernel = np.ones((2,2),np.uint8)\r\n\t\tself.dilation = cv2.dilate(self.th,kernel,iterations = 1)\r\n\t\t#opening = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)\r\n\t\t\r\n\t\tif feature == 'ORB':\r\n\t\t\t# Initiate ORB object\r\n\t\t\torb = cv2.ORB_create(nfeatures = 5000, scaleFactor = 1.1, nlevels = 10, scoreType = cv2.ORB_FAST_SCORE, patchSize = 100)\r\n\r\n\t\t\t# find the keypoints with ORB\r\n\t\t\tkeypoints = orb.detect(self.gray_roi, None)\r\n\r\n\t\t\t# compute the descriptors with ORB\r\n\t\t\tkeypoints, descriptors = orb.compute(self.gray_roi, keypoints)\r\n\t\t\tself.point2f = cv2.KeyPoint_convert(keypoints)\r\n\r\n\t\t\t# retval = cv2.ORB.getMaxFeatures(orb)\r\n\t\t\t# print('retval: ',retval)\r\n\t\t\t# print('number of Kp: ', 
len(keypoints))\r\n\t\telif feature == 'FAST':\r\n\t\t\t# Initiate FAST object with default values\r\n\t\t\tfast = cv2.FastFeatureDetector_create(10,True,2) \r\n\r\n\t\t\t# TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2\r\n\r\n\t\t\t# find and draw the keypoints\r\n\t\t\tkeypoints = fast.detect(self.th2, None)\r\n\t\t\tself.point2f = cv2.KeyPoint_convert(keypoints)\r\n\t\t\r\n\t\telif feature == 'BLOB':\r\n\t\t\t# Setup SimpleBlobDetector parameters.\r\n\t\t\tparams = cv2.SimpleBlobDetector_Params()\r\n\r\n\t\t\t# Change thresholds\r\n\t\t\tparams.minThreshold = 10\r\n\t\t\tparams.maxThreshold = 200\r\n\r\n\t\t\t# Filter by Area.\r\n\t\t\tparams.filterByArea = True\r\n\t\t\tparams.minArea = 5\r\n\r\n\t\t\t# Filter by Circularity\r\n\t\t\tparams.filterByCircularity = True\r\n\t\t\tparams.minCircularity = 0.1\r\n\r\n\t\t\t# Filter by Convexity\r\n\t\t\tparams.filterByConvexity = True\r\n\t\t\tparams.minConvexity = 0.5\r\n\r\n\t\t\t# Filter by Inertia\r\n\t\t\tparams.filterByInertia = True\r\n\t\t\tparams.minInertiaRatio = 0.01\r\n\r\n\t\t\t# Create a detector with the parameters\r\n\t\t\tver = (cv2.__version__).split('.')\r\n\t\t\tif int(ver[0]) < 3:\r\n\t\t\t\tdetector = cv2.SimpleBlobDetector(params)\r\n\t\t\telse:\r\n\t\t\t\tdetector = cv2.SimpleBlobDetector_create(params)\r\n\r\n\t\t\t# Detect blobs.\r\n\t\t\tkeypoints = detector.detect(self.gray_roi)\r\n\t\t\tself.point2f = cv2.KeyPoint_convert(keypoints)\r\n\t\t\r\n\t\telse:\r\n\t\t\tprint('Error in feature type')\r\n\t\t\tsys.exit(1)\r\n\t\t\t\r\n\t\t# draw only the location of the keypoints without size or orientation\r\n\t\tself.final_keypoints = cv2.drawKeypoints(self.roi, keypoints, None, color=(0,255,0), flags=0)\r\n\r\n\t\t# split channel\r\n\t\tb_channel, g_channel, r_channel = cv2.split(self.final_keypoints)\r\n\t\talpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 50 #creating a dummy alpha channel image.\r\n\t\timg_BGRA = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))\r\n\t\t\r\n\t\t# Create new layers\r\n\t\tlayer_1 = np.zeros((h, w, 4))\r\n\t\tlayer_2 = np.zeros((h, w, 4))\r\n\t\t\r\n\t\t# Draw a blue line with thickness of 1 px on layer_1\r\n\t\tcv2.line(layer_1,(int(w/2)-20,int(h/2)),(int(w/2)+20,int(h/2)),(255,0,0,255),1)\r\n\t\tcv2.line(layer_1,(int(w/2),int(h/2)-20),(int(w/2),int(h/2)+20),(255,0,0,255),1)\r\n\t\t\r\n\t\t# cv2.line(layer_1,(int(w/2)-60,int(h/2)),(int(w/2)-20,int(h/2)),(255,0,0,255),1)\r\n\t\t# cv2.line(layer_1,(int(w/2)-40,int(h/2)-20),(int(w/2)-40,int(h/2)+20),(255,0,0,255),1)\r\n\t\t\r\n\t\t# Draw a red closed circle on layer_2\r\n\t\tcv2.circle(layer_2,(int(w/2),int(h/2)), 10, (0,0,255,255), 1)\r\n\t\t\r\n\t\t# copy the first layer into the resulting image\r\n\t\tself.reimg = img_BGRA[:] \r\n\t\t\r\n\t\t#overlay each drawing parts\r\n\t\tcnd = layer_1[:, :, 3] > 0\r\n\t\tself.reimg[cnd] = layer_1[cnd]\r\n\t\tcnd = layer_2[:, :, 3] > 0\r\n\t\tself.reimg[cnd] = layer_2[cnd]\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\tinput_image = cv2.imread(\"calibresult.jpg\")\r\n\timp = ImageProcessing(input_image, 'ORB')\r\n\t\r\n\tcv2.namedWindow('ORB keypoints', cv2.WINDOW_NORMAL)\r\n\tcv2.imshow('ORB keypoints', imp.reimg)\r\n\t\r\n\t# cv2.namedWindow('title', cv2.WINDOW_NORMAL)\r\n\t# cv2.imshow('title', imp.dilation)\r\n\t\r\n\tcv2.namedWindow('title', cv2.WINDOW_NORMAL)\r\n\tcv2.imshow('title', imp.img_with_bounding)\r\n\t\r\n\tcv2.waitKey(0)\r\n\tprint(imp.point2f)\r\n\tprint(imp.needle_pose)\r\n\tprint(type(imp.point2f))\r\n\tprint()\r\n\t# 
print(imp.roi.shape)\r\n\tprint(imp.reimg)\r\n\tprint(imp.reimg.shape)\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t","sub_path":"module_image.py","file_name":"module_image.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"24016175","text":"import sys\nfrom qtpy import QtWidgets\nfrom ui.mainwindow import Ui_MainWindow\n\n\napp = QtWidgets.QApplication(sys.argv)\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n\n self.setWindowTitle(\"Testanwendung\")\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n self.ui.pb_vergleiche.clicked.connect(self.on_pb_vergleiche_click)\n self.ui.pb_berechne_bmi.clicked.connect(self.on_pb_berechne_bmi_click)\n\n def on_pb_vergleiche_click(self):\n result = \"\"\n\n if self.ui.le_input1.text() == \"\" or self.ui.le_input2.text() == \"\":\n result = \"FEHLER!! Eins der Felder ist leer!\"\n self.ui.tb_result.setTextColor(\"red\")\n else:\n if self.ui.le_input1.text() == self.ui.le_input2.text():\n result = \"GLEICH\"\n else:\n result = \"UNGLEICH\"\n\n self.ui.tb_result.setText(result)\n\n def on_pb_berechne_bmi_click(self):\n groesse_m = float(self.ui.le_height_m.text())\n gewicht_kg = float(self.ui.le_weight_kg.text())\n if groesse_m == 0.0:\n self.ui.lbl_bmi.setText(\"Größe darf nicht 0 sein!\")\n else:\n bmi = gewicht_kg/(groesse_m**2)\n\n self.ui.lbl_bmi.setText(\"BMI {0:.3f}\".format(bmi))\n\n\nwindow = MainWindow()\nwindow.show()\n\nsys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"585272546","text":"\n\ndef rotate(input_array, n=0):\n \"\"\"\n Function rotate an integer array by a specified N position\n Rotation is done to avoid shifting elements which may lead to\n poor performance in case of a large sized array.\n :param input_array: array to be rotated\n :param n: N position(s)\n :return: rotated array\n \"\"\"\n\n # Edge cases\n if input_array is None:\n raise ValueError('input list must not be none')\n\n if not isinstance(input_array, list):\n raise TypeError('input list must be a list type')\n\n if not isinstance(n, int):\n raise TypeError('N must be an integer type')\n\n if len(input_array) < 2:\n return input_array\n\n if n < 1:\n return input_array\n\n for i in range(n):\n input_array = input_array[-1:] + input_array[:-1]\n\n return input_array\n\n\nif __name__ == '__main__':\n print('Rotation of [20, 120, 40] by 2 is %s' % rotate([20, 120, 40], 2))\n","sub_path":"source/Array_Rotation/array_rotation.py","file_name":"array_rotation.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"88466239","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 5 17:29:09 2011\n\n@author: Sat Kumar Tomer\n@website: www.ambhas.com\n@email: satkumartomer@gmail.com\n\"\"\"\n\n# import required modules\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# define the variables\ntheta_e = 0.486\npsi = 16.7\nK = 0.65\nS_e = 0.3\nt = 1\n\n#calculate dtheta\ndtheta = (1-S_e)*theta_e\n\n# initial guess of F\nF_old = K*t\nepsilon = 1\nF = []\nwhile epsilon > 1e-4:\n F_new = psi*dtheta * np.log(1+F_old/(psi*dtheta)) + K*t\n epsilon = F_new - F_old\n F_old = F_new\n 
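# keep every fixed-point iterate of the Green-Ampt equation so the convergence of F can be plotted below\n 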
F.append(F_new)\n\n\nplt.clf()\nplt.plot(F,'-ok')\nplt.xlabel('Number of iteration',fontsize=25)\nplt.ylabel('F',fontsize=20)\nplt.savefig('/home/tomer/articles/python/tex/images/F.png')","sub_path":"codes/infil.py","file_name":"infil.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"82355833","text":"from flask import Flask\nimport os \nimport markov as mark\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef hello_world():\n random = mark.make_sentence()\n return random\n \nif __name__ == '__main__':\n app.debug = True\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', port=port)","sub_path":"Code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"252904526","text":"# Music download credits\n#\n# THE PROBLEM\n#\n# Assume the following values have already been entered into the\n# Python interpreter, denoting the cost in cents for downloading one\n# music track, your original budget in dollars, and the number of tracks\n# already downloaded:\n\ntrack_cost = 120 # cost in cents for downloading 1 track\nbudget = 50 # dollars\nnum_downloaded = 15 # number of tracks already downloaded\n\n# Cost in cents -> cost in dollars\ntrack_cost = track_cost / 100\n\n# Write expressions to calculate how many more tracks you can afford\n# to download and print that value to the screen.\n#\n# A problem solving strategy:\n# 1. Calculate the amount spent so far by\n# multiplying the number downloaded by the track cost\nspent = track_cost * num_downloaded\n\n# 2. Calculate the balance left by\n# deducting the amount spent so far from the budget\nbalance = budget - spent\n\n# 3. Divide the balance left by the track cost\ntracks_left = balance / track_cost\n\n# 4. 
Print the number of tracks left\nprint(\"There are\", int(tracks_left), \"tracks you can buy.\")\n\n#\n# Be careful to allow for the fact that one of the given values\n# is expressed in cents and the other is in dollars, i.e., the\n# units associated with the values are different.\n\n","sub_path":"week1/04_downloads_left.py","file_name":"04_downloads_left.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"405070459","text":"class Solution:\n def licenseKeyFormatting(self, s: str, k: int) -> str:\n s = s.replace('-',\"\").upper()\n remainder = len(s) % k\n \n first_grp = [s[0:remainder]]\n other_grps = [s[i:i+k] for i in range(remainder, len(s),k)]\n \n if remainder:\n return \"-\".join(first_grp+other_grps)\n return \"-\".join(other_grps)","sub_path":"license-key-formatting/license-key-formatting.py","file_name":"license-key-formatting.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"307302984","text":"import os\nfrom pathlib import Path\n\nfrom jinja2 import Template\n\nfrom charmhelpers.core import hookenv\nfrom charms.reactive import set_flag, clear_flag, endpoint_from_flag\nfrom charms.reactive import when, when_not\n\nfrom charms import layer\n\n\n@when('config.changed')\ndef update_model():\n clear_flag('charm.kubeflow-seldon-cluster-manager.started')\n\n\n@when('layer.docker-resource.cluster-manager-image.changed')\ndef update_image():\n clear_flag('charm.kubeflow-seldon-cluster-manager.started')\n\n\n@when_not('endpoint.redis.available')\ndef blocked():\n goal_state = hookenv.goal_state()\n if 'redis' in goal_state['relations']:\n layer.status.waiting('waiting for redis')\n else:\n layer.status.blocked('missing relation to redis')\n clear_flag('charm.kubeflow-seldon-cluster-manager.started')\n\n\n@when('layer.docker-resource.cluster-manager-image.available')\n@when('endpoint.redis.available')\n@when_not('charm.kubeflow-seldon-cluster-manager.started')\ndef start_charm():\n layer.status.maintenance('configuring container')\n\n config = hookenv.config()\n image_info = layer.docker_resource.get_info('cluster-manager-image')\n redis = endpoint_from_flag('endpoint.redis.available')\n redis_application_name = redis.all_joined_units[0].application_name\n model = os.environ['JUJU_MODEL_NAME']\n rendered_podspec = Template(Path('reactive/podspec.yaml.j2').read_text()).render(\n model=model,\n config=config,\n image_info=image_info,\n redis_application_name=redis_application_name,\n )\n layer.caas_base.pod_spec_set(rendered_podspec)\n\n layer.status.maintenance('creating container')\n set_flag('charm.kubeflow-seldon-cluster-manager.started')\n","sub_path":"reactive/kubeflow_seldon_cluster_manager.py","file_name":"kubeflow_seldon_cluster_manager.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"307356480","text":"\r\nclass ListNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.next = None\r\n\r\nclass Solution:\r\n def mergeKLists(self, lists):\r\n \"\"\"\r\n :type lists: List[ListNode]\r\n :rtype: ListNode\r\n \"\"\"\r\n if lists is None or len(lists) < 1:\r\n return\r\n if len(lists) < 2:\r\n return lists[0]\r\n head = lists[0]\r\n idx = 0\r\n for i in range(1, len(lists)):\r\n if lists[i].val < head.val:\r\n head = lists[i]\r\n idx = i\r\n lists[idx] = lists[idx].next\r\n temp1 = head\r\n 
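# k-way merge: until every list is exhausted, scan the remaining heads, link the smallest node in, and advance that list\r\n 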
while lists.count(None) != len(lists):\r\n temp2 = None\r\n for i in range(len(lists)):\r\n if lists[i] is None:\r\n continue\r\n if temp2 is None:\r\n temp2 = lists[i]\r\n idx = i\r\n continue\r\n if lists[i].val < temp2.val:\r\n idx = i\r\n temp2 = lists[i]\r\n temp1.next = temp2\r\n temp1 = temp2\r\n if lists[idx] is not None:\r\n lists[idx] = lists[idx].next\r\n return head\r\n\r\n\r\nn1 = ListNode(1)\r\nn2 = ListNode(2)\r\nn3 = ListNode(3)\r\nn2.next = n3\r\nn1.next = n2\r\nn4 = ListNode(4)\r\nn5 = ListNode(5)\r\nn6 = ListNode(6)\r\nn5.next = n6\r\nn4.next = n5\r\nl = [n1, n4]\r\nSolution().mergeKLists(l)\r\nwith open('data.txt') as f:\r\n lists = f.readlines()\r\n content = [l.strip() for l in lists]\r\n print(content)\r\n\r\n","sub_path":"mergeNLinklists.py","file_name":"mergeNLinklists.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"283100184","text":"def minimumBribes(q):\n moves = 0\n\n for pos, val in enumerate(q):\n if (val-1) - pos > 2:\n print(\"Too chaotic\")\n return\n \n for j in range(max(0,val-2), pos):\n if q[j] > val:\n moves+=1\n \n print(moves)\n return","sub_path":"Medium/New Year Chaos/minimumBribes.py","file_name":"minimumBribes.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"131863899","text":"key = \"058b861f487b4c3bb9707909e10fcb14\"\nendpoint = \"https://athh.cognitiveservices.azure.com/\"\n\n# -*- coding: utf-8 -*-\n\nimport os\nfrom azure.cognitiveservices.language.textanalytics import TextAnalyticsClient\nfrom msrest.authentication import CognitiveServicesCredentials\nfrom tweepy import OAuthHandler\nfrom tweepy import API\nfrom tweepy import Cursor\nfrom random import randint\nimport json\n\nconsumer_key = \"VB8PDpUiWYNAGm57hRXCgWcHN\" #twitter app’s API Key\nconsumer_secret = \"OTQL6AGkgNCsjJtbo13wpmAwmy2RD7402HBIunxTfstToFTDo9\" #twitter app’s API secret Key\naccess_token = \"847539459486736384-3xuge9myxwwSIBAAncHfKVxC9O27xNT\" #twitter app’s Access token\naccess_token_secret = \"ZVsNfoRlaDw05V7vIgry80KkiBrFalWktLNG3Bj44x9LB\" #twitter app’s access token secret\n\n\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\nauth_api = API(auth)\n\nextracted_tweets = auth_api.user_timeline(screen_name = 'TwitterA11y', count =5, include_rts = False, tweet_mode = 'extended')\nprint(len(extracted_tweets))\nfinal_tweets = [each_tweet.full_text for each_tweet in extracted_tweets]\n\nfinalJson = []\ncounter = 1\nfor tweet in final_tweets:\n # Preparing the data to give to Azure\n documents = {\"id\": counter, \"language\": \"en\", \"text\": tweet}\n counter = counter+1\n finalJson.append(documents)\n\ndef authenticateClient():\n credentials = CognitiveServicesCredentials(key)\n text_analytics_client = TextAnalyticsClient(\n endpoint=endpoint, credentials=credentials)\n return text_analytics_client\n\n\ndef sentiment():\n client = authenticateClient()\n try:\n response = client.sentiment(documents=finalJson)\n for document in response.documents:\n print(\"Document Id: \", document.id, \", Sentiment Score: \",\"{:.2f}\".format(document.score))\n\n response = client.key_phrases(documents=finalJson)\n for document in response.documents:\n print(\"Document Id: \", document.id)\n print(\"\\tKey Phrases:\")\n for phrase in document.key_phrases:\n print(\"\\t\\t\", phrase)\n\n except Exception as err:\n 
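# surface Text Analytics failures (e.g. an invalid key or endpoint) instead of letting them propagate\n 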
print(\"Encountered exception. {}\".format(err))\n\nsentiment()\n","sub_path":"senti2.py","file_name":"senti2.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"380229640","text":"import os\nfn = open(\"Name.searchenginedb\", \"w\")\nfs = open(\"Size.searchenginedb\", \"w\")\nff = open(\"Format.searchenginedb\", \"w\")\nfp = open(\"Path.searchenginedb\", \"w\")\ncontainer = 'p'\nmy_list = []\nfor (dir, _, files) in os.walk(\"./\"):\n for f in files:\n path = os.path.join(dir, f)\n if os.path.exists(path):\n container = path\n if(container.rsplit(\".\",1)[1] != \"searchenginedb\"): \n fp.write(path)\n fp.write('\\n')\n fsize = os.path.getsize(path)\n fname = container.rsplit( \".\", 1 )[ 0 ]\n fs.write(str(fsize))\n fs.write('\\n') \n fname = container.rsplit( \".\", 1 )[ 0 ]\n fname = fname.rsplit( \"/\", 1 )[ 1 ]\n if \"\\\\\" in fname:\n fname = fname.rsplit( \"\\\\\", 1 )[ 1 ]\n fname = fname.lower()\n fn.write(fname)\n fn.write('\\n')\n my_list.append(fname) \n ext = container.rsplit( \".\", 1 )[ 1 ]\n ext = ext.lower()\n ff.write(ext)\n ff.write('\\n')\nfp.close()\nfs.close()\nfn.close()\nff.close()\n","sub_path":"Project/Core/makefilesizeextpatharray.py","file_name":"makefilesizeextpatharray.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"549127660","text":"import numpy as np\r\nimport os\r\nimport cv2\r\nimport glob\r\n\r\n\r\n# ################################\r\n# Utils\r\n# ################################\r\ndef get_file_name_in_dir(in_dir,out_path,suffix='*.jpg'):\r\n '''\r\n Get the file names in \"in_dir\"\r\n\r\n :param in_dir: file dir\r\n :param out_path: save path\r\n :param suffix: file suffix\r\n :return: None\r\n '''\r\n filelist = glob.glob(os.path.join(in_dir, suffix))\r\n #pdb.set_trace()\r\n f = open(out_path,'w')\r\n for line in filelist:\r\n name = line.split('/')[-1].split('.')[0]\r\n f.write(name+'\\n')\r\n f.close()\r\n\r\n return 0\r\n\r\ndef get_file_full_path_in_dir(files_dir,out_path,suffix='*.jpg'):\r\n '''\r\n Get file full path from directory \"files_dir\"\r\n\r\n :param files_dir:\r\n :param out_path:\r\n :param suffix:\r\n :return:\r\n '''\r\n filelist = glob.glob(os.path.join(files_dir, suffix))\r\n f = open(out_path, 'w')\r\n for line in filelist:\r\n f.write(line + '\\n')\r\n f.close()\r\n\r\ndef change_img_format(in_dir,out_dir,in_suffix='*.bmp'):\r\n #pdb.set_trace()\r\n filelist = glob.glob(os.path.join(in_dir, in_suffix))\r\n\r\n for imgpath in filelist:\r\n im = cv2.imread(imgpath)\r\n imgname = imgpath.split('/')[-1].split('.')[0]\r\n\r\n # remove \"blankspace\" in image names\r\n imgname = imgname.split(' ')\r\n imgname = ''.join(imgname)\r\n\r\n outpath = os.path.join(outdir,imgname+'.jpg')\r\n cv2.imwrite(outpath,im)\r\n #pdb.set_trace()\r\n\r\n pass\r\n\r\ndef sec_to_hms(sec):\r\n hour = int(sec / 3600)\r\n minute = int((sec - 3600 * hour) / 60)\r\n second = int((sec - 3600 * hour - 60 * minute))\r\n\r\n return hour,minute,second\r\n\r\n# ################################\r\n# Test\r\n# ################################\r\nif __name__ == \"__main__\":\r\n\r\n # change images format\r\n imgdir = '/workspace/Share/data_transfer/video_key_frames/shajizhi/images'\r\n outdir = '/workspace/Share/data_transfer/video_key_frames/shajizhi/images'\r\n 
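# imgdir and outdir point at the same folder here, so the PNG frames are re-encoded as JPEGs alongside the originals\r\n 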
change_img_format(imgdir,outdir,'*.png')\r\n","sub_path":"Evaluation_guard/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"265356141","text":"#from Instrucciones.instruccion import Instruccion\nfrom tytus.parser.fase2.team21.Analisis_Ascendente.Instrucciones.instruccion import Instruccion\n#from storageManager.jsonMode import *\nfrom tytus.parser.fase2.team21.Analisis_Ascendente.storageManager.jsonMode import *\n#import Tabla_simbolos.TablaSimbolos as ts\nimport tytus.parser.fase2.team21.Analisis_Ascendente.Tabla_simbolos.TablaSimbolos as TS\n\n\n\n#CREATE [OR REPLACE] DATABASE\nclass CreateReplace(Instruccion):\n '''#1 create\n #2 create or replace'''\n def __init__(self, caso, exists, id, complemento,concatena,fila,columna):\n self.caso = caso\n self.exists = exists\n self.id = id\n self.complemento = complemento\n self.concatena = concatena\n self.fila = fila\n self.columna = columna\n\n\n def ejecutar(createDataBase, ts,consola,exceptions):\n\n\n if createDataBase.caso==1 and createDataBase.exists==False or createDataBase.exists==True:\n #create database\n lb = showDatabases()\n for bd in lb:\n if bd == createDataBase.id:\n if createDataBase.exists:\n print(\"no pasa nada\")\n else:\n consola.append(f\"La Base de Datos {createDataBase.id} ya existe, error al crear\\n\")\n return\n\n createDatabase(str(createDataBase.id))\n entorno_bd= {}\n ts_local = TS.TablaDeSimbolos(entorno_bd)\n # simbolo (self, categoria,id, tipo, valor,Entorno):\n simbolo = TS.Simbolo(TS.TIPO_DATO.BASEDEDATOS, createDataBase.id, None, 0,ts_local) # inicializamos con 0 como valor por defecto\n ts.agregar_sim(simbolo)\n consola.append(f\"Se creo la base de datos {createDataBase.id} exitosamente\\n\")\n print(ts.simbolos)\n\n\n elif createDataBase.caso== 2 and createDataBase.exists==False:\n #create or replace\n lb = showDatabases()\n for bd in lb:\n if bd == createDataBase.id:\n\n # se borra a nivel de memoria en disco\n dropDatabase(str(createDataBase.id))\n # se quita el id de la tabla de simbolos\n ts.eliminar_sim(str(createDataBase.id))\n # simbolo (self, categoria,id, tipo, valor,Entorno):\n # se vuelve a crear un entorno para agregar de nuevo la base de datos\n createDatabase(str(createDataBase.id))\n entorno = {}\n ts_local = TS.TablaDeSimbolos(entorno)\n simbolo = TS.Simbolo(None, createDataBase.id, TS.TIPO_DATO.BASEDEDATOS, 0,ts_local) # inicializamos con 0 como valor por defecto\n ts.agregar_sim(simbolo)\n consola.append(f\"Replace, la base de datos {createDataBase.id} se ha creado exitosamente\\n\")\n print(ts.simbolos)\n return\n\n\n createDatabase(str(createDataBase.id))\n ts_local = TS.TablaDeSimbolos(ts.simbolos)\n # simbolo (self, categoria,id, tipo, valor,Entorno):\n simbolo = TS.Simbolo(None, createDataBase.id, TS.TIPO_DATO.BASEDEDATOS, 0,ts_local) # inicializamos con 0 como valor por defecto\n ts.agregar_sim(simbolo)\n consola.append(f\"Se creo la base de datos {createDataBase.id} exitosamente\\n\")\n print(ts.simbolos)\n\n elif createDataBase.caso == 2 and createDataBase.exists == True:\n #create or replace if not exists\n lb = showDatabases()\n for bd in lb:\n if bd == createDataBase.id:\n\n if createDataBase.exists:\n print(\"no pasa nada\")\n else:\n consola.append(\"La Base de Datos ya existe no se puede reemplazar\")\n\n return\n\n\n createDatabase(str(createDataBase.id))\n ts_local = TS.TablaDeSimbolos(ts.simbolos)\n # simbolo (self, categoria,id, tipo, 
valor,Entorno):\n simbolo = TS.Simbolo(None, createDataBase.id, TS.TIPO_DATO.BASEDEDATOS, 0,ts_local) # inicializamos con 0 como valor por defecto\n ts.agregar_sim(simbolo)\n consola.append(f\"Se creo la base de datos {createDataBase.id} exitosamente\\n\")\n print(ts.simbolos)\n\n\n def traducir(createDatabase,ts,consola,Exception,tv):\n\n #iniciar traduccion\n info = \"\" #info contiene toda el string a mandar como parametros\n print(\"concatena \\n\")\n print(createDatabase.concatena)\n for data in createDatabase.concatena:\n info += \" \" +data\n\n contador = tv.Temp()\n consola.append(f\"\\n\\t{contador} = \\\"{info}\\\"\")\n contador2 = tv.Temp()\n consola.append(f\"\\n\\t{contador2} = T({contador})\")\n consola.append(f\"\\n\\tT1 = T3({contador2})\")\n consola.append(f\"\\n\\tstack.append(T1)\\n\")\n\n'''\n t0 = \" CREATE DATABASE DBFase2 ;\"\n t1 = T(t0)\n\n T1 = T3(t1)\n stack.append(T1);\n'''\n\n\n\n\n\n\n\n\n#complemento de create or replace\nclass ComplementoCR(Instruccion):\n def __init__(self, idOwner, mode,fila,columna):\n self.idOwner = idOwner\n self.mode = mode\n self.fila = fila\n self.columna = columna\n\n\n\n","sub_path":"parser/fase2/team21/Analisis_Ascendente/Instrucciones/Create/createDatabase.py","file_name":"createDatabase.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"482948760","text":"######################################################################\n# Author: Thy H. Nguyen\n# Username: nguyent2\n\n# Assignment: A02: Exploring Turtles in Python\n# Purpose: Draws a 3D cube using turtles and nested for loops\n\n#Citation:\n\n# https://stackoverflow.com/questions/29441237/how-to-draw-a-semicircle-in-python-turtle-only\n# https://stackoverflow.com/questions/46695126/how-to-draw-circle-using-turtle-in-python-3\n# https://www.w3schools.com/colors/colors_picker.asp\n# Getting color background from https://www.w3schools.com/colors/colors_picker.asp\n# https://docs.python.org/3.3/library/turtle.html?\n# http://cs.berea.edu/courses/csc226book/\n######################################################################\n\n\nimport turtle\nfrom math import pi\nimport random\n\n\ndef Set_up_shape(bichthuy):\n \"\"\"\n This function just sets up the turtle ready to draw a lot of circles.\n :param bichthuy:\n :return:none\n \"\"\"\n\n bichthuy.shape(\"circle\") # Set up the shape of the turtle to be circle\n bichthuy.shapesize(0.1, 0.1) # Set up the pensize for the shapesize of the turtle\n bichthuy.speed(0) # Set up turtle's speed\n bichthuy.pensize(6) # Set up turtle's pensize\n\n\n\n def Draw_Circle(number, cao):\n \"\"\"\n This function draws a full circle (which serves as a wheel in a car)\n :param number:\n :param cao:\n :return: none\n \"\"\"\n for i in range(number): #the number of i will equal the number of degrees\n bichthuy.right(1) #Set up turtle turn right 1 degrees to draw a circle\n bichthuy.forward(cao) #Set up turtle to forward the number that will input later\n\n def Complete_The_Circle_To_Put_Pen_Back(solieu, chieucao):\n \"\"\"\n This function draws backward of a circle, to connect to the next step\n :param solieu:\n :param chieucao:\n :return: none\n \"\"\"\n for i in range(solieu):\n bichthuy.left(1) # Set up turtle turn left 1 degrees\n bichthuy.backward(chieucao) # Set up turtle forward the number input later\n\n # Call the function to draw wheels (inside another function)\n #Since just use circle to draw cars, so set up different radius.\n 
Draw_Circle(360, 0.5) #Draw a full circle - since 360 degrees - and 1 degrees corresponds with 0.5\n Complete_The_Circle_To_Put_Pen_Back(90, 0.5) #Draw 1/4 a circle to get the pen to the point desired for next step\n bichthuy.left(90) #Turning the turtle to get the desired position for next step\n bichthuy.forward(40)\n bichthuy.right(90) #Get ready for the next step\n Draw_Circle(180, 2.5) #Draw a half a circle, for a car, which is 180 degrees\n bichthuy.right(90) #Turning to draw a car\n bichthuy.forward(40)\n bichthuy.right(-90)\n Draw_Circle(360, 0.5)\n Complete_The_Circle_To_Put_Pen_Back(180, 0.5) #Drawing a wheel for a car\n bichthuy.left(90)\n bichthuy.forward((900/pi)-80-(360/pi)) #Complete the car\n\n\ndef Draw_Car_And_Fill_Car(mauu, yuri,ngoc):\n \"\"\"\n This function draws a whole car and fill it\n :param mauu:\n :param yuri:\n :return: none\n \"\"\"\n ngoc.color(\"#000080\") # Set color for turtle\n ngoc.fillcolor(yuri) # Set fill color for turtle\n ngoc.penup() # Set penup\n ngoc.forward(mauu) # Set turtle forward\n ngoc.pendown()\n ngoc.begin_fill() # Set up turtle to begin fill the color\n Set_up_shape(ngoc)\n ngoc.end_fill()\n\n\n#Create a list for color's random choice\nlan_nay_1 = random.choice([\"#00e600\", \"#00cc7a\"])\nlan_nay_2 = random.choice([\"#00e600\", \"#00cc7a\"])\nlan_nay_3 = random.choice([\"#00e600\", \"#00cc7a\"])\nlan_nay_4 = random.choice([\"#00e600\", \"#00cc7a\"])\nlan_nay_5 = random.choice([\"#00e600\", \"#00cc7a\"])\nlan_nay_6 = random.choice([\"#00e600\", \"#00cc7a\"])\nlan_nay_7 = random.choice([\"#00e600\", \"#00cc7a\"])\nlan_nay_8 = random.choice([\"#00e600\", \"#00cc7a\"])\n\ndef Draw_The_Two_Points(brother,abcxyz):\n \"\"\"\n This function draws a two point, of a line to ask for calculating the length\n :param brother:\n :return: none\n \"\"\"\n\n abcxyz.color(\"#ffff00\") # Set up color for turtle\n abcxyz.shape(\"circle\") # Set up shape for turtle\n abcxyz.shapesize(0.1, 0.1) # Set up shapesize for turtle\n abcxyz.pensize(6) # Set up pensize for turtle\n abcxyz.penup() # Set turtle's pen up\n abcxyz.forward(brother) # Set up forward\n abcxyz.right(90) # Turtle turn right\n abcxyz.forward(90 / pi) # Turtle go forward\n abcxyz.pendown()\n abcxyz.dot()\n\n\n\n\ndef Name_The_Two_Points(chieu_rong, chu_cai,thy):\n \"\"\"\n This function appears the point A and B\n :param chieu_rong:\n :param chu_cai:\n :return:\n \"\"\"\n\n thy.color(\"#660066\")\n thy.pensize(6)\n thy.penup()\n thy.forward(chieu_rong)\n thy.right(90)\n thy.forward(90)\n thy.pendown()\n thy.write(chu_cai, move=False, align=\"center\", font=(\"TimesNewRoman\", 40, \"bold\"))\n\n\n\ndef Ask_Question_And_Reply(soluong, vietchu, jordan):\n \"\"\"\n This function asks questions, and appears user's answer\n :param soluong:\n :param vietchu:\n :return: none\n \"\"\"\n\n jordan.color(\"#660066\")\n jordan.pensize(6)\n jordan.penup()\n jordan.left(90)\n jordan.forward(soluong)\n jordan.right(90)\n jordan.forward(0)\n jordan.pendown()\n jordan.write(vietchu, move=False, align=\"center\", font =(\"TimesNewRoman\",40,\"bold\") )\n\n\n\ndef main():\n \"\"\"\n This function calls all other functions\n :return:\n \"\"\"\n\n wns = turtle.Screen()\n wns.bgcolor(\"#e6ffff\")\n ngoc = turtle.Turtle()\n\n Draw_Car_And_Fill_Car(3 * 150, lan_nay_1,ngoc) #The distance of each car\n Draw_Car_And_Fill_Car(1 * 150, lan_nay_2,ngoc)\n Draw_Car_And_Fill_Car(-1 * 150, lan_nay_3,ngoc)\n Draw_Car_And_Fill_Car(-3 * 150, lan_nay_4,ngoc)\n Draw_Car_And_Fill_Car(-4 * 150, lan_nay_5,ngoc)\n Draw_Car_And_Fill_Car(-2 
* 150, lan_nay_6,ngoc)\n    Draw_Car_And_Fill_Car(0 * 150, lan_nay_7,ngoc)\n    Draw_Car_And_Fill_Car(2 * 150, lan_nay_8,ngoc)\n\n    abcxyz = turtle.Turtle()\n    Draw_The_Two_Points(-4 * 150 - 90 / pi, abcxyz)\n    Draw_The_Two_Points(3 * 150 + 90 / pi + 900 / pi - 180 / pi - 80, abcxyz)\n\n    conrua = turtle.Turtle()\n    conrua.color(\"#ffff00\")\n    conrua.shape(\"circle\")\n    conrua.shapesize(0.1, 0.1)\n    conrua.pensize(6)\n    conrua.penup()\n    conrua.forward(-4 * 150 - 90 / pi) # Set up the pen to the starting point\n    conrua.right(90)\n    conrua.forward(90 / pi)\n    conrua.left(90)\n    conrua.pendown()\n    conrua.forward(((900 / pi) - (80 + 180 / pi)) * 8 + 180 / pi) # Draw the line needed\n\n    #Call the name the two points to name them\n    thy = turtle.Turtle()\n    Name_The_Two_Points(-4 * 150 - 45, \"A\", thy)\n    Name_The_Two_Points(4 * 150 + 50, \"B\", thy)\n\n\n    jordan = turtle.Turtle()\n    Ask_Question_And_Reply(210, \"What do you want the length of AB to be ?\", jordan)\n\n    # This answer variable asks for the user's input\n    answer = wns.numinput(\"Can you give me any number of the length of AB ?\", \"Your answer: \", default=None, minval=0,\n                          maxval=10000000)\n\n    Ask_Question_And_Reply(150, answer, jordan)\n\n    wns.exitonclick()\nmain()\n","sub_path":"a02_nguyent2.py","file_name":"a02_nguyent2.py","file_ext":"py","file_size_in_byte":7061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"490113740","text":"#Some simple examples and tests examining dipolar systems\nimport sys\nimport numpy as np\nsys.path.append(\"../\")\nfrom bandstructure import Parameters\nfrom bandstructure.system import DipolarSystem\nfrom bandstructure.lattice import SquareLattice, HoneycombLattice, TriangularLattice\n\n#Initiate Lattice object and system object with params\nlattice = HoneycombLattice()\ncutoff = 10\ntbar = 1.\nt = 0.54*tbar\nw = 1.97*tbar\nparams = Parameters({'lattice':lattice,'cutoff':cutoff,'tbar':tbar,'w':w,'t':t})\ns = DipolarSystem(params)\n\n\n#Create Special Point M (lines 105-107 of lattice.py change [1,1]->[2pi/a,2pi/a])\nM = np.array([0.5,0.5]) #M is in units of Reciprocal Lattice Vectors\nlattice.addSpecialPoint('M',M)\n\n\n#Create Rhomboidal BZ and dispersion relation as in Fig.(2(a)) of arxiv.org/pdf/1410.5667.pdf\nZone = lattice.getKvectorsRhomboid(resolution=300)\npath1 = lattice.getKvectorsPath(resolution=300,pointlabels=['M','G','M'])\npath2 = lattice.getKvectorsPath(resolution=300,pointlabels=['X','G','X'])\npath3 = lattice.getKvectorsPath(resolution=300,pointlabels=['Y','G','Y'])\n\nBZ = s.solve(Zone)\nDisp1 = s.solve(path1)\nDisp2 = s.solve(path2)\nDisp3 = s.solve(path3)\n\n\n#print total BerryFlux (BF) for BZ: divide by (2*np.pi) for Chern number. 
It seems that .getBerryFlux() returns IM(BF) already\n#for i in range(BZ.numBands()):\n# BF = BZ.getBerryFlux(i)\n# print(\"C for band \" + str(i) + \": \",BF/(2*np.pi))\n\n\n#Plot paths\nDisp1.plot(\"path1.pdf\")\nDisp2.plot(\"path2.pdf\")\nDisp3.plot(\"path3.pdf\")\n\n","sub_path":"examples/dipolar.py","file_name":"dipolar.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"78415312","text":"from Tkinter import *\r\nimport ttk\r\nimport Globals\r\n\r\ndef exit():\r\n root.destroy()\r\n\r\ndef start():\r\n Globals.item_num = int(item_cmb.get())\r\n Globals.time_num = int(time_cmb.get())\r\n Globals.matrix_size = int(size_cmb.get())\r\n\r\n root.destroy()\r\n import Simulation\r\n\r\nroot = Tk()\r\n#root.geometry(\"450x450+300+300\")\r\nroot.title(\"Configuration\")\r\n\r\nitem_lbl = Label(text=\"Number of Items to Generate:\").grid(row=0, column=0, sticky =W)\r\n\r\nitem_txt = StringVar()\r\nitem_cmb = ttk.Combobox(root, textvariable = item_txt)\r\nitem_cmb.config(values = (\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"))\r\nitem_cmb.set(\"3\")\r\nitem_cmb.grid(row=0, column=1, sticky =W)\r\n\r\ntime_lbl = Label(text=\"How long should the simulation run for (Seconds):\").grid(row=1, column=0, sticky =W)\r\n\r\ntime_txt = StringVar()\r\ntime_cmb = ttk.Combobox(root, textvariable = time_txt)\r\ntime_cmb.config(values = (\"15\",\"30\",\"45\",\"60\"))\r\ntime_cmb.set(\"15\")\r\ntime_cmb.grid(row=1, column=1, sticky =W)\r\n\r\nsize_lbl = Label(text=\"Size of the matrix (Nodes for Height and Width):\").grid(row=2, column=0, sticky =W)\r\n\r\nsize_txt = StringVar()\r\nsize_cmb = ttk.Combobox(root, textvariable = size_txt)\r\nsize_cmb.config(values = (\"10\",\"20\",\"30\",\"40\",\"50\"))\r\nsize_cmb.set(\"10\")\r\nsize_cmb.grid(row=2, column=1, sticky =W)\r\n\r\nexit_but = Button(text=\"Exit\", command = exit).grid(row=3, column=0, sticky=N+S+E+W)\r\n\r\nstart_but = Button(text=\"Start\", command = start).grid(row=3, column=1, sticky=N+S+E+W)\r\n\r\nroot.mainloop()\r\n\r\n","sub_path":"Simulation/Opening_Window.py","file_name":"Opening_Window.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"270167899","text":"# the data from https://opentdb.com\nfrom question_model import QuestionModel\nfrom data import question_data\nfrom quiz_brain import QuizBrain\n\nquestion_bank = []\n\nfor question_number in range(len(question_data)):\n question = QuestionModel(question_data[question_number][\"question\"], question_data[question_number][\"correct_answer\"])\n question_bank.append(question)\n\n# print(question_bank)\n# print(question_bank[0].text)\n# print(question_bank[0].answer)\n\nquiz = QuizBrain(question_bank)\n\n# while there is a number of question the loop will running\nwhile quiz.still_has_questions():\n quiz.next_question()\n\nprint(\"You have completed the quiz and your final score is : \", quiz.score, \" / \", len(question_bank))\n","sub_path":"Quiz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"632197711","text":"from model.country import Country\nfrom model.state import State\nfrom model.city import City\nfrom model.address import Address\nfrom setup import db\nfrom sqlalchemy import exc, and_\n\n\nclass AddressController:\n def get_country_states(self, country_identifier):\n 
if isinstance(country_identifier, str):\n return Country.query.filter_by(name=country_identifier).first().states\n elif isinstance(country_identifier, int):\n return Country.query.get(country_identifier).states\n else:\n raise ValueError(f'country_identifier: {type(country_identifier)}')\n\n def get_state_cities(self, state_id):\n return State.query.get(state_id)\n\n def get_city_addresses(self, city_id):\n return City.query.get(city_id)\n\n def get_address_by_id(self, address_id):\n return Address.query.filter_by(id=address_id).join(City).join(State).join(Country).first()\n\n def register_address_by_names(self, street_name, neighbourhood, city_name, state_name, country_name):\n try:\n country = Country(name=country_name)\n db.session.add(country)\n db.session.commit()\n\n except exc.IntegrityError:\n db.session.rollback()\n country = Country.query.filter_by(name=country_name).first()\n\n try:\n state = State(name=state_name, country_id=country.id)\n db.session.add(state)\n db.session.commit()\n\n except exc.IntegrityError:\n db.session.rollback()\n state = State.query.filter(and_(State.name == state_name, State.country_id == country.id)).first()\n\n try:\n city = City(name=city_name, state_id=state.id)\n db.session.add(city)\n db.session.flush()\n\n except exc.IntegrityError:\n db.session.rollback()\n city = City.query.filter(and_(City.name == city_name, City.state_id == state.id)).first()\n\n try:\n address = Address(street_name=street_name, neighbourhood=neighbourhood, city_id=city.id)\n db.session.add(address)\n db.session.commit()\n\n return address.id\n\n except exc.IntegrityError:\n db.session.rollback()\n return None\n\n def remove_address_by_id(self, address):\n try:\n db.session.delete(address)\n db.session.commit()\n\n return True\n\n except exc.IntegrityError:\n db.session.rollback()\n return False\n","sub_path":"controller/address_controller.py","file_name":"address_controller.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"93090167","text":"import pandas as pd\nimport numpy as np\nimport os\n\n\ndef oi_action_ce(row):\n if row[\"Call Price Change\"] > 0 and row[\"Call Change in OI\"] > 0:\n return \"Long Buildup\"\n elif row[\"Call Price Change\"] > 0 and row[\"Call Change in OI\"] < 0:\n return \"Short Cover\"\n elif row[\"Call Price Change\"] < 0 and row[\"Call Change in OI\"] > 0:\n return \"Short Buildup\"\n elif row[\"Call Price Change\"] < 0 and row[\"Call Change in OI\"] < 0:\n return \"Long Unwind\"\n\n\ndef oi_action_pe(row):\n if row[\"Put Price Change\"] > 0 and row[\"Put Change in OI\"] > 0:\n return \"Long Buildup\"\n elif row[\"Put Price Change\"] > 0 and row[\"Put Change in OI\"] < 0:\n return \"Short Cover\"\n elif row[\"Put Price Change\"] < 0 and row[\"Put Change in OI\"] > 0:\n return \"Short Buildup\"\n elif row[\"Put Price Change\"] < 0 and row[\"Put Change in OI\"] < 0:\n return \"Long Unwind\"\n\n\ndef analyze_stock(index,data):\n data_path = os.path.dirname(os.path.realpath(__file__))\n expiry_data = {}\n df_ce_pe = pd.DataFrame()\n buillish = [\"Short Cover\", \"Long Buildup\"]\n bearish = [\"Long Unwind\", \"Short Buildup\"]\n columns = [\n \"Call OI\",\n \"Call Change in OI\",\n \"Call Volume\",\n \"Call LTP\",\n \"Call Price Change\",\n \"Strike Price\",\n \"Put Price Change\",\n \"Put LTP\",\n \"Put Volume\",\n \"Put Change in OI\",\n \"Put OI\",\n ]\n expiry_dates = data[\"expiryDates\"]\n for expiry_dt in expiry_dates:\n expiry_data[expiry_dt] = 
[\n d for d in data[\"data\"] if expiry_dt in d[\"expiryDate\"]\n ]\n ce = [expd[\"CE\"] for expd in expiry_data[expiry_dates[0]] if \"CE\" in expd.keys()]\n pe = [expd[\"PE\"] for expd in expiry_data[expiry_dates[0]] if \"PE\" in expd.keys()]\n df_ce = pd.DataFrame(ce)[\n [\n \"openInterest\",\n \"changeinOpenInterest\",\n \"totalTradedVolume\",\n \"lastPrice\",\n \"change\",\n \"strikePrice\",\n ]\n ]\n df_pe = pd.DataFrame(pe)[\n [\n \"strikePrice\",\n \"change\",\n \"lastPrice\",\n \"totalTradedVolume\",\n \"changeinOpenInterest\",\n \"openInterest\",\n ]\n ]\n df_ce_pe = (\n pd.merge(df_ce, df_pe, how=\"outer\", on=\"strikePrice\").fillna(0.0).round(2)\n )\n df_ce_pe.columns = columns\n # sends each row axis = 1\n df_ce_pe[\"Call OI Action\"] = df_ce_pe.apply(oi_action_ce, axis=1)\n df_ce_pe[\"Put OI Action\"] = df_ce_pe.apply(oi_action_pe, axis=1)\n df_ce_pe[\"Call Trend\"] = np.where(\n df_ce_pe[\"Call OI Action\"].isin(buillish),\n \"Bullish\",\n np.where(df_ce_pe[\"Call OI Action\"].isin(bearish), \"Bearish\", None),\n )\n df_ce_pe[\"Put Trend\"] = np.where(\n df_ce_pe[\"Put OI Action\"].isin(buillish),\n \"Bullish\",\n np.where(df_ce_pe[\"Put OI Action\"].isin(bearish), \"Bearish\", None),\n )\n columns.insert(0, \"Call Trend\")\n columns.insert(1, \"Call OI Action\")\n columns.insert(len(df_ce_pe.columns) - 1, \"Put OI Action\")\n columns.insert(len(df_ce_pe.columns), \"Put Trend\")\n df_ce_pe = df_ce_pe[columns]\n return df_ce_pe.to_excel(f'{data_path}/data/{index}.xlsx')\n","sub_path":"oiAnalyze.py","file_name":"oiAnalyze.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"369900629","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nspitzer --- Spitzer instruments.\n================================\n\n Functions\n ---------\n irsclean\n irsclean_files\n moving_wcs_fix\n\n Classes\n -------\n IRAC\n IRS\n\n\"\"\"\n\nimport numpy as np\nimport astropy.units as u\n\ntry:\n from ..ephem import Spitzer\nexcept ImportError:\n Spitzer = None\n\nfrom .instrument import Instrument, Camera, LongSlitSpectrometer\n\n__all__ = ['irsclean', 'irsclean_files', 'IRAC', 'IRS']\n\ncampaign2rogue = {\n 'IRSX002500': 'IRS1',\n 'IRSX002600': 'IRS2',\n 'IRSX002700': 'IRS3',\n 'IRSX002800': 'IRS4',\n 'IRSX002900': 'IRS5',\n 'IRSX003000': 'IRS6',\n 'IRSX003100': 'IRS7',\n 'IRSX003300': 'IRS8',\n 'IRSX003400': 'IRS9',\n 'IRSX003500': 'IRS10',\n 'IRSX003600': 'IRS11',\n 'IRSX003700': 'IRS12',\n 'IRSX003800': 'IRS13',\n 'IRSX003900': 'IRS14',\n 'IRSX004000': 'IRS15',\n 'IRSX004100': 'IRS16',\n 'IRSX004300': 'IRS17',\n 'IRSX004500': 'IRS18',\n 'IRSX004600': 'IRS19',\n 'IRSX004800': 'IRS20',\n 'IRSX005000': 'IRS21.1',\n 'IRSX007100': 'IRS21.2',\n 'IRSX006900': 'IRS21.3',\n 'IRSX007000': 'IRS21.4',\n 'IRSX005200': 'IRS22',\n 'IRSX005300': 'IRS23.1',\n 'IRSX007300': 'IRS23.2',\n 'IRSX005500': 'IRS24',\n 'IRSX005700': 'IRS25',\n 'IRSX005800': 'IRS26',\n 'IRSX006000': 'IRS27',\n 'IRSX006100': 'IRS28',\n 'IRSX006300': 'IRS29',\n 'IRSX006500': 'IRS30',\n 'IRSX006700': 'IRS31',\n 'IRSX006800': 'IRS32',\n 'IRSX007200': 'IRS33',\n 'IRSX007400': 'IRS34',\n 'IRSX007500': 'IRS35',\n 'IRSX007600': 'IRS36',\n 'IRSX007700': 'IRS37',\n 'IRSX007800': 'IRS38',\n 'IRSX008000': 'IRS39',\n 'IRSX008100': 'IRS40',\n 'IRSX008200': 'IRS41',\n 'IRSX008300': 'IRS42',\n 'IRSX008400': 'IRS43',\n 'IRSX009800': 'IRS44',\n 'IRSX009900': 'IRS45',\n 'IRSX010000': 'IRS46',\n 'IRSX010100': 'IRS47',\n 
'IRSX008900': 'IRS48',\n 'IRSX010200': 'IRS49',\n 'IRSX010300': 'IRS50',\n 'IRSX010400': 'IRS51.1',\n 'IRSX011600': 'IRS51.2',\n 'IRSX011400': 'IRS52',\n 'IRSX009400': 'IRS53',\n 'IRSX009500': 'IRS54',\n 'IRSX010600': 'IRS55',\n 'IRSX010700': 'IRS56',\n 'IRSX010800': 'IRS57.1',\n 'IRSX011700': 'IRS57.2',\n 'IRSX010900': 'IRS58.1',\n 'IRSX011800': 'IRS58.2',\n 'IRSX011900': 'IRS58.3',\n 'IRSX011000': 'IRS59.1',\n 'IRSX012000': 'IRS59.2',\n 'IRSX011100': 'IRS60',\n 'IRSX012200': 'IRS61.1',\n 'IRSX011200': 'IRS61.2'\n}\n\nclass IRAC(Camera):\n \"\"\"Spitzer's Infrared Array Camera\n\n Attributes\n ----------\n\n Examples\n --------\n\n \"\"\"\n\n def __init__(self):\n w = [3.550, 4.493, 5.731, 7.872] * u.um\n shape = (256, 256)\n ps = 1.22 * u.arcsec\n location = Spitzer\n Camera.__init__(self, w, shape, ps, location=location)\n\n def ccorrection(self, sf, channels=[1, 2, 3, 4]):\n \"\"\"IRAC color correction.\n\n Seems to agree within 1% of the IRAC Instrument Handbook.\n Thier quoted values are good to ~1%.\n\n Parameters\n ----------\n sf : function\n A function that generates source flux density as a Quantity\n given wavelength as a Quantity.\n channels : list, optional\n A list of the IRAC channels for which to compute the color\n correction, e.g., `[1, 2]` for 3.6 and 4.5 um.\n\n Returns\n -------\n K : ndarray\n Color correction factor, where `Fcc = F / K`.\n\n \"\"\"\n\n from scipy import interpolate\n import astropy.constants as const\n from ..calib import filter_trans\n from ..util import davint, takefrom\n\n nu0 = (const.c.si / self.wave).to(u.teraHertz).value\n K = np.zeros(len(channels))\n for ch in channels:\n tw, tr = filter_trans('IRAC CH{:}'.format(ch))\n nu = (const.c / tw).to(u.teraHertz).value\n\n sfnu = sf(tw).to(u.Jy, u.spectral_density(tw)).value\n\n i = ch - 1 # self.wave index\n sfnu /= sf(self.wave[i]).to(u.Jy, u.spectral_density(self.wave[i])).value\n\n sfnu, tr, nu = takefrom((sfnu, tr, nu), nu.argsort())\n K[i] = (davint(nu, sfnu * tr * nu0[i] / nu, nu[0], nu[-1])\n / davint(nu, tr * (nu0[i] / nu)**2, nu[0], nu[-1]))\n\n return K\n\n# def ccorrection_tab(self, sw, sf):\n# \"\"\"IRAC color correction of a tabulated spectrum.\n#\n# Parameters\n# ----------\n# sw : Quantity\n# Source wavelength.\n# sf : Quantity\n# Source flux density.\n#\n# Returns\n# -------\n# K : ndarray\n# Color correction: `Fcc = F / K`.\n#\n# \"\"\"\n#\n# from scipy import interpolate\n# import astropy.constants as const\n# from ..calib import filter_trans\n# from ..util import davint, takefrom\n#\n# nu0 = (const.c.si / self.wave).to(u.teraHertz).value\n# K = np.zeros(4)\n# for i in range(4):\n# tw, tr = filter_trans('IRAC CH{:}'.format(i + 1))\n# nu = (const.c / tw).to(u.teraHertz).value\n#\n# # interpolate the filter transmission to a higher\n# # resolution\n# t\n#\n# s = interpolate.splrep(sw.value, sf.value)\n# _sf = interpolate.splev(fw.value, s, ext=1)\n# _sf /= interpolate.splev(self.wave[i].value, s, ext=1)\n#\n# _sf *= sf.unit.to(u.Jy, u.spectral_density(fw))\n#\n# _sf, ft, nu = takefrom((_sf, ft, nu), nu.argsort())\n# K[i] = (davint(nu, _sf * ft * nu0[i] / nu, nu[0], nu[-1])\n# / davint(nu, ft * (nu0[i] / nu)**2, nu[0], nu[-1]))\n# return K\n\nclass IRS(Instrument):\n \"\"\"Spitzer's Infrared Spectrometer.\n\n Attributes\n ----------\n module : The current IRS module: SL1, SL2, Blue, Red, etc. 
SH, LH, SL3, LL3 not yet implemented.\n\n Examples\n --------\n\n \"\"\"\n\n modes = ['sl1', 'sl2', 'll1', 'll2', 'blue', 'red']\n\n def __init__(self):\n self.sl2 = LongSlitSpectrometer(\n 6.37 * u.um,\n [32, 128],\n 1.8 * u.arcsec,\n 2.0,\n 0.073 * u.um,\n R=90,\n location=Spitzer)\n self.sl1 = LongSlitSpectrometer(\n 10.88 * u.um,\n [32, 128],\n 1.8 * u.arcsec,\n 2.06,\n 0.12 * u.um,\n R=90,\n location=Spitzer)\n self.ll2 = LongSlitSpectrometer(\n 17.59 * u.um,\n [33, 128],\n 5.1 * u.arcsec,\n 2.1,\n 0.21 * u.um,\n R=90,\n location=Spitzer)\n self.ll1 = LongSlitSpectrometer(\n 29.91 * u.um,\n [33, 128],\n 5.1 * u.arcsec,\n 2.1,\n 0.35 * u.um,\n R=85,\n location=Spitzer)\n self.blue = Camera(\n 15.8 * u.um,\n [31, 44],\n 1.8 * u.arcsec,\n location=Spitzer)\n self.red = Camera(\n 22.3 * u.um,\n [32, 43],\n 1.8 * u.arcsec,\n location=Spitzer)\n\n self._mode = 'sl1'\n\n @property\n def mode(self):\n if self._mode in self.modes:\n return self.__dict__[self._mode]\n else:\n raise KeyError(\"Invalid mode: {}\".format(self._mode))\n\n @mode.setter\n def mode(self, m):\n if m.lower() in self.modes:\n self._mode = m.lower()\n else:\n raise KeyError(\"Invalid mode: {}\".format(m.lower()))\n\n def sed(self, *args, **kwargs):\n \"\"\"Spectral energy distribution of a target.\n\n Parameters\n ----------\n *args\n **kwargs\n Arguments and keywords depend on the current IRS mode.\n\n Returns\n -------\n sed : ndarray\n\n \"\"\"\n return self.mode.sed(*args, **kwargs)\n\n def lightcurve(self, *args, **kwargs):\n \"\"\"Secular lightcurve of a target.\n\n Parameters\n ----------\n *args\n **kwargs\n Arguments and keywords depend on the current IRS mode.\n\n Returns\n -------\n lc : astropy Table\n\n \"\"\"\n return self.mode.lightcurve(*args, **kwargs)\n\ndef irsclean(im, h, bmask=None, maskval=28672,\n rmask=None, func=None, nan=True, sigma=None, box=3,\n **fargs):\n \"\"\"Clean bad pixels from an IRS image.\n\n Parameters\n ----------\n im : ndarray\n The image.\n h : dict-like\n FITS header keywords from the original data file.\n bmask : ndarray, optional\n The SSC pipeline BMASK array.\n maskval : int, optional\n Bitmask value applied to BMASK array to generate a bad pixel\n map. These values will be removed from `bmask` and returned.\n rmask : ndarray, optional\n The rogue mask array.\n func : function, optional\n Use this function to clean the image: first argument is the\n image to clean, the second is the mask (`True` for each bad\n pixel). 
The default is `image.fixpix`.\n    nan : bool, optional\n        Set to `True` to also clean any pixels set to NaN.\n    sigma : float, optional\n        Set to sigma clip the image using a filter of width `box` and\n        clipping at `sigma`-sigma outliers.\n    box : int, optional\n        The size of the filter for sigma clipping.\n    **fargs\n        Additional keyword arguments are passed to `func`.\n\n    Returns\n    -------\n    cleaned : ndarray\n        The cleaned data.\n    h : dict-like\n        The updated header.\n    new_mask : ndarray, optional\n        The cleaned mask.\n\n    \"\"\"\n\n    import scipy.ndimage as nd\n    from ..image import fixpix\n\n    cleaner = fixpix if func is None else func\n\n    mask = np.zeros_like(im, bool)\n    if nan:\n        mask += ~np.isfinite(im)\n    if bmask is not None:\n        mask += (bmask & maskval).astype(bool)\n        new_mask = bmask & ~maskval  # remove the maskval bits, per the docstring\n    if rmask is not None:\n        mask += rmask.astype(bool)\n\n    if sigma is not None:\n        stdev = nd.generic_filter(im, np.std, size=box)\n        m = nd.median_filter(im, size=box)\n        mask += ((im - m) / stdev) > sigma\n    \n    h.add_history('Cleaned with mskpy.instruments.spitzer.irsclean.')\n    h.add_history('irsclean: function={}, arguments={}'.format(\n        str(cleaner), str(fargs)))\n    \n    cleaned = cleaner(im, mask, **fargs)\n    if bmask is None:\n        return cleaned, h\n    else:\n        return cleaned, h, new_mask\n\ndef irsclean_files(files, outfiles, uncs=True, bmasks=True,\n                   maskval=16384, rmasks=True, func=None, nan=True,\n                   sigma=None, box=3, **fargs):\n    \"\"\"Clean bad pixels from a list of IRS files.\n\n    For automatic rogue mask file name gleaning, this function\n    requires that irs.rogue_masks_path is set in mskpy.cfg to the\n    location of the rogue masks files from the Spitzer Science Center.\n\n    Parameters\n    ----------\n    files : array of strings\n        A list of image names to clean.\n    outfiles : array-like\n        Save cleaned images to these files. Existing files will be\n        overwritten. Uncertainty file names will be based on `outfiles`.\n    uncs : list or bool, optional\n        Also clean these uncertainty arrays. Set to `True` and the\n        uncertainty filename will be guessed. Otherwise set to `False`.\n    bmasks : ndarray, optional\n        The SSC pipeline BMASK arrays. Same format as `uncs`.\n    maskval : int, optional\n        Bitmask value applied to BMASK array to generate bad pixel\n        maps.\n    rmasks : ndarray, optional\n        The rogue mask arrays. Same format as `uncs`. When `True` but\n        the IRS campaign is not in the `campaign2rogue` array (e.g., for\n        early release observations), then no rogue mask will be used.\n    func : function, optional\n        Use this function to clean the images: first argument is the\n        image to clean, the second is the mask (`True` for each bad\n        pixel). 
The default is `image.fixpix`.\n    nan : bool, optional\n        Set to `True` to also clean any pixels set to NaN.\n    sigma : float, optional\n        Set to sigma clip the image using a filter of width `box` and\n        clipping at `sigma`-sigma outliers.\n    box : int, optional\n        The size of the filter for sigma clipping.\n    **fargs\n        Additional keyword arguments are passed to `func`.\n\n    \"\"\"\n\n    from astropy.io import fits\n    \n    def file_generator(in_list, optional_list, replace_string):\n        for i in range(len(in_list)):\n            if optional_list is True:\n                f = in_list[i].replace('_bcd', replace_string)\n            elif np.iterable(optional_list):\n                f = optional_list[i]\n            else:\n                f = None\n\n            if f is None:\n                yield None, None\n            else:\n                yield f, fits.getdata(f)\n\n    def rmask_file_generator(in_list, optional_list):\n        import os.path\n        from ..config import config\n        path = config.get('irs', 'rogue_masks_path')\n        for i in range(len(in_list)):\n            if optional_list is True:\n                h = fits.getheader(in_list[i])\n                if h['CAMPAIGN'] in campaign2rogue:\n                    f = 'b{}_rmask_{}.fits'.format(\n                        h['CHNLNUM'], campaign2rogue[h['CAMPAIGN']])\n                    f = os.path.join(path, f)\n                else:\n                    f = None\n            elif np.iterable(optional_list):\n                f = optional_list[i]\n            else:\n                f = None\n\n            if f is None:\n                yield None, None,\n            else:\n                yield f, fits.getdata(f)\n\n    unc_files = file_generator(files, uncs, '_func')\n    bmask_files = file_generator(files, bmasks, '_bmask')\n    rmask_files = rmask_file_generator(files, rmasks)\n\n    for i in range(len(files)):\n        unc_file, unc = next(unc_files)\n        bmask_file, bmask = next(bmask_files)\n        rmask_file, rmask = next(rmask_files)\n\n        im, h = fits.getdata(files[i], header=True)\n        cleaned = irsclean(im, h, bmask=bmask, maskval=maskval,\n                           rmask=rmask, func=func, sigma=sigma,\n                           box=box, **fargs)\n        fits.writeto(outfiles[i], cleaned[0], cleaned[1], clobber=True)\n\n        if len(cleaned) == 3:\n            # bmask was updated, save it\n            bmask = cleaned[2] # update array for use with unc cleaning\n            h_bmask = fits.getheader(bmask_file)\n            h_bmask.add_history('Updated with mskpy.instruments.spitzer.irsclean')\n            fits.writeto(bmask_file, bmask, h_bmask, clobber=True)\n        \n        if unc is not None:\n            if '_bcd' in outfiles[i]:\n                outf = outfiles[i].replace('_bcd', '_func')\n            elif outfiles[i].endswith('.fits'):\n                outf = outfiles[i].replace('.fits', '_func.fits')\n            else:\n                outf = outfiles[i] + '_func.fits'\n\n            h = fits.getheader(unc_file)\n\n            # do not use sigma clipping with unc array!\n            cleaned = irsclean(unc, h, bmask=bmask, maskval=maskval,\n                               rmask=rmask, func=func, sigma=None, **fargs)\n            \n            fits.writeto(outf, cleaned[0], cleaned[1], clobber=True)\n\ndef moving_wcs_fix(files, ref=None):\n    \"\"\"Correct IRS FITS WCS for the motion of the targeted moving object.\n\n    Parameters\n    ----------\n    files : array of strings\n        A list of files to update. The files are updated in place.\n    ref : tuple\n        The \"reference\" RA and Dec of the target expressed as a tuple:\n        `(ra_ref, dec_ref)`. This is usually the position of the moving\n        target at the start of the IRS observation. The difference\n        between ra_ref, dec_ref and the RA_REF, DEC_REF in the FITS\n        headers is the motion of the target. Set ref to `None` to use\n        RA_REF and DEC_REF from the first file in the file list as the\n        initial position. 
[units: degrees]\n\n    \"\"\"\n\n    from astropy.io import fits\n    from ..util import spherical_coord_rotate\n    \n    assert np.iterable(files), \"files must be an array of file names\"\n\n    if ref is None:\n        # per the docstring, fall back to RA_REF/DEC_REF of the first file\n        h0 = fits.getheader(files[0])\n        ra_ref0, dec_ref0 = h0[\"RA_REF\"], h0[\"DEC_REF\"]\n    else:\n        ra_ref0, dec_ref0 = ref\n\n    for f in files:\n        im, h = fits.getdata(f, header=True)\n        ra_ref1 = h[\"RA_REF\"]\n        dec_ref1 = h[\"DEC_REF\"]\n\n        # I found CRVALx missing in some LH files\n        if h.get(\"CRVAL1\") is not None:\n            crval1, crval2 = spherical_coord_rotate(\n                ra_ref1, dec_ref1, ra_ref0, dec_ref0,\n                h[\"CRVAL1\"], h[\"CRVAL2\"])\n        rarqst, decrqst = spherical_coord_rotate(\n            ra_ref1, dec_ref1, ra_ref0, dec_ref0,\n            h[\"RA_RQST\"], h[\"DEC_RQST\"])\n\n        raslt, decslt = spherical_coord_rotate(\n            ra_ref1, dec_ref1, ra_ref0, dec_ref0,\n            h[\"RA_SLT\"], h[\"DEC_SLT\"])\n\n        print(\"{} moved {:.3f} {:.3f}\".format(f, (ra_ref1 - ra_ref0) * 3600.,\n                                              (dec_ref1 - dec_ref0) * 3600.))\n\n        if h.get(\"CRVAL1\") is not None:\n            h[\"CRVAL1\"] = crval1\n            h[\"CRVAL2\"] = crval2\n        h[\"RA_RQST\"] = rarqst\n        h[\"RA_SLT\"] = raslt\n        h[\"DEC_RQST\"] = decrqst\n        h[\"DEC_SLT\"] = decslt\n        h.add_history(\"WCS updated for moving target motion with mskpy.instruments.spitzer.moving_wcs_fix\")\n        fits.update(f, im, h)\n\n# update module docstring\nfrom ..util import autodoc\nautodoc(globals())\ndel autodoc\n","sub_path":"mskpy/instruments/spitzer.py","file_name":"spitzer.py","file_ext":"py","file_size_in_byte":17305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"483456731","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis file is a suite of control functions.\n\"\"\"\n\nfrom epics import caput\nimport math\n\n\n__author__ = \"Barbara Frosik\"\n__copyright__ = \"Copyright (c) 2016, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n__all__ = [\n    'intensity_rate_adj',\n    'Npix_oversat_cnt_rate_adj',\n    'Npix_undersat_cnt_rate_adj',\n    'adjust']\n\n\ndef intensity_rate_adj(**kws):\n    \"\"\"\n    This method adjusts pv that affects intensity of data.\n\n    Parameters\n    ----------\n    event : Event\n        Event instance containing result value, and tuple with acquire time pv name and value\n    bounds : dict\n        dictionary of bounds, including target value\n\n    Returns\n    -------\n    nothing\n    \"\"\"\n    bounds = kws['bounds']\n    target = bounds['target']\n    # Event instance for this adjuster contains result and a tuple with acquire time pv name and value\n    event = kws['event']\n    res = event.result\n    acq_time_pair = event.acq_time\n\n    # the rate (intensity sum/acq_time) should be adjusted towards target by changing acq_time\n    new_ack_time = res / target * acq_time_pair[1]\n    caput(acq_time_pair[0], new_ack_time)\n\n\ndef Npix_oversat_cnt_rate_adj(**kws):\n    \"\"\"\n    This method adjusts pv that affects saturation count rate.\n\n    Parameters\n    ----------\n    event : Event\n        Event instance containing rate value, and tuple with acquire time pv name and value\n    bounds : dict\n        dictionary of bounds, including target\n    Returns\n    -------\n    nothing\n    \"\"\"\n    bounds = kws['bounds']\n    target = bounds['target']\n    event = kws['event']\n    points_over_threshold = event.points_over_threshold\n    acq_time_pair = event.acq_time\n\n    # Too many points over saturation threshold\n    adjust = math.log(points_over_threshold/target)\n\n    new_ack_time = acq_time_pair[1] / adjust\n    print ('old acq_time, new_acq_time', acq_time_pair[1], new_ack_time)\n    caput(acq_time_pair[0], new_ack_time)\n\n\ndef Npix_undersat_cnt_rate_adj(**kws):\n    \"\"\"\n    This method adjusts pv that affects saturation count rate.\n\n    Parameters\n    
----------\n event : Event\n Event instance containing rate value, and tuple with acquire time pv name and value\n bounds : dict\n dictionary of bounds, including target\n Returns\n -------\n nothing\n \"\"\"\n bounds = kws['bounds']\n target = bounds['target']\n event = kws['event']\n points_over_threshold = event.points_over_threshold\n acq_time_pair = event.acq_time\n\n # Too little points over saturation threshold\n adjust = math.log(target/points_over_threshold)\n\n new_ack_time = acq_time_pair[1] / adjust\n print ('old acq_time, new_acq_time', acq_time_pair[1], new_ack_time)\n caput(acq_time_pair[0], new_ack_time)\n\n\n\n# maps the adjuster ID to the function object\nfunction_mapper = {\n 'intensity_rate': intensity_rate_adj,\n 'Npix_oversat_cnt_rate': Npix_oversat_cnt_rate_adj,\n 'Npix_undersat_cnt_rate': Npix_undersat_cnt_rate_adj,\n }\n\ndef adjust(events, bounds):\n \"\"\"\n This function runs validation methods applicable to the frame data type and enqueues results.\n This function calls all the quality checks and creates Results object that holds results of each quality check, and\n attributes, such data type, index, and status. This object is then enqueued into the \"resultsq\" queue.\n Parameters\n ----------\n events : dict\n dictionary with the key of check function name, and value of tuple containing event and result\n bounds : dictionary\n a dictionary containing target values for the checks\n Returns\n -------\n events : dict\n\n \"\"\"\n\n for ev in events:\n function = function_mapper[ev]\n function(event=events[ev], bounds=bounds)\n","sub_path":"controller/response/adjusters.py","file_name":"adjusters.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"372085227","text":"# -*- encoding=utf-8 -*-\n# Copyright (c) 2019 hippo91 \n# Copyright (c) 2019 Ashley Whetter \n\n# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html\n# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER\nimport unittest\n\ntry:\n import numpy # pylint: disable=unused-import\n\n HAS_NUMPY = True\nexcept ImportError:\n HAS_NUMPY = False\n\nfrom astroid import builder\n\n\n@unittest.skipUnless(HAS_NUMPY, \"This test requires the numpy library.\")\nclass BrainNumpyCoreMultiarrayTest(unittest.TestCase):\n \"\"\"\n Test the numpy core multiarray brain module\n \"\"\"\n\n numpy_functions_returning_array = (\n (\"array\", \"[1, 2]\"),\n (\"bincount\", \"[1, 2]\"),\n (\"busday_count\", \"('2011-01', '2011-02')\"),\n (\"busday_offset\", \"'2012-03', -1, roll='forward'\"),\n (\"concatenate\", \"([1, 2], [1, 2])\"),\n (\"datetime_as_string\", \"['2012-02', '2012-03']\"),\n (\"dot\", \"[1, 2]\", \"[1, 2]\"),\n (\"empty_like\", \"[1, 2]\"),\n (\"inner\", \"[1, 2]\", \"[1, 2]\"),\n (\"is_busday\", \"['2011-07-01', '2011-07-02', '2011-07-18']\"),\n (\"lexsort\", \"(('toto', 'tutu'), ('riri', 'fifi'))\"),\n (\"packbits\", \"np.array([1, 2])\"),\n (\"ravel_multi_index\", \"np.array([[1, 2], [2, 1]])\", \"(3, 4)\"),\n (\"unpackbits\", \"np.array([[1], [2], [3]], dtype=np.uint8)\"),\n (\"vdot\", \"[1, 2]\", \"[1, 2]\"),\n (\"where\", \"[True, False]\", \"[1, 2]\", \"[2, 1]\"),\n (\"empty\", \"[1, 2]\"),\n (\"zeros\", \"[1, 2]\"),\n )\n\n numpy_functions_returning_bool = (\n (\"can_cast\", \"np.int32, np.int64\"),\n (\"may_share_memory\", \"np.array([1, 2])\", \"np.array([3, 4])\"),\n (\"shares_memory\", \"np.array([1, 2])\", \"np.array([3, 4])\"),\n )\n\n 
numpy_functions_returning_dtype = (\n # (\"min_scalar_type\", \"10\"), # Not yet tested as it returns np.dtype\n # (\"result_type\", \"'i4'\", \"'c8'\"), # Not yet tested as it returns np.dtype\n )\n\n numpy_functions_returning_none = ((\"copyto\", \"([1, 2], [1, 3])\"),)\n\n numpy_functions_returning_tuple = (\n (\n \"unravel_index\",\n \"[22, 33, 44]\",\n \"(6, 7)\",\n ), # Not yet tested as is returns a tuple\n )\n\n def _inferred_numpy_func_call(self, func_name, *func_args):\n node = builder.extract_node(\n \"\"\"\n import numpy as np\n func = np.{:s}\n func({:s})\n \"\"\".format(\n func_name, \",\".join(func_args)\n )\n )\n return node.infer()\n\n def test_numpy_function_calls_inferred_as_ndarray(self):\n \"\"\"\n Test that calls to numpy functions are inferred as numpy.ndarray\n \"\"\"\n for func_ in self.numpy_functions_returning_array:\n with self.subTest(typ=func_):\n inferred_values = list(self._inferred_numpy_func_call(*func_))\n self.assertTrue(\n len(inferred_values) == 1,\n msg=\"Too much inferred values ({}) for {:s}\".format(\n inferred_values, func_[0]\n ),\n )\n self.assertTrue(\n inferred_values[-1].pytype() == \".ndarray\",\n msg=\"Illicit type for {:s} ({})\".format(\n func_[0], inferred_values[-1].pytype()\n ),\n )\n\n def test_numpy_function_calls_inferred_as_bool(self):\n \"\"\"\n Test that calls to numpy functions are inferred as bool\n \"\"\"\n for func_ in self.numpy_functions_returning_bool:\n with self.subTest(typ=func_):\n inferred_values = list(self._inferred_numpy_func_call(*func_))\n self.assertTrue(\n len(inferred_values) == 1,\n msg=\"Too much inferred values ({}) for {:s}\".format(\n inferred_values, func_[0]\n ),\n )\n self.assertTrue(\n inferred_values[-1].pytype() == \"builtins.bool\",\n msg=\"Illicit type for {:s} ({})\".format(\n func_[0], inferred_values[-1].pytype()\n ),\n )\n\n def test_numpy_function_calls_inferred_as_dtype(self):\n \"\"\"\n Test that calls to numpy functions are inferred as numpy.dtype\n \"\"\"\n for func_ in self.numpy_functions_returning_dtype:\n with self.subTest(typ=func_):\n inferred_values = list(self._inferred_numpy_func_call(*func_))\n self.assertTrue(\n len(inferred_values) == 1,\n msg=\"Too much inferred values ({}) for {:s}\".format(\n inferred_values, func_[0]\n ),\n )\n self.assertTrue(\n inferred_values[-1].pytype() == \"numpy.dtype\",\n msg=\"Illicit type for {:s} ({})\".format(\n func_[0], inferred_values[-1].pytype()\n ),\n )\n\n def test_numpy_function_calls_inferred_as_none(self):\n \"\"\"\n Test that calls to numpy functions are inferred as None\n \"\"\"\n for func_ in self.numpy_functions_returning_none:\n with self.subTest(typ=func_):\n inferred_values = list(self._inferred_numpy_func_call(*func_))\n self.assertTrue(\n len(inferred_values) == 1,\n msg=\"Too much inferred values ({}) for {:s}\".format(\n inferred_values, func_[0]\n ),\n )\n self.assertTrue(\n inferred_values[-1].pytype() == \"builtins.NoneType\",\n msg=\"Illicit type for {:s} ({})\".format(\n func_[0], inferred_values[-1].pytype()\n ),\n )\n\n def test_numpy_function_calls_inferred_as_tuple(self):\n \"\"\"\n Test that calls to numpy functions are inferred as tuple\n \"\"\"\n for func_ in self.numpy_functions_returning_tuple:\n with self.subTest(typ=func_):\n inferred_values = list(self._inferred_numpy_func_call(*func_))\n self.assertTrue(\n len(inferred_values) == 1,\n msg=\"Too much inferred values ({}) for {:s}\".format(\n inferred_values, func_[0]\n ),\n )\n self.assertTrue(\n inferred_values[-1].pytype() == \"builtins.tuple\",\n 
msg=\"Illicit type for {:s} ({})\".format(\n func_[0], inferred_values[-1].pytype()\n ),\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"mqtt-covid/Lib/site-packages/tests/unittest_brain_numpy_core_multiarray.py","file_name":"unittest_brain_numpy_core_multiarray.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"413279092","text":"from django.conf.urls import url\n\nfrom . import views, book_views\n\napp_name = 'cbvs'\n\nurlpatterns = [\n url(r'^$', views.CBVSTemplateView.as_view(), name='index'),\n url(r'^list/$', views.LibraryListView.as_view(), name='list'),\n url(r'^(?P\\d+)/$', views.LibraryDetailView.as_view(), name='detail'),\n url(r'^list/(?P\\d+)/$', views.LibraryDetailView.as_view(), name='detail'),\n url(r'^create/$', views.LibraryCreateView.as_view(), name='create'),\n url(r'^update/(?P\\d+)/$', views.LibraryUpdateView.as_view(), name='update'),\n url(r'^delete/(?P\\d+)/$', views.LibraryDeleteView.as_view(), name='delete'),\n url(r'^book_list/$', book_views.BookListView.as_view(), name='book_list'),\n url(r'^book_list/(?P\\d+)/$', book_views.BookDetailView.as_view(), name='book_detail'),\n url(r'^book_create/$', book_views.BookCreateView.as_view(), name='book_create'),\n url(r'^book_update/(?P\\d+)/$', book_views.BookUpdateView.as_view(), name='book_update'),\n url(r'^book_delete/(?P\\d+)/$', book_views.BookDeleteView.as_view(), name='book_delete'),\n]\n\n","sub_path":"cbvs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"165160730","text":"import datetime\n\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import QuerySet\nfrom django.db import transaction\n\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.filters import SearchFilter\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.response import Response\nfrom rest_framework.settings import api_settings\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom apps.operate.filters import PraiseFilter\nfrom apps.operate.tasks import after_collect, after_delete_user_category\nfrom apps.post.filters import PostFilter\nfrom apps.post.models import Post\nfrom apps.post.serializer import PostSerializer\nfrom apps.user.serializer import ListCreateUserSerializer\nfrom sheep.constant import RET, error_map\nfrom utils.viewsets import ExtensionViewMixin\nfrom utils.django_util.util import field_sort_queryset\nfrom utils.mixins import CreateModelMixin\nfrom utils.pagination import LimitOffsetPagination\nfrom utils.viewsets import ModelViewSet, ReadOnlyModelViewSet\nfrom apps.operate.models import CollectCategory, TYPE_SERIALIZER_MAPPING, Praise, Focus, \\\n CollectRedisModel\nfrom apps.operate.serializer import CollectCategorySerializer, CreateCollectSerializer, \\\n CreateFocusSerializer, CollectPostSerializer\n\n\nUser = get_user_model()\n\n\nclass UserCollectCategoryViewSet(ModelViewSet):\n \"\"\"用户收藏类别视图\"\"\"\n serializer_class = CollectCategorySerializer\n filter_backends = (SearchFilter,)\n search_fields = ('name', )\n\n def get_serializer_context(self):\n context = super().get_serializer_context()\n context['resource_id'] = self.request.query_params.get('resource_id', None)\n context['user_id'] = self.request.user.id\n return context\n\n def 
get_queryset(self):\n        return CollectCategory.objects.filter(user_id=self.request.user.id).all()\n\n    def perform_destroy(self, instance):\n        CollectCategory.objects.filter(id=instance.id).update(is_active=False)\n        after_delete_user_category.delay(instance.user_id, instance.id)\n\n\nclass CollectCategoryViewSet(ReadOnlyModelViewSet):\n    \"\"\"所有用户收藏类别视图\"\"\"\n    serializer_class = CollectCategorySerializer\n    lookup_field = 'user_id'\n    queryset = CollectCategory.objects.filter(is_show=True)\n    permission_classes = ()\n\n\nclass CollectViewSet(CreateModelMixin,\n                     ExtensionViewMixin,\n                     ListModelMixin,\n                     GenericViewSet):\n    \"\"\"用户个人收藏的帖子\"\"\"\n    serializer_class = {\n        'create': CreateCollectSerializer,\n        'list': CollectPostSerializer\n    }\n    c_serializer_class = CreateCollectSerializer\n    filter_backends = (DjangoFilterBackend, SearchFilter)\n    pagination_class = LimitOffsetPagination\n    filter_class = PostFilter\n    search_fields = ('name',)\n\n    def get_queryset(self):\n        category_id = self.request.query_params.get('collect_id')\n        if not category_id:\n            raise ValidationError({'success': False,\n                                   'code': RET.PARAMERR,\n                                   'msg': error_map[RET.PARAMERR]})\n        self.collect_dict = CollectRedisModel(self.request.user.id, category_id).get_all(self.request)\n        collect_resource_ids = self.collect_dict.keys()\n        return field_sort_queryset(Post.objects, collect_resource_ids)\n\n    def paginate_queryset(self, queryset):\n        queryset_list = super().paginate_queryset(queryset)\n        for i in queryset_list:\n            date_array = datetime.datetime.fromtimestamp(self.collect_dict.get(i.id))\n            i.created_like_time = date_array.strftime(\"%Y-%m-%d %H:%M\")\n        return queryset_list\n\n    @transaction.atomic()\n    def perform_create(self, serializer):\n        attr = serializer.save()\n        after_collect.delay(**attr)\n\n\nclass PraiseViewSet(CreateModelMixin,\n                    ListModelMixin,\n                    GenericViewSet):\n    \"\"\"用户点赞视图\"\"\"\n    r_serializer_class = None\n    c_serializer_class = CreateCollectSerializer\n    filter_backends = (DjangoFilterBackend,)\n    pagination_class = LimitOffsetPagination\n    filterset_class = PraiseFilter\n\n    # 资源类型和serializer的映射\n    TYPE_SERIALIZER = TYPE_SERIALIZER_MAPPING\n\n    @property\n    def type(self):\n        \"\"\"request的get参数type的值\"\"\"\n        if not hasattr(self, '_type'):\n            self._type = int(self.request.query_params.get('type'))\n        return self._type\n\n    def get_queryset(self):\n        return Praise.objects.filter(user_id=self.request.user.id)\n\n    def get_serializer_class(self):\n        if self.action == 'create':\n            return self.c_serializer_class\n        return self.TYPE_SERIALIZER[self.type]\n\n    def filter_queryset(self, queryset: QuerySet):\n        queryset = super().filter_queryset(queryset)\n        return Praise.TYPE_MODEL[self.type].objects.filter(id__in=queryset.values_list('resource_id', flat=True)).all()\n\n    @transaction.atomic()\n    def perform_create(self, serializer):\n        instance = serializer.save()\n        # 增加或减少对应资源的点赞数\n        Praise.add_or_del_praise_num(instance)\n\n\nclass FocusViewSet(ListModelMixin,\n                   CreateModelMixin,\n                   GenericViewSet):\n    \"\"\"关注视图\"\"\"\n    c_serializer_class = CreateFocusSerializer\n    r_serializer_class = ListCreateUserSerializer\n    filter_backends = (DjangoFilterBackend,)\n    filterset_class = None\n\n    def get_queryset(self):\n        user_id = self.request.query_params.get('user_id', self.request.user.id)\n        type = int(self.request.query_params.get('type', 1))\n        # type为1时, 查看我关注的人\n        if type == 1:\n            focus_ids = Focus.objects.filter(user_id=user_id).values_list('focus_id', flat=True)\n        # type为其它值时, 查看关注我的人\n        else:\n            focus_ids = 
Focus.objects.filter(focus_id=user_id).values_list('user_id', flat=True)\n return field_sort_queryset(User, focus_ids)\n\n def get_serializer_class(self):\n if self.action == 'create':\n return self.c_serializer_class\n return self.r_serializer_class\n\n def get_permissions(self):\n if self.action != 'create':\n return []\n return super().get_permissions()\n","sub_path":"sheep/apps/operate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"250427961","text":"#!/usr/bin/env python3\n\n# read text files and plot them\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nfrom scipy import signal, fftpack\n# data file to read given as argument\nif len(sys.argv) < 2:\n\tprint(\"Give the name of the file to read as an argument\\n\")\n\texit()\n\n##Just to test\n\n\nfile = np.loadtxt(sys.argv[1] ,skiprows=1)\n#print(file)\npos = file[1::,0:3]\nvel = file[1::,3:6]\nacc = file[1::,6:9]\nkf_pos = file[1::,9:12]\nkf_vel = file[1::,12:15]\nkf_acc = file[1::,15:18]\ng_local = file[1::,18:21]\n\ntime = np.arange(np.size(pos[:,0]))\ntime = time/1000\n\n\n#TUM colors rgb\nblue = (0,0.3961,0.7412)\nred = (0.7686,0.0275,0.1059)\ngreen =(0,0.4863,0.1882)\norange = (0.8902, 0.4471, 0.1333)\npurple = (0.4118, 0.0314, 0.3529)\ngrey = (0.6118, 0.6157, 0.6235)\nyellow = (0.9765, 0.7294, 0)\n#turquoise = (0, 0.4667, 0.5412) \n\ncolors = blue, red, green, orange, purple, grey, yellow\n\n\n\n\ndef PlotNoLimOne(time, F_x1, F_y1, F_z1, label_name_x1,label_name_y1, label_name_z1, axis_label_x, axis_label_y):\n\tplt.figure()\n\tplt.plot(time, F_x1, c=blue, label = label_name_x1)\n\tplt.plot(time, F_y1, c=red, label = label_name_y1)\n\tplt.plot(time, F_z1, c=green, label = label_name_z1)\n\tplt.xlabel(axis_label_x)\n\tplt.ylabel(axis_label_y)\n\tplt.xlim(xmin=0)\n\tplt.legend()\n\n\ndef PlotCompare(counter, time, x_1, x_2, y_1, y_2, z_1, z_2, title_1, title_2, title_3, label_1, label_2,title, savename):\n\tplt.figure(counter, figsize=(7,8))\n\tplt.title(title)\n\tplt.subplot(311)\n\tplt.tight_layout()\n\tplt.plot(time, x_1, c=blue, label= label_1)\n\tplt.plot(time, x_2, c=red, label= label_2)\n\tplt.title(title_1)\n\tplt.legend()\n\t# plt.xlabel(\"Elapsed time in s\")\n\tplt.subplot(312)\n\tplt.tight_layout()\n\tplt.plot(time, y_1, c=blue, label= label_1)\n\tplt.plot(time, y_2, c=red, label= label_2)\n\tplt.title(title_2)\n\tplt.legend()\n\t# plt.xlabel(\"Elapsed time in s\")\n\tplt.subplot(313)\n\tplt.tight_layout()\n\tplt.plot(time, z_1, c=blue, label= label_1)\n\tplt.plot(time, z_2, c=red, label= label_2)\n\tplt.title(title_3)\n\tplt.legend()\n\tplt.savefig(savename)\n\n\n\n\n\n# PlotNoLimOne(time, pos[:,0], pos[:,1], pos[:,2], \"pos_x\", \"pos_y\", \"pos_z\", \"Elapsed time in s\", \"Position\", \"position.png\")\n# PlotNoLimOne(time, vel[:,0], vel[:,1], vel[:,2], \"v_x\", \"v_y\", \"v_z\", \"Elapsed time in s\", \"Velocity\", \"velocity.png\")\n# PlotNoLimOne(time, acc[:,0], acc[:,1], acc[:,2], \"acc_x\", \"acc_y\", \"acc_z\", \"Elapsed time in s\", \"Acceleration\", \"acceleration.png\")\n# PlotNoLimOne(time, kf_pos[:,0], kf_pos[:,1], kf_pos[:,2], \"kf_pos_x\", \"kf_pos_y\", \"kf_pos_z\", \"Elapsed time in s\", \"KF Position\", \"kf_position.png\")\n# PlotNoLimOne(time, kf_vel[:,0], kf_vel[:,1], kf_vel[:,2], \"kf_v_x\", \"kf_v_y\", \"kf_v_z\", \"Elapsed time in s\", \"KF Velocity\", \"kf_velocity.png\")\n# PlotNoLimOne(time, kf_acc[:,0], kf_acc[:,1], kf_acc[:,2], 
\"kf_acc_x\", \"kf_acc_y\", \"kf_acc_z\", \"Elapsed time in s\", \"KF Acceleration\", \"kf_acceleration.png\")\n\n\n\nPlotCompare(1, time, pos[:,0], kf_pos[:,0], pos[:,1], kf_pos[:,1],pos[:,2], kf_pos[:,2],\"Position in x\",\"Position in y\",\"Position in z\", \"Kinematics\", \"Kalman Filter\",\"Position Simulation\" ,\"position.png\")\nPlotCompare(2, time, vel[:,0], kf_vel[:,0], vel[:,1], kf_vel[:,1],vel[:,2], kf_vel[:,2],\"Velocity in x\",\"Velocity in y\",\"Velocity in z\", \"Kinematics\", \"Kalman Filter\", \"Velocity Simulation\", \"velocity.png\")\nPlotCompare(3, time, acc[:,0], kf_acc[:,0], acc[:,1], kf_acc[:,1],acc[:,2], kf_acc[:,2],\"Acceleration in x\",\"Acceleration in y\",\"Acceleration in z\", \"Sensor\", \"Kalman Filter\", \"Acceleration Simulation\", \"acceleration.png\")\nPlotNoLimOne(time, g_local[:,0], g_local[:,1], g_local[:,2], \"Gravity in x\", \"Gravity in y\", \"Gravity in z\", \"Elapsed time in s\", \"Gravity in m/s^2\" )\nsig = acc[:,0] - np.mean(acc[:,0])\nprint(np.mean(sig))\nsig_fft = fftpack.fft(sig)\npower = np.abs(sig_fft)\nprint(power[0])\nsample_freq = fftpack.fftfreq(sig.size, d= 1/500)\nplt.figure()\nplt.plot(sample_freq, power)\nplt.xlabel(\"Frequency in Hz\")\nplt.ylabel(\"Power\")\nplt.title(\"FFT linear acceleration\")\n\nplt.show()\n\n","sub_path":"data_collection/kalman_filter/simulation/kf_plotter.py","file_name":"kf_plotter.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"93696722","text":"# Main.py\n# M. Schultes\n# 4/7/2019\n\n# Input: filepath\n# Output: spreadsheet\n\nfrom nltk.corpus import names\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize\nfrom collections import Counter\nfrom nltk.util import ngrams\n\nimport nltk\nimport glob\nimport os\nimport string\nimport xlwt\n\n\ndef Load_Text_Files(file_path):\n whitepapers = []\n for filename in glob.glob(os.path.join(file_path, '*.txt')):\n with open(filename, 'r', encoding=\"ISO-8859-1\") as infile:\n s = infile.read()\n # Removes punctuation so only raw text\n whitepapers.append(s.translate(\n str.maketrans('', '', string.punctuation)))\n return whitepapers\n\n\ndef Letters_Only(astr):\n return astr.isalpha()\n\n\ndef Remove_Unicode(docs):\n docs_with_no_unicode = []\n for doc in docs:\n filtered_word = ''\n filtered_word = ''.join(\n [x if x in string.printable else ' ' for x in doc])\n docs_with_no_unicode.append(filtered_word)\n return docs_with_no_unicode\n\n\ndef Clean_Text(docs):\n cleaned_docs = []\n all_names = set(names.words())\n lemmatizer = WordNetLemmatizer()\n for doc in docs:\n cleaned_docs.append(' '.join([lemmatizer.lemmatize(word.lower())\n for word in doc.split()\n if Letters_Only(word)\n and word not in all_names\n and len(word) > 2]))\n return cleaned_docs\n\n\ndef Tokenize_Text(docs):\n stopWords = set(stopwords.words('english'))\n tokenized_docs = []\n tokens = []\n for doc in docs:\n tokenized_docs = word_tokenize(doc)\n for token in tokenized_docs:\n if token not in stopWords:\n tokens.append(token)\n return tokens\n\n\ndef Dictionary_to_List(docs):\n keys = []\n values = []\n for k, v in docs.items():\n if v > 2:\n keys.append(str(k))\n values.append(v)\n return keys, values\n\n\ndef Remove_Duplicate(list1, freq1, list2, freq2):\n not_duplicates = []\n not_duplicates_freq = []\n match_variable = 0\n for x in list1:\n for y in list2:\n if x == y:\n match_variable = 1\n if match_variable == 0:\n 
not_duplicates.append(x)\n not_duplicates_freq.append(freq1[list1.index(x)])\n match_variable = 0\n return not_duplicates, not_duplicates_freq\n\n\ndef Output(filename, sheet, list1, list2):\n book = xlwt.Workbook()\n sh = book.add_sheet(sheet)\n\n col1_name = 'Terms'\n col2_name = 'Frequencies'\n\n n = 0\n\n sh.write(n, 0, col1_name)\n sh.write(n, 1, col2_name)\n\n for m, e1 in enumerate(list1, n+1):\n sh.write(m, 0, e1)\n\n for m, e2 in enumerate(list2, n+1):\n sh.write(m, 1, e2)\n\n book.save(filename)\n\n\ndef main():\n print(\"Program Init.\")\n\n # Load in raw text (could prompt user here for filepath)\n whitepapers_raw_text = []\n file_path = \"C:/Users/michael/Documents/NotScamRawText\"\n whitepapers_raw_text = Load_Text_Files(file_path)\n\n scam_whitepapers_raw_text = []\n scam_file_path = \"C:/Users/michael/Documents/ScamRawText\"\n scam_whitepapers_raw_text = Load_Text_Files(scam_file_path)\n\n # Clean Text\n # - Remove non-alpha characters\n # - Strip non-printable characters\n # - Remove names\n # - Remove words less than 2 characters long\n # - Lemmantize (sort words by grouping inflected or variant forms of the same word)\n whitepapers_with_no_unicode = []\n whitepapers_with_no_unicode = Remove_Unicode(whitepapers_raw_text)\n\n whitepapers_cleaned = []\n whitepapers_cleaned = Clean_Text(whitepapers_with_no_unicode)\n\n scam_whitepapers_with_no_unicode = []\n scam_whitepapers_with_no_unicode = Remove_Unicode(scam_whitepapers_raw_text)\n\n scam_whitepapers_cleaned = []\n scam_whitepapers_cleaned = Clean_Text(scam_whitepapers_with_no_unicode)\n\n # - Remove stopwords\n # - Tokenize text\n whitepapers_tokens = []\n whitepapers_tokens = Tokenize_Text(whitepapers_cleaned)\n\n scam_whitepapers_tokens = []\n scam_whitepapers_tokens = Tokenize_Text(scam_whitepapers_cleaned)\n\n # Group words into ngrams\n unigrams = nltk.FreqDist(whitepapers_tokens)\n bigrams = Counter(ngrams(whitepapers_tokens, 2))\n trigrams = Counter(ngrams(whitepapers_tokens, 3))\n fourgrams = Counter(ngrams(whitepapers_tokens, 4))\n fivegrams = Counter(ngrams(whitepapers_tokens, 5))\n\n scam_unigrams = nltk.FreqDist(scam_whitepapers_tokens)\n scam_bigrams = Counter(ngrams(scam_whitepapers_tokens, 2))\n scam_trigrams = Counter(ngrams(scam_whitepapers_tokens, 3))\n scam_fourgrams = Counter(ngrams(scam_whitepapers_tokens, 4))\n scam_fivegrams = Counter(ngrams(scam_whitepapers_tokens, 5))\n\n unigrams_terms = []\n unigrams_frequency = []\n unigrams_terms, unigrams_frequency = Dictionary_to_List(unigrams)\n\n bigrams_terms = []\n bigrams_frequency = []\n bigrams_terms, bigrams_frequency = Dictionary_to_List(bigrams)\n\n trigrams_terms = []\n trigrams_frequency = []\n trigrams_terms, trigrams_frequency = Dictionary_to_List(trigrams)\n\n fourgrams_terms = []\n fourgrams_frequency = []\n fourgrams_terms, fourgrams_frequency = Dictionary_to_List(fourgrams)\n\n fivegrams_terms = []\n fivegrams_frequency = []\n fivegrams_terms, fivegrams_frequency = Dictionary_to_List(fivegrams)\n\n scam_unigrams_terms = []\n scam_unigrams_frequency = []\n scam_unigrams_terms, scam_unigrams_frequency = Dictionary_to_List(scam_unigrams)\n\n scam_bigrams_terms = []\n scam_bigrams_frequency = []\n scam_bigrams_terms, scam_bigrams_frequency = Dictionary_to_List(scam_bigrams)\n\n scam_trigrams_terms = []\n scam_trigrams_frequency = []\n scam_trigrams_terms, scam_trigrams_frequency = Dictionary_to_List(scam_trigrams)\n\n scam_fourgrams_terms = []\n scam_fourgrams_frequency = []\n scam_fourgrams_terms, scam_fourgrams_frequency = 
Dictionary_to_List(scam_fourgrams)\n\n scam_fivegrams_terms = []\n scam_fivegrams_frequency = []\n scam_fivegrams_terms, scam_fivegrams_frequency = Dictionary_to_List(scam_fivegrams)\n\n # Subtract lists\n only_scam_unigrams = []\n only_scam_unigrams_frequencies = []\n only_scam_unigrams, only_scam_unigrams_frequencies = Remove_Duplicate(scam_unigrams_terms, scam_unigrams_frequency, unigrams_terms, unigrams_frequency)\n\n only_scam_bigrams = []\n only_scam_bigrams_frequencies = []\n only_scam_bigrams, only_scam_bigrams_frequencies = Remove_Duplicate(scam_bigrams_terms, scam_bigrams_frequency, bigrams_terms, bigrams_frequency)\n\n only_scam_trigrams = []\n only_scam_trigrams_frequencies = []\n only_scam_trigrams, only_scam_trigrams_frequencies = Remove_Duplicate(scam_trigrams_terms, scam_trigrams_frequency, trigrams_terms, trigrams_frequency)\n\n only_scam_fourgrams = []\n only_scam_fourgrams_frequencies = []\n only_scam_fourgrams, only_scam_fourgrams_frequencies = Remove_Duplicate(scam_fourgrams_terms, scam_fourgrams_frequency, fourgrams_terms, fourgrams_frequency)\n\n only_scam_fivegrams = []\n only_scam_fivegrams_frequencies = []\n only_scam_fivegrams, only_scam_fivegrams_frequencies = Remove_Duplicate(scam_fivegrams_terms, scam_fivegrams_frequency, fivegrams_terms, fivegrams_frequency)\n\n Output('subtracted_unigrams.xls', 'Sheet 1', only_scam_unigrams, only_scam_unigrams_frequencies)\n Output('subtracted_bigrams.xls', 'Sheet 1', only_scam_bigrams, only_scam_bigrams_frequencies)\n Output('subtracted_trigrams.xls', 'Sheet 1', only_scam_trigrams, only_scam_trigrams_frequencies)\n Output('subtracted_fourgrams.xls', 'Sheet 1', only_scam_fourgrams, only_scam_fourgrams_frequencies)\n Output('subtracted_fivegrams.xls', 'Sheet 1', only_scam_fivegrams, only_scam_fivegrams_frequencies)\n\n Output('scam_unigrams.xls', 'Sheet 1', scam_unigrams_terms, scam_unigrams_frequency)\n Output('scam_bigrams.xls', 'Sheet 1', scam_bigrams_terms, scam_bigrams_frequency)\n Output('scam_trigrams.xls', 'Sheet 1', scam_trigrams_terms, scam_trigrams_frequency)\n Output('scam_fourgrams.xls', 'Sheet 1', scam_fourgrams_terms, scam_fourgrams_frequency)\n Output('scam_fivegrams.xls', 'Sheet 1', scam_fivegrams_terms, scam_fivegrams_frequency)\n\n print(\"Program Comp.\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"venv/kmeans_clustering.py.py","file_name":"kmeans_clustering.py.py","file_ext":"py","file_size_in_byte":8378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"501948956","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 27 12:05:41 2014\n\n@author: jake\n\"\"\"\n\nfrom __future__ import print_function, division\n\nfrom mpi4py import MPI\nimport itertools\n\nimport numpy as np\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nn = int(15504/size)\n\n# Overhead\nif rank == 0:\n models = np.array(list(itertools.combinations(xrange(20), 5)))\nelse:\n models = None\n\n\nif rank < size - 1:\n local_models = np.zeros(n)\nelse:\n local_models = np.zeros(n + 15504%size)\n\ncounts = [n]*size\ncounts[-1] = n + 15504%size\n\nstarts = [n*r for r in xrange(0,size)]\n\ncomm.Scatterv([models, tuple(counts), tuple(starts), MPI.DOUBLE],local_models)\n\nprint('process ', rank, 'with', len(local_models), 'models')\nprint(local_models[0], 
local_models[-1])","sub_path":"python/testMPI.py","file_name":"testMPI.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"351538754","text":"from selenium import webdriver\r\nimport os\r\nimport time\r\nimport consts\r\n\r\nURL = \"https://internshala.com/student/dashboard\"\r\ndriver_path = os.path.join(os.getcwd(), \"driver\", \"geckodriver.exe\")\r\nbrowser = webdriver.Firefox()\r\n\r\n# Delay timers\r\nlogin_delay = 8\r\nnavigation_delay = 3\r\nmin_delay = 1 # delay after application\r\n\r\ndef login():\r\n\t\"\"\"\r\n\tFunction will log you into your \r\n\tinternshalla account\r\n\t\"\"\"\r\n\tmail_field = browser.find_element_by_id(\"email\")\r\n\tpass_field = browser.find_element_by_id(\"password\")\r\n\tlogin_btn = browser.find_element_by_id(\"login_submit\")\r\n\r\n\tmail_field.send_keys(consts.mail)\r\n\tpass_field.send_keys(consts.password)\r\n\tlogin_btn.click()\r\n\ttime.sleep(login_delay)\r\n\r\n\tlink_main = browser.find_element_by_xpath('//*[@id=\"internships_new_superscript\"]')\r\n\tlink_main.click()\r\n\r\ndef gotoNextPage():\r\n\t\"\"\"\r\n\tNavigate to next page\r\n\t\"\"\"\r\n\tnxt_btn = browser.find_element_by_id(\"next\")\r\n\tnxt_btn.click()\r\n\ttime.sleep(navigation_delay)\r\n\r\ndef applyAllJobs():\r\n\t\"\"\"\r\n\tApply for all the jobs listed \r\n\ton the currently opened page\r\n\t\"\"\"\r\n\tcurr_url = browser.current_url # store base url before navigating\r\n\ttry:\r\n\t\tlink_driver = browser.find_elements_by_xpath(\"//a[@class='view_detail_button']\") # list of all driver object contains all job links\r\n\t\tlinks = [] # html links \r\n\t\tfor l in link_driver:\r\n\t\t\tlink = l.get_attribute('href') # extract link from object\r\n\t\t\tlinks.append(link)\r\n\r\n\t\t# open link one by one and apply for internship\r\n\t\tfor link in links:\r\n\t\t\tbrowser.get(link)\r\n\t\t\tapply_btn = browser.find_element_by_xpath('//button[normalize-space()=\"Apply now\"]')\r\n\t\t\tapply_btn.click()\r\n\t\t\t\r\n\t\t\tapply_btn_proceed = browser.find_element_by_xpath('//button[normalize-space()=\"Proceed to application\"]')\r\n\t\t\tapply_btn_proceed.click()\r\n\r\n\t\t\ttext_fields_driver = browser.find_elements_by_xpath(\"//textarea\")\r\n\t\t\ttf_index = 0 # index of the form field being filled\r\n\t\t\tfor tf in text_fields_driver:\r\n\t\t\t\ttf.clear()\r\n\t\t\t\tif tf_index == 0:\r\n\t\t\t\t\ttf.send_keys(consts.app_letter)\r\n\t\t\t\telif tf_index == 1:\r\n\t\t\t\t\ttf.send_keys(\"Yes\")\r\n\t\t\t\telse: \r\n\t\t\t\t\ttf.send_keys(\"Yes. www.github.com/bing101\")\r\n\r\n\t\t\t\ttf_index += 1\r\n\r\n\t\t\t# Submit form for application\r\n\t\t\tsubmit_btn = browser.find_element_by_id(\"submit\")\r\n\t\t\tsubmit_btn.click()\r\n\r\n\t\t\ttime.sleep(navigation_delay)\r\n\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\tpass\r\n\r\n\tbrowser.get(curr_url)\r\n\r\ndef main():\r\n\tprint(URL)\r\n\tbrowser.get(URL)\r\n\ttime.sleep(navigation_delay)\r\n\t\r\n\tlogin()\r\n\t\r\n\ttotal_pages = int(browser.find_element_by_id(\"total_pages\").get_attribute(\"innerHTML\"))\r\n\tprint(total_pages, \" Pages Found . 
.\")\r\n\tcurr_page = 1\r\n\twhile(curr_page <= total_pages):\r\n\t\tprint(\"Current Page\", curr_page)\r\n\t\tapplyAllJobs()\r\n\t\tgotoNextPage()\r\n\t\tcurr_page += 1\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()","sub_path":"Automate-Jobs-Bot/run-bot.py","file_name":"run-bot.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"366291657","text":"import os\nfrom pathlib import Path\n\nimport numpy as np \nimport networkx as nx\nimport pandas as pd\n\nimport itertools\n\nimport PyBoolNet\nfrom PyBoolNet import Attractors\nfrom PyBoolNet import StateTransitionGraphs as STGs\n\nfrom utils import select_states, build_STG_and_determine_attractors, compute_average_activation\n\nimport argparse\n\ndef touch(filename):\n Path(filename).touch()\n\ndef parse_args():\n '''\n Parse from command line\n '''\n parser = argparse.ArgumentParser(description=\"Compute activations for boolean networks\")\n\n parser.add_argument(\"-i\", \n dest=\"i\", type=int, default=0,\n help=\"index of list of modification.\")\n\n parser.add_argument(\"--edgelist\", \n dest=\"edgelist\", type=str, default=None,\n help=\"edgelist to load.\")\n\n parser.add_argument(\"--primes\", \n dest=\"primes\", type=str, default=None,\n help=\"primes to load.\")\n parser.add_argument(\"--cancer_mutation\", \n dest=\"cancer_mutation\", type=str, default=None, nargs=\"*\",\n help=\"genes to set to overexpressed.\")\n \n\n parser.add_argument(\"--output\", dest=\"output\", \n type=str, default=None,\n help=\"Directory to save results.\")\n\n parser.add_argument(\"--output_genes\", dest=\"output_genes\", \n type=str, default=None, nargs=\"+\",\n help=\"Directory to save results.\")\n\n return parser.parse_args()\n\ndef main():\n\n chunksize = 100\n\n args = parse_args()\n\n output_dir = args.output\n output_dir = os.path.join(output_dir, \"chunks\")\n if not os.path.exists(output_dir):\n print (\"making\", output_dir)\n os.makedirs(output_dir, exist_ok=True)\n\n output_genes = args.output_genes\n\n edgelist_filename = args.edgelist\n print (\"loading interaction graph from\", edgelist_filename)\n g = nx.read_weighted_edgelist(edgelist_filename, \n delimiter=\"\\t\",\n create_using=nx.DiGraph())\n \n for gene in output_genes:\n assert gene in g, gene\n\n core = max(nx.strongly_connected_components(g), \n key=len)\n\n # remove any cancer genes from consideration\n cancer_mutuation = args.cancer_mutation\n if cancer_mutuation is not None:\n core = core - set(cancer_mutuation)\n core -= set(output_genes)\n\n core = sorted(core)\n\n print (\"core is\", core)\n\n possible_candidates = [genes \n for n_genes in (1, 2, 3) \n for genes in itertools.combinations(core, \n n_genes)]\n\n possible_candidates = list(filter(\n lambda x: nx.is_weakly_connected(g.subgraph(x)),\n possible_candidates\n ))\n possible_candidates = [(\"cancer\",)] + possible_candidates\n\n print (\"number of possible candidates is\", \n len(possible_candidates))\n\n i = args.i\n chosen_candidates = possible_candidates[i*chunksize : \n (i+1)*chunksize]\n\n output_filenames = {output_gene: \n os.path.join(output_dir, \n \"{}_expressions_chunk_{}.csv\".format(output_gene, i))\n for output_gene in output_genes}\n\n output_dfs = {output_gene: pd.DataFrame() \n for output_gene in output_genes}\n\n\n primes_filename = args.primes\n if primes_filename.endswith(\".bnet\"):\n print (\"loading from bnet file\", primes_filename)\n # json_filename = primes_filename.replace(\".bnet\", 
\".json\")\n # print(\"saving primes json to\", json_filename)\n primes = PyBoolNet.FileExchange.bnet2primes(primes_filename, \n # FnamePRIMES=json_filename\n ) # write to json\n else:\n assert primes_filename.endswith(\".json\")\n print (\"loading primes from json\", primes_filename)\n primes = PyBoolNet.FileExchange.read_primes(primes_filename)\n\n if cancer_mutuation is not None:\n print (\"turning on\", \"_\".join(cancer_mutuation),\n \"to simulate cancer mutation\")\n primes = PyBoolNet.PrimeImplicants.\\\n create_constants(primes, \n {mutation: 1 for mutation in cancer_mutuation}, # turn on cancer genes\n Copy=True)\n\n for gene in output_genes:\n assert gene in primes, gene\n\n states = select_states(primes)\n\n update = \"synchronous\"\n\n for chosen_candidate in chosen_candidates:\n chosen_candidate_identifier = \"+\".join(chosen_candidate)\n print (\"chosen candidate is\", chosen_candidate_identifier)\n\n\n # mad modification to network if necessary\n if chosen_candidate_identifier is not \"cancer\":\n \n print (\"switching off\", chosen_candidate)\n \n modified_network = PyBoolNet.PrimeImplicants.\\\n create_constants(primes, \n {gene: 0 for gene in chosen_candidate}, # turn off targets \n Copy=True)\n\n else:\n # do nothing for cancer (control case)\n modified_network = primes\n\n print (\"determining attractors\")\n attractors = build_STG_and_determine_attractors(\n modified_network, \n states)\n\n print (\"determing activations for output genes\")\n gene_counts = compute_average_activation(\n modified_network, \n genes=output_genes,\n attractors=attractors)\n\n for output_gene in output_genes:\n output_dfs[output_gene] = output_dfs[output_gene]\\\n .append(pd.Series(gene_counts[output_gene], \n name=chosen_candidate_identifier))\n\n print ()\n\n print (\"writing results to files\")\n for output_gene in output_genes:\n output_dfs[output_gene].to_csv(\n output_filenames[output_gene])\n\nif __name__ == \"__main__\":\n main()","sub_path":"src/evaluation/calculate_expressions.py","file_name":"calculate_expressions.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"133485867","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 10 09:20:51 2019\r\n\r\n@author: AriesXiao\r\n\"\"\"\r\nimport requests\r\nimport json\r\nimport time\r\nimport tkinter as tk\r\nimport tkinter.messagebox\r\nfrom tkinter import filedialog\r\nimport xlrd\r\n\r\n######################################################################\r\ndef getAccessToken():\r\n \"\"\"\r\n 获取access_token\r\n \"\"\"\r\n appID='wx0e2f23aa31f654ed'\r\n appSecret='a1dbc442e3129d7baa30b73319e6690b'\r\n url='https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid='+appID+'&secret='+appSecret\r\n try:\r\n html=requests.get(url)\r\n data=json.loads(html.text)\r\n #print(data['access_token'])\r\n return(data['access_token'])\r\n except:\r\n print('failed')\r\naccess_token=getAccessToken()\r\ntiming=time.time()\r\ndef accessToken():\r\n \"\"\"\r\n 两百秒刷新一次access_token\r\n \"\"\"\r\n global access_token,timing\r\n if (time.time()-timing)<200:\r\n return access_token\r\n else:\r\n access_token=getAccessToken()\r\n timing=time.time()\r\n return access_token\r\n\r\n######################################################################\r\n## HTTP函数部分\r\n######################################################################\r\ndef getCurrentMatchName():\r\n \"\"\"\r\n 获取当前比赛match_id\r\n \"\"\"\r\n 
access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databasequery?access_token='+access_token\r\n query='db.collection(\"my_data_records\").doc(\"current_match\").get()'\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n return (json.loads(data['data'][0]))['match_name']\r\n\r\ndef getALLMatches():\r\n \"\"\"\r\n 获取所有比赛\r\n \"\"\"\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databasequery?access_token='+access_token\r\n query='db.collection(\"matches\").get()'\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n match_list=[]\r\n for t in data['data']:\r\n t=json.loads(t)\r\n match_list.append(t['name'])\r\n return match_list\r\n\r\ndef getAllTeams(match_name):\r\n \"\"\"\r\n 获取当前match所有队伍信息\r\n \"\"\"\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databasequery?access_token='+access_token\r\n query='db.collection(\"teams\").where({{match_name:\"{}\"}}).get()'.format(match_name)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n #print(data)\r\n team_list=[]\r\n for t in data['data']:\r\n t=json.loads(t)\r\n team_list.append(t['name'])\r\n return team_list\r\n\r\ndef changeCurrentMatch(match_name):\r\n \"\"\"\r\n 更改当前比赛\r\n \"\"\"\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databaseupdate?access_token='+access_token\r\n query='db.collection(\"my_data_records\").doc(\"current_match\").set({{data:{{match_name:\"{}\"}}}})'.format(match_name)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n print(data)\r\n if data['errcode']==0:\r\n refresh()\r\n tkinter.messagebox.showinfo(title='提示',message='修改成功!')\r\n else:\r\n tkinter.messagebox.showwarning(title='提示',message='修改失败!')\r\n\r\n\r\ndef addMatch(new_match_name):\r\n \"\"\"\r\n 新建比赛\r\n \"\"\"\r\n global matches\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databaseadd?access_token='+access_token\r\n query='db.collection(\"matches\").add({{data:{{name:\"{}\"}}}})'.format(new_match_name)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n if data['errcode']==0:\r\n refresh()\r\n global E1\r\n E1.delete(0,tk.END)\r\n tkinter.messagebox.showinfo(title='提示',message='添加成功!')\r\n else:\r\n tkinter.messagebox.showwarning(title='提示',message='修改失败!')\r\n\r\ndef addTeam(team_name):\r\n global current_match_name\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databaseadd?access_token='+access_token\r\n query='db.collection(\"teams\").add({{data:{{name:\"{}\",match_name:\"{}\"}}}})'.format(team_name,current_match_name)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n if data['errcode']==0:\r\n refresh()\r\n global E_add_team\r\n E_add_team.delete(0,tk.END)\r\n tkinter.messagebox.showinfo(title='提示',message='添加成功!')\r\n else:\r\n 
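# non-zero errcode means the cloud call failed, so warn the operator\r\n 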
tkinter.messagebox.showwarning(title='提示',message='修改失败!')\r\n\r\ndef deleteAllTeams():\r\n global current_match_name\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databasedelete?access_token='+access_token\r\n query='db.collection(\"teams\").where({{match_name:\"{}\"}}).remove()'.format(current_match_name)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n if data['errcode']==0:\r\n refresh()\r\n tkinter.messagebox.showinfo(title='提示',message='删除成功!')\r\n else:\r\n tkinter.messagebox.showwarning(title='提示',message='删除失败!')\r\n\r\ndef importTeams(teams_list_dic):\r\n global current_match_name\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databaseadd?access_token='+access_token\r\n query='db.collection(\"teams\").add({{data:{}}})'.format(teams_list_dic)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n if data['errcode']==0:\r\n refresh()\r\n tkinter.messagebox.showinfo(title='提示',message='导入成功!')\r\n else:\r\n tkinter.messagebox.showwarning(title='提示',message='修改失败!')\r\n\r\ndef deleteAllImages():\r\n global current_match_name\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databasedelete?access_token='+access_token\r\n query='db.collection(\"images\").where({{match_name:\"{}\"}}).remove()'.format(current_match_name)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n if data['errcode']==0:\r\n refresh()\r\n tkinter.messagebox.showinfo(title='提示',message='删除成功!')\r\n else:\r\n tkinter.messagebox.showwarning(title='提示',message='删除失败!')\r\n\r\ndef deleteAllInfo():\r\n global current_match_name\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databasedelete?access_token='+access_token\r\n query='db.collection(\"informations\").where({{match_name:\"{}\"}}).remove()'.format(current_match_name)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n if data['errcode']==0:\r\n refresh()\r\n tkinter.messagebox.showinfo(title='提示',message='删除成功!')\r\n else:\r\n tkinter.messagebox.showwarning(title='提示',message='删除失败!')\r\n\r\ndef importUsers(list_dic):\r\n global current_match_name\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databaseadd?access_token='+access_token\r\n query='db.collection(\"allow_login\").add({{data:{}}})'.format(list_dic)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n if data['errcode']==0:\r\n refresh()\r\n tkinter.messagebox.showinfo(title='提示',message='导入成功!')\r\n else:\r\n tkinter.messagebox.showwarning(title='提示',message='修改失败!')\r\n\r\ndef importQualifications(list_dic):\r\n global current_match_name\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databaseadd?access_token='+access_token\r\n query='db.collection(\"qualifications\").add({{data:{}}})'.format(list_dic)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n 
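# parse the tcb response; errcode 0 means the batch insert succeeded\r\n 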
data=json.loads(r.text)\r\n if data['errcode']==0:\r\n refresh()\r\n tkinter.messagebox.showinfo(title='提示',message='导入成功!')\r\n else:\r\n tkinter.messagebox.showwarning(title='提示',message='修改失败!')\r\n\r\ndef confirmDivision(list_dic):\r\n global current_match_name\r\n access_token=accessToken()\r\n url='https://api.weixin.qq.com/tcb/databaseadd?access_token='+access_token\r\n query='db.collection(\"divisions\").add({{data:{}}})'.format(list_dic)\r\n post_data={\r\n \"env\":\"tj-vex-spy-xiao\",\r\n \"query\":query\r\n }\r\n post_data=json.dumps(post_data)\r\n r=requests.post(url,post_data)\r\n data=json.loads(r.text)\r\n if data['errcode']==0:\r\n refresh()\r\n tkinter.messagebox.showinfo(title='提示',message='导入成功!')\r\n else:\r\n tkinter.messagebox.showwarning(title='提示',message='修改失败!')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef showList(the_list_Var,root_frame,x,y,lines,width=10,font_size=15):\r\n box=tk.Frame(root_frame)\r\n sb=tk.Scrollbar(box)\r\n sb.pack(side=tk.RIGHT,fill=tk.Y)\r\n listbox=tk.Listbox(box,yscrollcommand=sb.set,listvariable=the_list_Var,font=('',font_size),width=width,height=lines)\r\n listbox.pack(side=tk.LEFT,fill=tk.BOTH)\r\n sb.config(command=listbox.yview)\r\n box.place(x=x,y=y,anchor='nw')\r\n\r\n#################################################\r\n#获取数据\r\n#################################################\r\nmatches=getALLMatches()\r\ncurrent_match_name=getCurrentMatchName()\r\ncurrent_teams=getAllTeams(current_match_name)\r\n\r\ndef refresh():\r\n global matches,current_match_name,current_teams\r\n matches=getALLMatches()\r\n current_match_name=getCurrentMatchName()\r\n current_teams=getAllTeams(current_match_name)\r\n current_match_var.set('当前比赛:{}'.format(current_match_name))\r\n matchVar.set(tuple(matches))\r\n current_teams_Var.set(tuple(current_teams))\r\n\r\n\r\n#################################################\r\n#建立GUI界面\r\n#################################################\r\n\r\n\r\n#################################################\r\nwin=tk.Tk()\r\nscreenwidth = win.winfo_screenwidth()\r\nscreenheight = win.winfo_screenheight()\r\nwin.title('spy vex 小程序后端')\r\nwidth=1920\r\nheight=1080\r\nalignstr = '%dx%d+%d+%d' % (width, height, (screenwidth-width)/2, (screenheight-height)/2)\r\nwin.geometry(alignstr)\r\n\r\n#主框架\r\nframe=tk.Frame(win)\r\nframe.pack()\r\n\r\n#各部分\r\nmatch_frame=tk.Frame(frame,height=1080,width=400)\r\nmatch_frame.pack(side='left')\r\n\r\nteam_frame=tk.Frame(frame,height=1080,width=800)\r\nteam_frame.pack(side='left')\r\n\r\nright_frame=tk.Frame(frame,height=1080,width=500)\r\nright_frame.pack(side='left')\r\n\r\n#第一部分 match_frame\r\n##标题\r\ncurrent_match_var=tk.StringVar()\r\ntk.Label(match_frame,textvariable=current_match_var,font=('黑体',15,'bold')).place(x=20,y=20,anchor='nw')\r\ncurrent_match_var.set('当前比赛:{}'.format(current_match_name))\r\n##展示所有比赛\r\n\r\nmatchVar=tk.StringVar()\r\nmatchVar.set(tuple(matches))\r\nmatch_lb=tk.Listbox(match_frame,listvariable=matchVar,font=('',15),width=10,height=5)\r\nmatch_lb.place(x=20,y=60,anchor='nw')\r\n#print(match_lb.curselection())\r\n##更改比赛\r\ndef changeMatchBtn():\r\n choose_index=match_lb.curselection()\r\n if len(choose_index)==0:\r\n #未选择\r\n tkinter.messagebox.showwarning(title='错误', message='请先选择比赛') \r\n else:\r\n choice=match_lb.get(choose_index[0])\r\n result=tkinter.messagebox.askquestion(title='attention',message='是否确定将比赛更改为:'+choice)\r\n if result=='yes':\r\n 
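# user confirmed the dialog, so switch the active match\r\n 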
changeCurrentMatch(choice)\r\ntk.Button(match_frame,text='更改比赛',font=('',13,'bold'),width=10,height=1,command=changeMatchBtn).place(x=20,y=180,anchor='nw')\r\n##新建比赛\r\nE1=tk.Entry(match_frame,font=('',15),width=15)\r\nE1.place(x=150,y=145,anchor='nw')\r\ndef addMatchBtn():\r\n new_team_name=E1.get()\r\n if len(new_team_name)==0:\r\n #未选择\r\n tkinter.messagebox.showwarning(title='错误', message='请先输入') \r\n else:\r\n result=tkinter.messagebox.askquestion(title='attention',message='是否确定新建比赛:'+new_team_name)\r\n if result=='yes':\r\n addMatch(new_team_name)\r\n\r\ntk.Button(match_frame,text='新建比赛',font=('',13,'bold'),width=10,height=1,command=addMatchBtn).place(x=170,y=180,anchor='nw')\r\n\r\ndef deleteAllTeamsBtn():\r\n global current_match_name\r\n result=tkinter.messagebox.askquestion(title='attention',message='是否确定删除 {} 的所有队伍'.format(current_match_name))\r\n if result=='yes':\r\n deleteAllTeams() \r\ntk.Button(match_frame,text='删除当前比赛所有队伍',fg='red',font=('',12,'bold'),width=20,height=1,command=deleteAllTeamsBtn).place(x=30,y=920,anchor='nw')\r\ndef deleteAllImagesBtn():\r\n global current_match_name\r\n result=tkinter.messagebox.askquestion(title='attention',message='是否确定删除 {} 的所有图片'.format(current_match_name))\r\n if result=='yes':\r\n deleteAllImages() \r\ntk.Button(match_frame,text='删除当前比赛所有图片',fg='red',font=('',12,'bold'),width=20,height=1,command=deleteAllImagesBtn).place(x=30,y=960,anchor='nw')\r\ndef deleteAllInfoBtn():\r\n global current_match_name\r\n result=tkinter.messagebox.askquestion(title='attention',message='是否确定删除 {} 的所有情报'.format(current_match_name))\r\n if result=='yes':\r\n deleteAllInfo() \r\ntk.Button(match_frame,text='删除当前比赛所有情报',fg='red',font=('',12,'bold'),width=20,height=1,command=deleteAllInfoBtn).place(x=30,y=1000,anchor='nw')\r\n\r\n\r\n\r\n#第二部分\r\ntk.Label(team_frame,text='当前队伍:',font=('黑体',12,'bold')).place(x=20,y=60,anchor='nw')\r\n#print(current_teams)\r\ncurrent_teams_Var=tk.StringVar()\r\ncurrent_teams_Var.set(tuple(current_teams))\r\nshowList(current_teams_Var,team_frame,20,90,10)\r\n##单独添加队伍\r\nE_add_team=tk.Entry(team_frame,font=('',15),width=15)\r\nE_add_team.place(x=20,y=320,anchor='nw')\r\ndef addTeamBtn():\r\n new_team_name=E_add_team.get()\r\n if len(new_team_name)==0:\r\n #未选择\r\n tkinter.messagebox.showwarning(title='错误', message='请先输入') \r\n else:\r\n result=tkinter.messagebox.askquestion(title='attention',message='是否确定添加队伍:'+new_team_name)\r\n if result=='yes':\r\n addTeam(new_team_name)\r\ntk.Button(team_frame,text='添加队伍',font=('',13,'bold'),width=10,height=1,command=addTeamBtn).place(x=20,y=350,anchor='nw')\r\n\r\n\r\ndef sortTeamName(team_list):\r\n \"\"\"\r\n 将队伍名以队伍名首字母排序\r\n \"\"\"\r\n def quickSort(left,right):\r\n if left>right:return\r\n key=team_list[left]\r\n i=left\r\n j=right\r\n while i<j:\r\n while i<j and team_list[j]>=key:\r\n j-=1\r\n team_list[i]=team_list[j]\r\n while i<j and team_list[i]<=key:\r\n i+=1\r\n team_list[j]=team_list[i]\r\n team_list[i]=key\r\n quickSort(left,i-1)\r\n quickSort(i+1,right)\r\n quickSort(0,len(team_list)-1)\r\n return team_list
# @blueprint.route('/<template>')\n# def route_template(template):\n # print ("-------------------Home. 
Invalid Page[Catch all]-------------\")\n # return redirect(url_for('home_blueprint.Dashboard'))\n # #return render_template('errors/page_404.html'), 404\n \n\n@blueprint.route('/index')\ndef index():\n return redirect(url_for('home_blueprint.Dashboard'))\n\n@blueprint.route('/Dashboard')\n@login_required\ndef Dashboard():\n Entity_dict = {}\n Entity_List = mongo.db.AchillesEnv.distinct(\"Entity\")\n for item in Entity_List:\n Env_List = list(mongo.db.AchillesEnv.find({\"Entity\" : item}))\n Entity_dict[item] = Env_List\n #End_for\n \n Application_List = mongo.db.AchillesApp.distinct(\"Name\" , {})\n\n return render_template('Dashboard.html' , Entity_List = Entity_List , Entity_dict = Entity_dict , Application_List = Application_List)\n\n\n@blueprint.route('/AppDetails')\n@login_required\ndef Home():\n #Entity_List = list(mongo.db.AchillesEnv.find({},{\"_id\": 0, \"Entity\": 1}))\n Entity_List = mongo.db.AchillesApp.distinct(\"Entity\")\n\n #This does not work properly when browser is refreshed.\n #EnvList = mongo.db.AchillesEnv.distinct(\"Env\" , {\"Entity\" : Entity_List[0]})\n #print EnvList\n #return render_template('Home.html' , Entity_List = Entity_List , EnvList = EnvList)\n\n return render_template('AppDetails.html' , Entity_List = Entity_List)\n\n@blueprint.route('/Embeddors')\n@login_required\ndef Embeddors():\n Entity_List = mongo.db.AchillesApp.distinct(\"Entity\")\n return render_template('Embeddors.html' , Entity_List = Entity_List)\n\n\n@blueprint.route('/Servers')\n@login_required\ndef Servers():\n Entity_List = mongo.db.AchillesEnv.distinct(\"Entity\")\n return render_template('Servers.html' , Entity_List = Entity_List)\n \n@blueprint.route('/Runtime')\n@login_required\ndef Runtime():\n Entity_List = mongo.db.AchillesEnv.distinct(\"Entity\")\n return render_template('Runtime.html' , Entity_List = Entity_List)\n\n@blueprint.route('/AppList')\n@login_required\ndef AppList():\n Entity_List = mongo.db.AchillesEnv.distinct(\"Entity\")\n return render_template('AppList.html' , Entity_List = Entity_List)\n\n\n@blueprint.route('/Monitoring')\n@login_required\ndef getMonitoringList():\n return render_template('Monitoring.html')\n\n\n@blueprint.route('/BlankEar')\n@login_required\ndef BlankEar():\n Entity_List = mongo.db.AchillesEnv.distinct(\"Entity\")\n return render_template('BlankEar.html' , Entity_List = Entity_List)\n\n@blueprint.route('/MigrateEar')\n@login_required\ndef MigrateEar():\n Entity_List = mongo.db.AchillesEnv.distinct(\"Entity\")\n return render_template('MigrateEar.html' , Entity_List = Entity_List)\n \n@blueprint.app_errorhandler(403)\ndef access_forbidden(error):\n return render_template('errors/page_403.html'), 403\n\n\n@blueprint.app_errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/page_404.html'), 404\n\n\n@blueprint.app_errorhandler(500)\ndef internal_error(error):\n return render_template('errors/page_500.html'), 500 \n","sub_path":"app/home/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"222399184","text":"class Solution(object):\n def minPathSum(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n m = len(grid)\n n = len(grid[0])\n print(grid[m - 1][n - 1])\n if m == 0 or n == 0:\n return 0\n dp = [[-1 for j in range(n + 1)] for i in range(m + 1)]\n dp[m - 1][n - 1] = grid[m - 1][n - 1]\n \n for i in range(m - 1, -1, -1):\n for j in range(n - 1, -1, -1):\n min_val = 
min(dp[i + 1][j], dp[i][j + 1])\n if min_val < 0:\n max_val = max(dp[i + 1][j], dp[i][j + 1])\n s = max_val if max_val > 0 else 0\n else:\n s = min_val\n dp[i][j] = s + grid[i][j]\n\n return dp[0][0]\n","sub_path":"64-minPathSum.py","file_name":"64-minPathSum.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"334518534","text":"class Client(object):\n def __init__(self, numero):\n self.numero = numero # type VARCHAR\n self.liste_article_client = {} # initialise an empty dict\n\n def ajout_article(self, numero_article, quantite):\n self.liste_article_client[numero_article] = quantite\n\n\n# end of ajout_article\n# end of class Client\n\ndef creation_ou_maj_client(numero_client, nom_article, quantite, liste_clients):\n existant_client = False\n for un_client in liste_clients:\n if un_client.numero == numero_client: # update the existing client\n existant_client = True\n un_client.ajout_article(nom_article, quantite) # add the new article with its quantity\n # still need a way to write this back into the client\n break\n # if the flag is still False, the client was not found\n if not existant_client:\n nouveau_client = Client(numero_client)\n nouveau_client.ajout_article(nom_article, quantite)\n liste_clients.append(nouveau_client)\n# end of creation_ou_maj_client\n","sub_path":"mes_classes/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"21963245","text":"#encoding:utf-8\nfrom .views import bp\nfrom flask import session,g\nimport config\nfrom .models import CMSUser,CMSPermission\n\n\n# check whether the user is logged in\n# before_request hook: runs before every request; if the session holds a user id, load that user onto g\n@bp.before_request\ndef brefore_request():\n if config.CMS_USER_ID in session:\n user_id = session[config.CMS_USER_ID]\n user = CMSUser.query.filter_by(id=user_id).first()\n if user:\n g.cms_user = user\n\n\n# templates need CMSPermission, so expose it through a context processor\n@bp.context_processor\ndef cms_context_processor():\n return {'CMSPermission':CMSPermission}","sub_path":"apps/cms/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"138465381","text":"from abc import ABC\nfrom typing import Any, Optional, Union\n\nfrom easydata.parsers.text import Text\nfrom easydata.utils import price\n\n__all__ = (\n \"BasePriceFloat\",\n \"PriceFloat\",\n \"PriceInt\",\n \"PriceText\",\n)\n\n\nclass BasePriceFloat(Text, ABC):\n def __init__(\n self,\n *args,\n decimals: Optional[int] = None,\n min_value: Optional[Union[float, int]] = None,\n max_value: Optional[Union[float, int]] = None,\n **kwargs,\n ):\n\n self.__decimals = decimals\n self.__min_value = min_value\n self.__max_value = max_value\n\n super().__init__(\n *args,\n **kwargs,\n )\n\n @property\n def _decimals(self):\n if self.__decimals is False or isinstance(self.__decimals, int):\n return self.__decimals\n\n return self.__decimals or self._decimals_config\n\n @property\n def _min_value(self):\n return self.__min_value or self._min_value_config\n\n @property\n def _max_value(self):\n return self.__max_value or self._max_value_config\n\n @property\n def _decimals_config(self):\n return None\n\n @property\n def _min_value_config(self):\n return None\n\n 
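# like the hooks above, this one is overridden in PriceFloat to read from config\n 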
@property\n def _max_value_config(self):\n return None\n\n def _parse_value(\n self,\n value: Any,\n data: Any,\n ):\n\n value = super()._parse_value(value=value, data=data)\n\n if not value:\n return None\n\n value = price.to_float(\n price_value=value,\n decimals=self._decimals,\n )\n\n if value is None:\n return None\n\n return self._process_min_max_value(value)\n\n def _process_min_max_value(self, value: float) -> Optional[float]:\n min_value = self._min_value\n max_value = self._max_value\n\n if min_value and value < min_value:\n return None\n\n if max_value and value > max_value:\n return None\n\n return value\n\n\nclass PriceFloat(BasePriceFloat):\n @property\n def _decimals_config(self):\n return self.config[\"ED_PRICE_DECIMALS\"]\n\n @property\n def _min_value_config(self):\n return self.config[\"ED_PRICE_MIN_VALUE\"]\n\n @property\n def _max_value_config(self):\n return self.config[\"ED_PRICE_MAX_VALUE\"]\n\n\nclass PriceInt(PriceFloat):\n def _parse_value(\n self,\n value: Any,\n data: Any,\n ):\n\n value = super()._parse_value(value=value, data=data)\n\n return int(value) if value else None\n\n\nclass PriceText(PriceFloat):\n def _parse_value(\n self,\n value: Any,\n data: Any,\n ):\n\n value = super()._parse_value(value=value, data=data)\n\n return str(value) if value else None\n","sub_path":"easydata/parsers/price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"443291219","text":"import os,uuid\nimport os.path as osp\nimport numpy as np\n# `pip install easydict` if you don't have it\nfrom easydict import EasyDict as edict\nfrom core.configBase import _merge_a_into_b\n\n__C = edict()\n# Consumers can get config by:\n# from core.config import cfg\ncfg = __C\n\n\ncfg.expName = 'default'\ncfg.loadOnlyClsStr = None # shortcut for development; only load the class name speficied; e.g. \"cat\" for cifar_10\ncfg.verbose = False # only use this if we want to print a lot.\n\ncfg.netInfo = edict()\ncfg.netInfo.modelName = None\ncfg.netInfo.modelType = None\n\ncfg.routingAnalysisInfo = edict()\n#cfg.routingAnalysisInfo.layers = [\"conv1\",\"conv2\",\"ip1\",\"cls_score\"]\ncfg.routingAnalysisInfo.layers = [\"conv1\"]\ncfg.routingAnalysisInfo.clsReferenceRouteStr = 'Pos'\n\n# cfg.routingAnalysisInfo.densityEstimationType = 'cluster'\n# cfg.routingAnalysisInfo.densityEstimationClusterType = 'kmeans'\n# cfg.routingAnalysisInfo.densityEstimationTypeConfig = None\n# cfg.routingAnalysisInfo.densityEstimationTypeConfigFilename = None\n# cfg.routingAnalysisInfo.densityEstimationTypeConfigByClass = True # always true currently; we can specify how we want classes grouped together in [routingStatisticsByClass] (e.g. 
not just by class )\n\ncfg.routingAnalysisInfo.comboType = 'all'\ncfg.routingAnalysisInfo.comboInfo = None #[None _ for combo in comboList]\ncfg.routingAnalysisInfo.routeFunction = None # generated from ...\"str\"\ncfg.routingAnalysisInfo.routeDifference = None # generated from ...\"str\"\ncfg.routingAnalysisInfo.routeFunctionStr = None # function type to creating a route from activ.\ncfg.routingAnalysisInfo.routeDifferenceStr = None # function type to take the difference between two routes\n\ncfg.routingAnalysisInfo.densityEstimation = edict()\ncfg.routingAnalysisInfo.densityEstimation.typeStr = 'cluster'\ncfg.routingAnalysisInfo.densityEstimation.clusterTypeStr = 'kmeans'\ncfg.routingAnalysisInfo.densityEstimation.typeConfig = None\ncfg.routingAnalysisInfo.densityEstimation.typeConfigFilename = None\ncfg.routingAnalysisInfo.densityEstimation.allClassesSameParameters = True\ncfg.routingAnalysisInfo.densityEstimation.classSeparate = True # always true currently; we can specify how we want classes grouped together in [routingStatisticsByClass] (e.g. not just by class )\n\n# training dataset for routing analysis\ncfg.routingAnalysisInfo.train = edict()\ncfg.routingAnalysisInfo.train.name = None\ncfg.routingAnalysisInfo.train.split = None\ncfg.routingAnalysisInfo.train.config = None\ncfg.routingAnalysisInfo.train.size = None\n# testing dataset for routing analysis\ncfg.routingAnalysisInfo.test = edict()\ncfg.routingAnalysisInfo.test.name = None\ncfg.routingAnalysisInfo.test.split = None\ncfg.routingAnalysisInfo.test.config = None\ncfg.routingAnalysisInfo.test.size = None\n\n\n#\n# classification experiment info\n#\n\ncfg.clsExperimentInfo = edict()\ncfg.clsExperimentInfo.clsModelType = 'Svm'\n# reference routing information\ncfg.clsExperimentInfo.referenceRoute = edict()\ncfg.clsExperimentInfo.referenceRoute.referenceName = None\ncfg.clsExperimentInfo.referenceRoute.indexWeightStr = None\ncfg.clsExperimentInfo.referenceRoute.dataset = edict()\ncfg.clsExperimentInfo.referenceRoute.dataset.name = None\ncfg.clsExperimentInfo.referenceRoute.dataset.split = None\ncfg.clsExperimentInfo.referenceRoute.dataset.config = None\n\n#\n# if we use clustering for density esimation, these are the configs for each method\n#\ncfg.kmeans = edict()\ncfg.kmeans.nClusters = 50\n\ncfg.dbscan = edict()\ncfg.dbscan.eps = 12500\ncfg.dbscan.minSamples = 2\n\ndef cfg_from_file(filename):\n \"\"\"Load a config file (NO merging) it into the default options.\"\"\"\n yaml_cfg = loadYmlFile(filename)\n _merge_a_into_b(yaml_cfg, __C)\n\ndef loadYmlFile(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n return yaml_cfg\n\ndef initRouteConfig(densityEstimationConfigFile):\n initDensityEsimationParameterConfig(densityEstimationConfigFile)\n initComboInfo()\n\ndef initDensityEsimationParameterConfig(filename):\n cfg.routingAnalysisInfo.densityEstimation.typeConfigFilename = osp.splitext(osp.basename(filename))[0]\n cfg.routingAnalysisInfo.densityEstimation.typeConfig = loadYmlFile(filename)\n\ndef initComboInfo():\n layers = cfg.routingAnalysisInfo.layers\n comboType = cfg.routingAnalysisInfo.comboType\n print(layers)\n if comboType == 'all':\n comboInfo = createCombinationForDensityEstimationAll(layers)\n elif comboType == 'pair':\n comboInfo = createCombinationForDensityEstimationPairwise(layers)\n elif comboType == 'sep':\n comboInfo = createCombinationForDensityEstimationSeparate(layers)\n print(\"combo type is ~{}~.... 
nice (^_^)\".format(comboType))\n cfg.routingAnalysisInfo.comboInfo = comboInfo\n\ndef createRecordsPath(modelType,imdbName,netName):\n recordsPath = \"./output/{modelType}/tp_fn_records/{imdbName}/records_cls_{netName}.pkl\"\\\n .format(modelType=modelType,imdbName=imdbName,netName=netName)\n return recordsPath\n \ndef createActivityVectorPath(modelType,imdbName,netName):\n recordsPath = \"./output/{modelType}/tp_fn_records/{imdbName}/records_cls_{netName}.pkl\"\\\n .format(modelType=modelType,imdbName=imdbName,netName=netName)\n return recordsPath\n\ndef createDensityEstimationCacheStrID(comboID,recordIncludeTypeStr,imdb,clsName):\n dsName,dsSplit,dsConfig = imdbToVariableStrings(imdb)\n allClassesSameParameters = cfg.routingAnalysisInfo.densityEstimation.allClassesSameParameters\n if allClassesSameParameters:\n comboParameters = cfg.routingAnalysisInfo.comboInfo[comboID]\n else:\n comboParameters = cfg.routingAnalysisInfo.comboInfo[comboID][clsName]\n typeStr = cfg.routingAnalysisInfo.densityEstimation.typeStr\n comboStr = createComboInfoStr(comboID,comboParameters,typeStr,clsName)\n return '{comboStr}_{dsName}-{dsSplit}-{dsConfig}_{clsName}_{recordIncludeTypeStr}'.\\\n format(comboStr=comboStr,dsName=dsName,dsSplit=dsSplit,dsConfig=dsConfig,\n clsName=clsName,recordIncludeTypeStr=recordIncludeTypeStr)\n\ndef createClassificationExperimentCacheName():\n dirPath = \"data/routing_cache/{modelName}\".format(modelName=cfg.netInfo.modelName)\n if not osp.exists(dirPath): os.makedirs(dirPath)\n fn = \"{dirPath}/clsCache.pkl\".format(dirPath=dirPath)\n return fn\n\ndef imdbFromDatasetDict(datasetDict):\n return \"{}-{}-{}\".format(datasetDict.name,datasetDict.split,datasetDict.config)\n\ndef imdbToVariableStrings(imdb):\n dsName = imdb.name\n dsSplit = imdb._image_set\n dsConfig = 'default'\n return dsName,dsSplit,dsConfig\n\n\"\"\" \n\nconflict currently\n\nwe have density estimation parameters for both 'layerNames' and 'classNames'\n\nwe can only do one or have some set of rules for combining them...\n\ncurrently:\n-> save name is off of global configuration\n-> actual density methods depend on \"comboIDs\" and \"classNames\"\n\n\"\"\"\n\ndef getDensityEstimationHyperparameters(typeStr = None, clusterTypeStr = None):\n if typeStr is None:\n typeStr = cfg.routingAnalysisInfo.densityEstimation.typeStr\n if clusterTypeStr is None:\n clusterTypeStr = cfg.routingAnalysisInfo.densityEstimation.clusterTypeStr\n params = None\n # add more types of density information here\n if cfg.routingAnalysisInfo.densityEstimation.allClassesSameParameters:\n if typeStr == 'cluster':\n if clusterTypeStr == 'kmeans':\n print(\"--- using some kmeans cluster. aaand that's how the news goes. ----\")\n params = templateKMeans(cfg.kmeans.nClusters)\n elif clusterTypeStr == 'dbscan':\n params = templateDBSCAN(cfg.dbscan.minSamples,cfg.dbscan.eps)\n elif typeStr == 'aNewTypeOfDensityEstimation':\n print(\"A new type of density estimation goes here. BALLERZ\")\n else:\n print(\"--- using the plain old arithmetic average. aaand that's how the news goes. 
----\")\n else:\n paramsByClass = cfg.routingAnalysisInfo.densityEstimation.typeConfig\n flattenParams = {}\n for name,classParams in params.items():\n flattenParams[name] = paramsByClassclassParams[typeStr][clusterTypeStr]\n params = flattenParams\n return params\n\ndef createCombinationForDensityEstimationAll(layers):\n densityEsimtationType = cfg.routingAnalysisInfo.densityEstimation.typeStr\n # currently each layer uses the same stuff\n combos = []\n keyStr = '-'.join(layers)\n densityEstimationInfo = {\"method\": \"cluster\"}\n densityEstimationInfo[densityEsimtationType] = getDensityEstimationHyperparameters()\n combos = {keyStr:densityEstimationInfo}\n return combos\n \ndef createCombinationForDensityEstimationPairwise(layers):\n # currently each layer uses the same stuff\n typeStr = cfg.routingAnalysisInfo.densityEstimation.typeStr\n deTypeConfig = cfg.routingAnalysisInfo.densityEstimation.typeConfig\n clusterTypeStr = cfg.routingAnalysisInfo.densityEstimation.clusterTypeStr\n combos = {}\n for idx,layerNameA in enumerate(layers):\n for jdx,layerNameB in enumerate(layers):\n if idx > jdx: continue\n densityEstimationInfo = {\"method\": typeStr}\n densityEstimationInfo[typeStr] = getDensityEstimationHyperparameters()\n keyStr = \"{}-{}\".format(layerNameA,layerNameB)\n if layerNameA == layerNameB: keyStr = \"{}\".format(layerNameA)\n combos[keyStr] = densityEstimationInfo\n return combos\n\ndef createCombinationForDensityEstimationSeparate(layers):\n # currently each layer uses the same stuff\n densityEsimtationType = cfg.routingAnalysisInfo.densityEstimation.typeStr\n combos = {}\n for layer in layers:\n densityEstimationInfo = {\"method\": \"cluster\"}\n densityEstimationInfo[densityEsimtationType] = getDensityEstimationHyperparameters()\n keyStr = layer\n combos[keyStr] = densityEstimationInfo\n return combos\n\ndef templateKMeans(nClusters):\n kmeans = {}\n kmeans[\"type\"] = \"kmeans\"\n kmeans[\"parameters\"] = {}\n kmeans[\"parameters\"]['nCluster'] = nClusters\n return kmeans\n\ndef templateDBSCAN(minSamples,eps):\n dbscan = {}\n dbscan[\"type\"] = \"dbscan\"\n dbscan[\"parameters\"] = {}\n dbscan[\"parameters\"]['minSamples'] = minSamples\n dbscan[\"parameters\"]['eps'] = eps\n return dbscan\n\ndef getResultsBaseFilenameRouting():\n fn = \"{}_{}_{}_{}\".format(cfg.routingAnalysisInfo.densityEstimation.typeStr,\n cfg.routingAnalysisInfo.densityEstimation.clusterTypeStr,\n cfg.routingAnalysisInfo.densityEstimation.typeConfigFilename,\n cfg.routingAnalysisInfo.densityEstimation.classSeparate)\n # if cfg.clusterParams is not None and cfg.clusterType is not None:\n # fn += \"_{}\".format(cfg.clusterType)\n # for key,value in cfg.clusterParams.items():\n # fn += \"_{key}{value}\".format(key=key,value=value)\n return fn\n\ndef getResultsDirectory():\n fn = \"./output/routing/{modelName}\".format(modelName=cfg.netInfo.modelName)\n return fn\n\ndef startTextFile(prefix,**kwargs):\n dirPath = getResultsDirectory()\n uuidStr = str(uuid.uuid4())\n resultsFn = \"{}/results_{}.txt\".format(dirPath,uuidStr)\n if not osp.exists(dirPath): os.makedirs(dirPath)\n fid = open(resultsFn,'w+')\n addConfigToText(fid,**kwargs)\n return fid\n\ndef addConfigToText(fid,**kwargs):\n clsExpClassName = cfg.loadOnlyClsStr\n comboTypeStr = cfg.routingAnalysisInfo.comboType\n deTypeStr = cfg.routingAnalysisInfo.densityEstimation.typeStr\n clusterTypeStr = cfg.routingAnalysisInfo.densityEstimation.clusterTypeStr\n dsRefRouteImdbStr = imdbFromDatasetDict(cfg.clsExperimentInfo.referenceRoute.dataset)\n 
comboStr = \"\"\n for comboID,comboParameters in cfg.routingAnalysisInfo.comboInfo.items():\n comboStr += createComboInfoStr(comboID,comboParameters,deTypeStr,clsExpClassName)\n configHeaderStr = \"{}\\n{}]\\n{}\\n{}\\n{}\\n{}\\n\".format(clsExpClassName,comboTypeStr,\\\n deTypeStr,clusterTypeStr,\\\n dsRefRouteImdbStr,comboStr)\n for key,value in kwargs.items():\n configHeaderStr += '{}: {}\\n'.format(key,value)\n fid.write(configHeaderStr)\n \ndef getClassificationExperimentResultsTxtFilenameRouting(dsName,dsSplitWRTRoute,dsConfigWRTRoute,dsSplitOg,dsConfigOg):\n\n secondDir = \"{resultDirName}/{dsName}\".format(resultDirName=resultDirName,dsName=dsName)\n thirdDir = \"{dsSplitWRTRoute}-{dsConfigWRTRoute}_{dsSplitOg}-{dsConfigOg}_{comboType}\".\\\n format(dsSplitWRTRoute=dsSplitWRTRoute,dsConfigWRTRoute=dsConfigWRTRoute,\n dsSplitOg=dsSplitOg,dsConfigOg=dsConfigOg,comboType=cfg.routingAnalysisInfo.comboType)\n fullPath = \"{firstDir}/{secondDir}/{thirdDir}/\".\\\n format(firstDir=firstDir,secondDir=secondDir,thirdDir=thirdDir)\n if not osp.exists(fullPath): os.makedirs(fullPath)\n fn = fullPath\n typeStr = cfg.routingAnalysisInfo.densityEstimation.typeStr\n clsExpClassName = cfg.loadOnlyClsStr\n for comboID,comboParameters in cfg.routingAnalysisInfo.comboInfo.items():\n fn += comboID\n #fn += createComboInfoStr(comboID,comboParameters,typeStr,clsExpClassName,short=True)\n refRouteInfo = cfg.clsExperimentInfo.referenceRoute\n fn += \"_{}_{}\".format(refRouteInfo.referenceName,imdbFromDatasetDict(refRouteInfo.dataset))\n return fn + \".txt\"\n\ndef createComboInfoStr(comboID,comboParameters,densityEsimtationType,clsName,short=False):\n typeStr = cfg.routingAnalysisInfo.densityEstimation.typeStr\n clusterTypeStr = cfg.routingAnalysisInfo.densityEstimation.clusterTypeStr\n comboStr = \"{}+{}+{}\".format(comboID,typeStr,clusterTypeStr)\n if not short:\n for key,value in comboParameters[typeStr].items():\n if type(value) is edict:\n comboStr += \"_{}\".format(key)\n for k,v in value.items():\n comboStr += \"+{}{}\".format(k,v)\n else:\n comboStr += \"_{}{}\".format(key,value)\n return comboStr\n\ndef checkConfigEquality(validConfig,proposedConfig):\n \"\"\"\n check if the input config edict is the same\n as the current config edict\n \"\"\"\n for key,validValue in validConfig.items(): # iterate through the \"truth\"\n proposedValue = proposedConfig[key]\n if type(validValue) is edict or type(validValue) is dict:\n isValid = checkConfigEquality(validValue,proposedValue)\n if not isValid: return False\n continue\n if proposedValue != validValue: return False\n return True\n\ndef unpackClassificationInformation(info):\n avDict = info['avDict']\n records = info['records']\n layerOrder = info['layerOrder']\n referenceRoute = info['referenceRoute']\n datasetSize = info['datasetSize']\n clsName = info['clsName']\n imdb = info['imdb']\n return avDict,records,layerOrder,referenceRoute,datasetSize,clsName,imdb\n\ndef packClassificationInformation(avDict,records,layerOrder,referenceRoute,\\\n datasetSize,clsName,imdb):\n info = {}\n info['avDict'] = avDict\n info['records'] = records\n info['layerOrder'] = layerOrder\n info['referenceRoute'] = referenceRoute\n info['datasetSize'] = datasetSize\n info['clsName'] = clsName\n info['imdb'] = imdb\n return info\n\n\"\"\"\n- network information \n - model name\n - model architecture\n - dataset used for parameter estimation\n- routing analysis info\n - dataset name & split for training *routing analysis*\n - dataset name & split for testing *routing 
analysis*\n - layers used in the routing analysis\n- combination of the layers for density estimation (kmeans, dbscan, etc)\n - split\n - pairs\n - all\n for each combination, density estimation information \n - type of density estimation\n - parameters for density estimation\n- reference route information\n - type (all,pos,neg)\n - dataset used for reference route\n - index weight\n- route function information\n - route creation function\n - route difference function\n\"\"\"\n\n\n\n","sub_path":"lib/core/routingConfig.py","file_name":"routingConfig.py","file_ext":"py","file_size_in_byte":16138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"5097225","text":"import matplotlib\nmatplotlib.use('TkAgg')\nimport numpy as np\nimport matplotlib.pyplot as plt # TODO(abhishek): consider removing pyplot and just stick w/ matplotlib\n# Check: http://stackoverflow.com/questions/25839795/opening-a-plot-in-tkinter-only-no-matplotlib-popup\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nimport Tkinter as tk\n# import Tkinter.ttk as ttk\nimport sys\nfrom matplotlib.gridspec import GridSpec\n\nimport multiprocessing\nfrom multiprocessing import Process, Queue\nimport capnp\nimport ip_proto_capnp\nimport custom_config\nimport logging\nimport gflags\nimport Tkinter\nfrom Tkinter import *\nfrom bitarray import bitarray\n\n\n\nclass AMON_App(Frame):\n\n\n def __init__(self, master=None, data_queue=None, interactive_filter_queue=None):\n Frame.__init__(self, master)\n self.data_queue = data_queue\n self.interactive_filter_queue = interactive_filter_queue\n self.create_widgets()\n self.create_graphs()\n self.pack(fill='both', expand=True)\n\n\n def create_widgets(self):\n # Frame that resides on the left side of the window\n self.widgets_frame = Frame(self)\n self.widgets_frame.pack(side='left', padx=10, pady=10, fill='both')\n\n # Source bucket widgets\n # self.label_src = Label(master=self.widgets_frame, text=\"Enter the src bin to filter: \", font=('bold'))\n # self.label_src.grid(sticky=W)\n # self.entry_src = Entry(master=self.widgets_frame)\n # self.entry_src.grid(sticky=W)\n # self.entry_src.bind(\"<Return>\",self.evaluate_src)\n\n\n # Destination bucket widgets\n self.label_dst = Label(master=self.widgets_frame, text=\"Enter the dst bin to filter: \", font=('bold'))\n self.label_dst.grid(sticky=W)\n self.entry_dst = Entry(master=self.widgets_frame)\n self.entry_dst.grid(sticky=W)\n # self.entry_dst.bind(\"<Return>\",self.evaluate_dst)\n self.entry_dst.bind(\"<Return>\",self.parse_bins_from_dst_entry_box)\n\n # Button widget that handles both source and destination widgets\n self.button = Button(master=self.widgets_frame, text=\"Click to Filter!\")\n self.button.bind(\"<Button-1>\",self.evaluate_src_dst) # Binding event -- Button 1 (left mouse click)!\n self.button.grid(sticky=W)\n\n def create_graphs(self):\n\n # Frame that resides on right side of window\n self.graphFrame = Frame(self)\n self.graphFrame.pack(side='right', fill='both', expand=True)\n Grid.rowconfigure(self.graphFrame, 0, weight=1)\n Grid.columnconfigure(self.graphFrame, 0, weight=1)\n\n # databricks visualization\n self.fig = plt.figure(figsize=(10,10))\n self.gs = GridSpec(3,1)\n ax = self.fig.add_subplot(self.gs[0:2,:])\n self.im = ax.imshow(-np.random.random([128,128]), origin = 'upper', cmap=plt.cm.RdYlGn, interpolation = 'nearest', vmax=0, vmin=-400000)\n\n\n # bar graph visualization\n self.num_graphed = 0\n 
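# bottom row of the GridSpec: bar chart of the top K hitters, redrawn by refresher()\n 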
self.topkplot = self.fig.add_subplot(self.gs[2,:])\n self.topkplot.set_title('Top K hitters Stats')\n\n # Canvas and Toolbar definitions for the graphs\n self.canvas = FigureCanvasTkAgg(self.fig, master=self.graphFrame)\n self.canvas.get_tk_widget().pack(side='bottom', fill='both', expand=True)\n toolbar = NavigationToolbar2TkAgg(self.canvas, self.graphFrame)\n toolbar.update()\n self.canvas._tkcanvas.pack(side='top', fill='both', expand=True)\n self.canvas.show()\n\n def refresher(self):\n\n proto_obj = self.data_queue.get()\n if proto_obj[0].size == 0:\n\n root.after(1000, self.refresher)\n else:\n # databrick sketch\n num_intensities = proto_obj[0].reshape(128,128)\n self.im.set_array(-num_intensities)\n\n combined = []\n for idx, src in enumerate(proto_obj[1]):\n combined.append(src + \"\\n - \\n\" + proto_obj[2][idx])\n y_pos = np.arange(len(combined))\n\n #TODO(abhishek): Push this to below ==0: block (to avoid re-drawing all the time)?\n\n\n if self.num_graphed == 0:\n self.bar_rect = plt.bar(y_pos, proto_obj[3], align='center', alpha=0.3, width=0.2, color='maroon')\n self.num_graphed += 1\n else:\n\n for i in range(len(combined)):\n for rect, h in zip(self.bar_rect, proto_obj[3]):\n rect.set_height(h)\n\n plt.xticks(y_pos, combined)\n plt.ylabel('# of Bytes')\n plt.xlabel('Top %d hitters (src ip - dst ip)' %len(combined))\n self.canvas.draw()\n root.after(1000, self.refresher) # every second...\n\n\n\n def evaluate_src_dst(self, event):\n # logging.info(\"Filtering Src and Dst bins:%d and %d\"%(self.entry_src.get(), self.entry_dst.get()))\n logging.info(\"Filtering Src and Dst bins:%d and %d\"%(0, int(self.entry_dst.get())))\n #filter_src_bin = int(self.entry_src.get())\n filter_dst_bin = int(self.entry_dst.get())\n self.interactive_filter_queue.put(filter_dst_bin)\n\n def evaluate_src(self,val):\n #logging.info(\"Filtering Src bin value:%d\"%int(self.entry_src.get()))\n #filter_src_bin = int(self.entry_src.get())\n logging.warn(\"Sorry, this textbox is not configured yet!\")\n\n def evaluate_dst(self,val):\n logging.warn(\"Filtering Dst bin value:%d\"%int(self.entry_dst.get()))\n filter_dst_bin = int(self.entry_dst.get())\n bins = bitarray(128)\n bins.setall(0)\n bins[filter_dst_bin] = 1\n # TODO(abhishek): Here we will have to get multiple bins and put them into the bitmap\n\n consolidated_bins = self.shifting(bins)\n logging.warn(\"Entered Consolidated into Queue:%d\"%consolidated_bins)\n self.interactive_filter_queue.put(consolidated_bins)\n\n def parse_bins_from_dst_entry_box(self, val):\n bins = bitarray(128)\n bins.setall(0)\n string = self.entry_dst.get()\n\n logging.warn(\"Entered string was: %s\"%string)\n split_bins_list = string.split(\" \")\n if int(split_bins_list[0]) == -1:\n logging.warn(\"EXPLICIT Entered Consolidated into Queue:%d\"%-1)\n self.interactive_filter_queue.put(-1)\n else:\n for each_bin in split_bins_list:\n bins[int(each_bin)] = 1\n\n consolidated_bins = self.shifting(bins)\n logging.warn(\"Entered Consolidated into Queue:%d\"%consolidated_bins)\n self.interactive_filter_queue.put(consolidated_bins)\n\n\n def shifting(self, bitlist):\n out = 0\n for bit in reversed(bitlist):\n out = (out << 1) | bit\n return out\n\n\nclass FlowtransmitImpl(ip_proto_capnp.Flowtransmit.Server):\n \"Implementation of Flow Transmit Interface in the schema file\"\n def __init__(self):\n self.sent_bin = None\n self.already_sent = False\n\n def src(self, databrick, hitters, _context, **kwargs):\n\n np_databrick = np.array(databrick)\n #print np_databrick\n\n #print 
np_databrick[0]\n count_zeros = 0\n for d in range(len(np_databrick)):\n if np_databrick[d] == 0:\n count_zeros += 1\n\n\n logging.info(\"# of zeros :%d, max: %d, min: %d\"%(count_zeros, max(np_databrick), min(np_databrick)))\n\n src_hitters_li = []\n dst_hitters_li = []\n hitter_bytes = []\n\n for h in hitters:\n src_hitters_li.append(h.ipsrc)\n dst_hitters_li.append(h.ipdst)\n hitter_bytes.append(h.bytes)\n\n logging.info(\"Putting data brick and hitters in the shared process queue\")\n\n data_queue.put((np_databrick, src_hitters_li, dst_hitters_li, hitter_bytes))\n\n try:\n filter_bin = interactive_filter_queue.get(block = False)\n logging.warn(\"filter bin entered: %d\",int(filter_bin))\n self.sent_bin = int(filter_bin)\n return int(filter_bin)\n except:\n\n if not self.sent_bin:\n logging.warn(\"Nothing was entered\")\n return 0\n else:\n return int(self.sent_bin)\n\ndef listen_conn_process(data_queue, interactive_filter_queue, port_number):\n address = \"*:\"+ str(port_number)\n already_sent = False\n sent_bin = None\n server = capnp.TwoPartyServer(address,bootstrap=FlowtransmitImpl())\n custom_config.ConfigLoggerAndFlags()\n logging.info(\"Listening to Incoming connections on port:%d\"%port_number)\n server.run_forever()\n\n\n\nif __name__ == '__main__':\n FLAGS = gflags.FLAGS\n capnp.remove_import_hook()\n gflags.DEFINE_string('port', 8000, 'Port number to bind the socket to (Default:8000)')\n\n port_number = int(FLAGS.port)\n custom_config.ConfigLoggerAndFlags()\n\n manager = multiprocessing.Manager()\n data_queue = manager.Queue()\n\n #interactive_filter_queue = manager.Queue()\n interactive_filter_queue = Queue()\n\n cap_conn_process = Process(target=listen_conn_process, args=(data_queue,interactive_filter_queue, port_number))\n cap_conn_process.start()\n logging.info(\"Cap'n Proto process started on port:%d\"%port_number)\n\n root = Tk()\n app = AMON_App(master=root, data_queue=data_queue,interactive_filter_queue=interactive_filter_queue)\n root.title(\"AMON Viz\")\n app.refresher()\n app.mainloop()\n","sub_path":"PlaygroundLearning/bit_set.py","file_name":"bit_set.py","file_ext":"py","file_size_in_byte":9432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"535550768","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'otoniel'\n\nimport json\n\nclass Usuario:\n\n id = []\n nombre = []\n apellido = []\n cedula = []\n id_rol = []\n telefono = []\n pin = []\n\n rol = []\n id_particular = []\n\n def __init__(self):\n self.id = []\n self.nombre = []\n self.apellido = []\n self.cedula = []\n self.id_rol = []\n self.id_particular = []\n self.rol = []\n self.telefono = []\n self.pin = []\n\n def setId(self, id):\n self.id.append(id)\n\n def setNombre(self, nombre):\n self.nombre.append(nombre)\n\n def setApellido(self, apellido):\n self.apellido.append(apellido)\n\n def setCedula(self, cedula):\n self.cedula.append(cedula)\n\n def setIdRol(self, id_rol):\n self.id_rol.append(id_rol)\n\n def setIdParticular(self, id_particular):\n self.id_particular.append(id_particular)\n\n def setRol(self, rol):\n self.rol.append(rol)\n\n def setTelefono(self, telefono):\n self.telefono.append(telefono)\n\n def setPin(self, pin):\n self.pin.append(pin)\n\n def generarJSON(self):\n lista = []\n\n registros = len(self.id)\n for i in range(0, registros):\n elemento = {}\n\n elemento['id'] = self.id[i]\n\n if registros == len(self.nombre):\n elemento['nombre'] = self.nombre[i]\n\n if registros == len(self.apellido):\n 
elemento['apellido'] = self.apellido[i]\n\n if registros == len(self.cedula):\n elemento['cedula'] = self.cedula[i]\n\n if registros == len(self.id_rol):\n elemento['id_rol'] = self.id_rol[i]\n\n if registros == len(self.id_particular):\n elemento['id_particular'] = self.id_particular[i]\n\n if registros == len(self.rol):\n elemento['rol'] = self.rol[i]\n\n if registros == len(self.telefono):\n elemento['telefono'] = self.telefono[i]\n\n if registros == len(self.pin):\n elemento['pin'] = self.pin[i]\n\n lista.append(elemento)\n\n \"\"\"\n data = {\n 'Usuario': lista,\n }\n \"\"\"\n\n #data_string = json.dumps(data)\n\n return lista\n\n def generarDiccionario(self):\n lista = []\n\n for i in range(0, len(self.id)):\n diccionario = {}\n\n diccionario['id_usuario'] = self.id[i]\n diccionario['id_rol'] = self.id_rol[i]","sub_path":"WebServiceProeza/Usuario.py","file_name":"Usuario.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"398080569","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests for Tunnel RPC methods.\n\n\"\"\"\nfrom tunnel_rpc.methods import eval_commands, parse_output, run\n\n\ndef test_eval_commands(docker_client, container_factory, tarball64_factory):\n \"\"\"Tests the command evaluation method.\n\n eval_commands should return a string\n eval_commands logs should contain both stderr and stdout\n eval_commands should extract tarballs correctly for the container\n\n \"\"\"\n ls_container = container_factory()\n ls_logs = eval_commands(docker_client, ls_container, [\"ls\"])\n assert isinstance(ls_logs, str), \"eval_commands should return a string\"\n\n stdout_container = container_factory()\n stdout_logs = eval_commands(\n docker_client, stdout_container, [\"echo 43\"]\n )\n assert \"43\" in stdout_logs, \"eval_commands should contain stdout\"\n\n stderr_container = container_factory()\n stderr_logs = eval_commands(\n docker_client, stderr_container, [\"echo 43 1>&2\"]\n )\n assert \"43\" in stderr_logs, \"eval_commands should contain stderr\"\n\n tarball_base64 = tarball64_factory({\"test_file.txt\": \"43\"})\n tarball_container = container_factory()\n tarball_logs = eval_commands(\n docker_client,\n tarball_container,\n [\"ls test_file.txt\"],\n source_base64=tarball_base64,\n )\n assert (\n \"test_file.txt\" in tarball_logs\n ), \"eval_commands should extract tarballs correctly for the container\"\n\n\ndef test_parse_output():\n \"\"\"Test the parse output method.\n\n parse_output should return a list of dicts\n parse_output should ignore preambles\n parse_output should append every command log\n\n \"\"\"\n\n response = parse_output(\"---\\n[TEST] test\\n\")\n assert isinstance(response, list), \"parse_output should return a list\"\n assert isinstance(\n response[0], dict\n ), \"parse_output should return a list of dicts\"\n\n response = parse_output(\"should not show up---\\n[COMMAND] ls\\n\")\n assert all(\n \"should not show up\" not in output\n for command in response\n for _, output in command.items()\n ), \"parse_output should ignore preambles\"\n\n for length in range(2, 10):\n output = \"---\\n[TEST] test\\n\" * length\n response = parse_output(output)\n assert (\n len(response) == length\n ), \"parse_output should append every command log\"\n\n\ndef test_run():\n \"\"\"Test the run method.\n\n run should not raise errors\n\n \"\"\"\n try:\n run({\"commands\": [\"ls\"]})\n run({\"commands\": [\"ls\"], \"foo\": \"bar\"})\n run({\"foo\": \"bar\"})\n run({\"commands\": 
[\"false\"]})\n run({\"commands\": [], \"dist\": {\"artifacts\": [\"*.txt\"]}})\n run({\"commands\": [], \"dist\": {\"artifacts\": [\"*\"]}})\n except Exception as err: # pylint: disable=broad-except\n raise AssertionError(err)\n","sub_path":"tests/test_methods.py","file_name":"test_methods.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"233811744","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 21 10:30:53 2021\n\n@author: joann\n\"\"\"\n\n## 함수\ndef add_data(friend) : # 함수 = function = 기능\n Katok.append(None)\n Klen=len(Katok)\n Katok[Klen-1]=friend\n\ndef insert_data(position, friend) :\n Katok.append(None)\n Klen=len(Katok)\n for i in range(Klen-1, position, -1):\n Katok[i]=Katok[i-1]\n Katok[i-1]=None\n Katok[position]=friend\n \ndef delete_data(position) :\n Klen=len(Katok)\n Katok[position]=None\n for i in range(position+1, Klen, 1):\n Katok[i-1]=Katok[i]\n Katok[i]=None\n del(Katok[Klen-1])\n\n## 전역변수\nKatok=[] # 빈 배열(리스트)\n\n## 메인코드\n# 데이터 생성\nadd_data('다현')\nadd_data('정연')\nadd_data('쯔위')\nadd_data('사나')\nadd_data('지효')\nadd_data('모모')\n\n# 데이터 삽입\ninsert_data(3,'미나')\n\n# 데이터 삭제\ndelete_data(4)\n\n\nprint(Katok)","sub_path":"Code03-01.py","file_name":"Code03-01.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"389397777","text":"\"\"\"\nPOST-TREATMENT BIAS - This occurs when variables that are consequences of\nother variables are including in an analysis.\n\nE.g.: Plants grown in a greenhouse subject to growth-inhibiting fungi.\nGoal is to measure effects of anti fungal treatment on plant height.\n1. Initial height, h0, measured\n2. Treatment is applied\n3. Measure final height, h1, and presence of fungus.\n\nThere are four variables: h0, h1, whether treatment was applied,\nfungus presence. h1 is the outcome of interest. What else to include?\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport pymc3 as pm\nimport arviz as ar\nfrom sklearn.preprocessing import scale\nimport matplotlib.pyplot as pl\nfrom causalgraphicalmodels import CausalGraphicalModel\n\nnp.random.seed(71)\nn_plants = 100\n\n# simulate initial height\nh0 = np.random.normal(10, 2, size=n_plants)\n\n# assign treatments and simulate fungus and growth\ntreatment = np.repeat([0, 1], n_plants/2)\nfungus = np.random.binomial(n=1, p=0.5-treatment*0.4, size=n_plants)\nh1 = h0 + np.random.normal(5 - 3 * fungus)\nd = pd.DataFrame(dict(h0=h0, h1=h1, treatment=treatment, fungus=fungus))\nd.head()\npd.plotting.scatter_matrix(d);\nd.describe()\n\n\"\"\"\nPut the parameters on a scale of proportion of height at tme t=0.\nh1,i ~ Normal(μi, σ)\nμi = h0,i * p\np = h1,i / h0,i\np = 1 → the plant's height hasn't changed\np = 2 → the plant's height has doubled\n1>p>0 → plant has degraded or died\n\nso i'll use a lognormal prior for p\n\"\"\"\n\nwith pm.Model() as m6:\n σ = pm.Exponential('σ', 1)\n p = pm.Lognormal('p', mu=0, sd=0.25)\n μ = d.h0.values * p\n h1 = pm.Normal('h1', mu=μ, sd=σ, observed=d.h1.values)\n trc6 = pm.sample(tune=1000)\npm.summary(trc6)\n\"\"\"\nThe above suggests about 40% average growth. Now to include both treatment and\nfungus variables with the intention of measuring the effect of both. 
They will\nbe part of the proportionality model like so:\n\"\"\"\n\nwith pm.Model() as m7:\n σ = pm.Exponential('σ', 1)\n βT = pm.Normal('βT', 0, 0.5)\n βF = pm.Normal('βF', 0, 0.5)\n α = pm.Lognormal('α', 0, 0.25)\n p = α + βT * d.treatment.values + βF * d.fungus.values\n μi = d.h0.values * p\n h1 = pm.Normal('h1', mu=μi, sd=σ, observed=d.h1.values)\n trc7 = pm.sample(tune=1000)\npm.summary(trc7)\n\"\"\"\nTreatment appears to have negligible effect even though βF posterior indicates fungus impacts\ngrowth.\nThe problem is that fungus is a consequence of treatment; i.e. fungus is a post-treatment variable.\nThe model asked the question \"Once we know fungus is present does treatment matter?\" ⇒ No.\nThe next model ignores the fungus variable\n\"\"\"\n\nwith pm.Model() as m8:\n σ = pm.Exponential('σ', 1)\n α = pm.Lognormal('α', 0, 0.2)\n βT = pm.Normal('βT', 0, 0.5)\n p = α + βT * d.treatment.values\n μ = d.h0.values * p\n h1 = pm.Normal('h1', mu=μ, sd=σ, observed=d.h1.values)\n trc8 = pm.sample(tune=1000)\n\npm.summary(trc8)\n\"\"\"\nNow the treatment effect is plain to see. Note that:\n1. It makes sense to control for pre-treatment differences such as initial height, h0, here.\n2. Including post-treatment variables can mask the treatment itself.\n3. Note that model m7 is still useful to identify the causal mechanism!\n\"\"\"\nplant_dag = CausalGraphicalModel(nodes=['H0', 'H1', 'T', 'F'],\n edges=[('H0', 'H1'), ('T', 'F'), ('F', 'H1')])\nplant_dag.draw()\nplant_dag.is_d_separated('T', 'H1')\nplant_dag.is_d_separated('T', 'H1', 'F')\nplant_dag.get_all_independence_relationships()\n","sub_path":"ch6_2.py","file_name":"ch6_2.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"509257623","text":"\"\"\"\nhttps://github.com/XML-Security/signxml/blob/master/LICENSE\n\n Copyright 2014 Andrey Kislyuk\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport M2Crypto\nimport hashlib\nfrom base64 import b64decode\nfrom lxml import etree\nfrom signxml import XMLVerifier, VerifyResult,\\\n ds_tag, fromstring, InvalidInput, InvalidDigest, InvalidSignature, namespaces\n\n\nclass M2XMLVerifier(XMLVerifier):\n \"\"\"\n Uses M2Crypto instead of pyOpenSSL\n\n Allows us to ignore certificate signing as self signed certificates\n are fine to use provided you prefetch the SPs certificate from\n their metadata AND do so over a secure connection.\n \"\"\"\n\n def verify(self, data, require_x509=True, x509_cert=None, cert_subject_name=None, ca_pem_file=None, ca_path=None,\n hmac_key=None, validate_schema=True, parser=None, uri_resolver=None, id_attribute=None,\n expect_references=1):\n \"\"\"\n See XMLVerifier.verify\n \"\"\"\n # Using args, kwargs makes it easier to call super\n # ...sort of\n args = [data]\n kwargs = {\n 'require_x509': require_x509,\n 'x509_cert': x509_cert,\n 'cert_subject_name': cert_subject_name,\n 'ca_pem_file': ca_pem_file,\n 'ca_path': ca_path,\n 'hmac_key': hmac_key,\n 'validate_schema': 
validate_schema,\n 'parser': parser,\n 'uri_resolver': uri_resolver,\n 'id_attribute': id_attribute,\n 'expect_references': expect_references,\n }\n self.hmac_key = hmac_key\n self.require_x509 = require_x509\n self.x509_cert = x509_cert\n self._parser = parser\n\n if self.x509_cert:\n self.require_x509 = True\n\n if id_attribute is not None:\n self.id_attributes = (id_attribute, )\n\n root = self.get_root(data)\n if root.tag == ds_tag(\"Signature\"):\n signature_ref = root\n else:\n signature_ref = self._find(root, \"Signature\", anywhere=True)\n\n # HACK: deep copy won't keep root's namespaces\n signature = fromstring(etree.tostring(signature_ref), parser=parser)\n\n if validate_schema:\n self.schema().assertValid(signature)\n\n signed_info = self._find(signature, \"SignedInfo\")\n c14n_method = self._find(signed_info, \"CanonicalizationMethod\")\n c14n_algorithm = c14n_method.get(\"Algorithm\")\n signature_method = self._find(signed_info, \"SignatureMethod\")\n signature_value = self._find(signature, \"SignatureValue\")\n signature_alg = signature_method.get(\"Algorithm\")\n raw_signature = b64decode(signature_value.text)\n x509_data = signature.find(\"ds:KeyInfo/ds:X509Data\", namespaces=namespaces)\n signed_info_c14n = self._c14n(signed_info, algorithm=c14n_algorithm)\n\n if x509_data is not None or self.require_x509:\n if self.x509_cert is None:\n if x509_data is None:\n raise InvalidInput(\"Expected a X.509 certificate based signature\")\n certs = [cert.text for cert in self._findall(x509_data, \"X509Certificate\")]\n if not certs:\n msg = \"Expected to find an X509Certificate element in the signature\"\n msg += \" (X509SubjectName, X509SKI are not supported)\"\n raise InvalidInput(msg)\n\n elif len(certs) > 1:\n msg = \"Currently cannot handle more than 1 certificate.\"\n raise InvalidInput(msg)\n\n signing_cert = certs[0] # TODO: handle chains\n else:\n signing_cert = self.x509_cert\n\n signature_digest_method = self._get_signature_digest_method(signature_alg).name\n try:\n x509_cert_str = '-----BEGIN CERTIFICATE-----\\n' + signing_cert + '\\n-----END CERTIFICATE-----'\n x509_cert = M2Crypto.X509.load_cert_string(x509_cert_str.encode('utf-8'))\n\n # Digest SignedInfo\n signed_info_hash = hashlib.new(signature_digest_method)\n signed_info_hash.update(signed_info_c14n)\n signed_info_digest = signed_info_hash.digest()\n\n pub_key = x509_cert.get_pubkey()\n pub_key.reset_context(md=signature_digest_method)\n pub_key.verify_init()\n assert pub_key.verify_update(signed_info_digest) == 1, 'verify_update failed'\n result = pub_key.verify_final(raw_signature)\n assert result != -1, 'Error in verify_final'\n except AssertionError:\n raise InvalidSignature(\"Signature verification failed\")\n elif \"hmac-sha\" in signature_alg:\n return super(M2XMLVerifier, self).verify(*args, **kwargs)\n else:\n key_value = signature.find(\"ds:KeyInfo/ds:KeyValue\", namespaces=namespaces)\n if key_value is None:\n raise InvalidInput(\"Expected to find either KeyValue or X509Data XML element in KeyInfo\")\n\n self._verify_signature_with_pubkey(signed_info_c14n, raw_signature, key_value, signature_alg)\n\n verify_results = []\n for reference in self._findall(signed_info, \"Reference\"):\n transforms = self._find(reference, \"Transforms\", require=False)\n digest_algorithm = self._find(reference, \"DigestMethod\").get(\"Algorithm\")\n digest_value = self._find(reference, \"DigestValue\")\n payload = self._resolve_reference(root, reference, uri_resolver=uri_resolver)\n payload_c14n = 
self._apply_transforms(payload, transforms, signature_ref, c14n_algorithm)\n if digest_value.text != self._get_digest(payload_c14n, self._get_digest_method(digest_algorithm)):\n raise InvalidDigest(\"Digest mismatch for reference {}\".format(len(verify_results)))\n\n # We return the signed XML (and only that) to ensure no access to unsigned data happens\n try:\n payload_c14n_xml = fromstring(payload_c14n)\n except etree.XMLSyntaxError:\n payload_c14n_xml = None\n verify_results.append(VerifyResult(payload_c14n, payload_c14n_xml, signature))\n\n if type(expect_references) is int and len(verify_results) != expect_references:\n msg = \"Expected to find {} references, but found {}\"\n raise InvalidSignature(msg.format(expect_references, len(verify_results)))\n\n return verify_results if expect_references > 1 else verify_results[0]\n","sub_path":"saml2idp/signxml.py","file_name":"signxml.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"48139056","text":"\n# ============LICENSE_START==========================================\n# org.onap.vvp/test-engine\n# ===================================================================\n# Copyright © 2017 AT&T Intellectual Property. All rights reserved.\n# ===================================================================\n#\n# Unless otherwise specified, all software contained herein is licensed\n# under the Apache License, Version 2.0 (the “License”);\n# you may not use this software except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n#\n# Unless otherwise specified, all documentation contained herein is licensed\n# under the Creative Commons License, Attribution 4.0 Intl. 
(the “License”);\n# you may not use this documentation except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://creativecommons.org/licenses/by/4.0/\n#\n# Unless required by applicable law or agreed to in writing, documentation\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ============LICENSE_END============================================\n#\n# ECOMP is a trademark and service mark of AT&T Intellectual Property.\nfrom datetime import datetime\n\nfrom selenium.webdriver.support.ui import Select\n\nfrom services.constants import Constants, ServiceProvider\nfrom services.frontend.base_actions.click import Click\nfrom services.frontend.base_actions.enter import Enter\nfrom services.frontend.base_actions.wait import Wait\nfrom services.helper import Helper\nfrom services.logging_service import LoggingServiceFactory\nfrom services.session import session\n\n\nlogger = LoggingServiceFactory.get_logger()\n\n\nclass FEWizard:\n\n E2Edate = None\n\n @staticmethod\n def add_vf():\n try:\n logger.debug(\"Tab Add Virtual Functions\")\n Wait.text_by_css(\n Constants.Dashboard.Wizard.Title.CSS,\n Constants.Dashboard.Wizard.AddVF.Title.TEXT,\n wait_for_page=True)\n vfName = \"newVF\" + Helper.rand_string(\"randomString\")\n vfVersion = \"newVFVersion\" + \\\n Helper.rand_string(\n \"randomNumber\") + Helper.rand_string(\"randomString\")\n Enter.text_by_name(\"virtualFunction\", vfName)\n Enter.text_by_name(\"VFversion\", vfVersion, wait_for_page=True)\n FEWizard.date_picker_wizard()\n Select(session.ice_driver.find_element_by_id(\n Constants.Dashboard.Wizard.AddVF.AIC_Version.TEXT\n )).select_by_visible_text(\"AIC 3.5\")\n Select(session.ice_driver.find_element_by_id(\n Constants.Dashboard.Wizard.AddVF.ECOMP_Release.TEXT\n )).select_by_visible_text(\"Unknown\")\n session.E2Edate = FEWizard.get_lab_entry_date()\n Click.css(Constants.SubmitButton.CSS, wait_for_page=True)\n Wait.page_has_loaded()\n Wait.name_to_dissappear(\"Add Virtual Function\")\n return vfName\n # If failed - count the failure and add the error to list of errors.\n except Exception as e:\n errorMsg = \"Failed to add a Virtual Function via modal window. 
\" +\\\n \"Exception \" +\\\n str(e)\n raise Exception(errorMsg)\n\n @staticmethod\n def get_lab_entry_date():\n E2Edate = session.ice_driver.find_element_by_id(\n \"add-vf-hidden-target-lab-date\").get_attribute(\"value\")\n return str(E2Edate)\n\n @staticmethod\n def add_vendor_contact():\n logger.debug(\"Tab Add Vendor Contact\")\n Wait.text_by_css(\n Constants.Dashboard.Wizard.Title.CSS,\n Constants.Dashboard.Wizard.AddVendorContact.Title.TEXT,\n wait_for_page=True)\n Select(session.ice_driver.find_element_by_name(\n \"company\")).select_by_visible_text(\"Ericsson\")\n fullname = Helper.rand_string(\n \"randomString\") + Helper.rand_string(\"randomString\")\n Enter.text_by_name(\"fullname\", fullname)\n email = Helper.rand_string(\"randomString\") + \"@ericson.com\"\n Enter.text_by_name(\"email\", email)\n phone = \"201\" + Helper.rand_string(\"randomNumber\", 6)\n Enter.text_by_name(\"phone\", phone)\n Click.css(Constants.SubmitButton.CSS, wait_for_page=True)\n Wait.name_to_dissappear(\"Add Vendor Contact\", wait_for_page=True)\n vendor = {\"company\": \"Ericsson\", \"full_name\": fullname,\n \"email\": email, \"phone\": phone}\n return vendor\n\n @staticmethod\n def add_service_provider_internal():\n logger.debug(\n \"Tab Add \" + ServiceProvider.MainServiceProvider + \" Sponsor\")\n Wait.text_by_css(\n Constants.Dashboard.Wizard.Title.CSS,\n \"Add \" +\n ServiceProvider.MainServiceProvider +\n \" Sponsor\")\n fullname = Helper.rand_string(\n \"randomString\") + Helper.rand_string(\"randomString\")\n Enter.text_by_name(\"fullname\", fullname)\n email = Helper.rand_string(\n \"randomString\") + \"@\" + ServiceProvider.email\n Enter.text_by_name(\"email\", email)\n phone = \"201\" + Helper.rand_string(\"randomNumber\", 6)\n logger.debug(phone)\n Enter.text_by_name(\"phone\", phone)\n Click.css(Constants.SubmitButton.CSS)\n Wait.name_to_dissappear(\"Add AT&T Sponsor\")\n sponsor = {\"company\": ServiceProvider.MainServiceProvider,\n \"full_name\": fullname, \"email\": email, \"phone\": phone}\n return sponsor\n\n @staticmethod\n def invite_team_members(email):\n try:\n logger.debug(\"Tab Invite Team Members\")\n Wait.text_by_name(\n Constants.Dashboard.Wizard.InviteTeamMembers.Title.NAME,\n Constants.Dashboard.Wizard.InviteTeamMembers.Title.TEXT)\n Enter.text_by_name(\"email\", email)\n Wait.text_by_css(\n Constants.SubmitButton.CSS,\n Constants.Dashboard.Wizard.InviteTeamMembers.Button.TEXT)\n Click.css(Constants.SubmitButton.CSS)\n Wait.name_to_dissappear(\n Constants.Dashboard.Wizard.InviteTeamMembers.Title.NAME)\n # If failed - count the failure and add the error to list of errors.\n except Exception as e:\n errorMsg = \"FAILED in Tab Invite Team Members. 
Exception = %s\" % e\n raise Exception(errorMsg)\n\n @staticmethod\n def add_ssh_key(is_negative=False):\n logger.debug(\"About to add an SSH Key in modal window\")\n try: # Add SSH Key from modal window and return key value.\n Wait.text_by_name(Constants.Dashboard.Wizard.AddSSHKey.Title.NAME,\n Constants.Dashboard.Wizard.AddSSHKey.Title.TEXT)\n # Generate an SSH Public Key.\n sshKey = Helper.generate_sshpub_key()\n if is_negative:\n sshKey = sshKey[8:]\n Enter.text_by_name(\"key\", sshKey)\n\n # Check that the submit button exists.\n Wait.text_by_css(\n Constants.SubmitButton.CSS,\n Constants.Dashboard.Wizard.AddSSHKey.Title.TEXT)\n\n Click.css(Constants.SubmitButton.CSS) # Click on submit button.\n if is_negative:\n Wait.text_by_id(\n Constants.Toast.ID,\n Constants.Dashboard.Avatar.Account\n .SSHKey.UpdateFailed.TEXT)\n else:\n Wait.name_to_dissappear(\n Constants.Dashboard.Wizard.AddSSHKey.Title.NAME)\n logger.debug(\"SSH Key added via modal window.\")\n return sshKey\n # If failed - count the failure and add the error to list of errors.\n except Exception as e:\n errorMsg = \"Failed to add an SSH Key in \" +\\\n \"the modal window. Exception=\" + \\\n str(e)\n raise Exception(errorMsg)\n\n @staticmethod\n def invite_team_members_modal(email, wait_modal_to_disappear=True):\n try:\n Click.id(\n Constants.Dashboard.Overview.TeamMember.ID,\n wait_for_page=True)\n Wait.text_by_css(\n Constants.Dashboard.Wizard.Title.CSS,\n Constants.Dashboard.Wizard.InviteTeamMembers.Title.TEXT)\n Enter.text_by_name(\"email\", email)\n Wait.text_by_css(\n Constants.SubmitButton.CSS,\n Constants.Dashboard.Wizard.InviteTeamMembers.Button.TEXT)\n Click.css(\".inviteMembers-form button.btn.btn-primary\", True)\n if wait_modal_to_disappear:\n Wait.modal_to_dissappear()\n # If failed - count the failure and add the error to list of errors.\n except Exception as e:\n errorMsg = \"FAILED in PopUp Invite Team Members. 
Exception=\" + \\\n str(e)\n raise Exception(errorMsg)\n\n @staticmethod\n def date_picker_add_ns(count):\n try:\n session.ice_driver.execute_script(\n \"var el = angular.element(document.querySelector\" +\n \"('.addNextSteps')); el.scope().vm.nextSteps[\" +\n str(count) +\n \"].duedate = new Date('\" +\n str(\n datetime.today().isoformat()) +\n \"')\")\n Click.css(\"div.modal-content\", wait_for_page=True)\n except Exception as e:\n errorMsg = \"Failed to select date with datePicker.\"\n\n raise Exception(errorMsg, str(e))\n\n @staticmethod\n def date_picker_wizard():\n Enter.date_picker('#e2e-lab-entry-date', 'choice.TargetLab')\n Click.css('input[name=\"virtualFunction\"]', wait_for_page=True)\n","sub_path":"services/frontend/fe_wizard.py","file_name":"fe_wizard.py","file_ext":"py","file_size_in_byte":10245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"500648432","text":"import synapse.common as s_common\n\nimport synapse.tests.utils as s_t_utils\n\nimport synapse.tools.autodoc as s_autodoc\n\nclass TestAutoDoc(s_t_utils.SynTest):\n\n async def test_tools_autodoc_docmodel(self):\n\n with self.getTestDir() as path:\n\n argv = ['--doc-model', '--savedir', path]\n\n outp = self.getTestOutp()\n self.eq(await s_autodoc.main(argv, outp=outp), 0)\n\n with s_common.genfile(path, 'datamodel_types.rst') as fd:\n buf = fd.read()\n\n s = buf.decode()\n self.isin('Base types are defined via Python classes.', s)\n self.isin('synapse.models.inet.Addr', s)\n self.isin('Regular types are derived from BaseTypes.', s)\n self.isin(r'inet\\:server', s)\n\n with s_common.genfile(path, 'datamodel_forms.rst') as fd:\n buf = fd.read()\n\n s = buf.decode()\n self.isin('Forms are derived from types, or base types. Forms represent node types in the graph.', s)\n self.isin(r'inet\\:ipv4', s)\n self.isin('Universal props are system level properties which may be present on every node.', s)\n self.isin('.created', s)\n","sub_path":"synapse/tests/test_tools_autodoc.py","file_name":"test_tools_autodoc.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"517840679","text":"'''\nThis script runs a closed-loop simulation where the controller is continuous\nand is allowed continuous, noise-free measurements.\n'''\n\nimport numpy as np\nfrom scipy.integrate import solve_ivp, solve_bvp\nimport scipy.io\nimport time\nimport sys\n\nfrom utilities.other import int_input, load_NN\nfrom examples.choose_problem import system, problem, config, time_dependent\n\nif time_dependent:\n from utilities.neural_networks import HJBnet\n system += '/tspan'\nelse:\n from utilities.neural_networks import HJBnet_t0 as HJBnet\n system += '/t0'\n\n# Loads pre-trained NN for control\n\nparameters, scaling = load_NN('examples/' + system + '/V_model.mat')\n\nmodel = HJBnet(problem, scaling, config, parameters)\n\n# Initializes some parameters\n\nt1 = problem.t1\nN_states = problem.N_states\n\nif len(sys.argv) > 1:\n np.random.seed(int(sys.argv[1]))\n\n# Generates the initial condition\n\nif system[:7] == 'burgers':\n X0 = -2. 
* np.sin(problem.xi * np.pi)\nelse:\n X0 = problem.sample_X0(1)\nbc = problem.make_bc(X0)\n\n# ---------------------------------------------------------------------------- #\n\n# Integrates the closed-loop system (NN controller)\n\nstart_time = time.time()\n\nSOL = solve_ivp(problem.dynamics, [0., t1], X0,\n method=config.ODE_solver, args=(model.eval_U,), rtol=1e-04)\n\nV_NN, A_NN, U_NN = model.bvp_guess(SOL.t.reshape(1,-1), SOL.y, eval_U=True)\n\nsave_dict = {'NN_time': time.time() - start_time, 't': SOL.t,\n 'X_NN': SOL.y, 'A_NN': A_NN, 'V_NN': V_NN, 'U_NN': U_NN}\n\n# Solves the two-point boundary value problem\n\nstart_time = time.time()\n\nSOL = solve_bvp(problem.aug_dynamics, bc, SOL.t, np.vstack((SOL.y, A_NN, V_NN)),\n verbose=2, tol=config.data_tol, max_nodes=config.max_nodes)\n\nX_aug = SOL.sol(save_dict['t'])\n\nsave_dict.update({'BVP_success': SOL.success,\n 'BVP_time': time.time() - start_time,\n 'X_BVP': X_aug[:N_states],\n 'A_BVP': X_aug[N_states:2*N_states],\n 'V_BVP': X_aug[-1:],\n 'U_BVP': problem.U_star(X_aug),\n 'H_BVP': problem.Hamiltonian(save_dict['t'], X_aug)})\n\n# Integrates the closed-loop system (LQR controller)\n\nstart_time = time.time()\n\nSOL = solve_ivp(problem.dynamics, [0., t1], X0,\n method=config.ODE_solver, args=(problem.U_LQR,), rtol=1e-04,\n dense_output=True)\n\nX = SOL.sol(save_dict['t'])\nU = np.empty((problem.N_controls, X.shape[1]))\nfor k in range(U.shape[1]):\n U[:,k] = problem.U_LQR(save_dict['t'][k], X[:,k])\n\nsave_dict.update({'LQR_time': time.time() - start_time, 'X_LQR': X, 'U_LQR': U})\n\n# ---------------------------------------------------------------------------- #\n\nNN_cost = problem.compute_cost(\n save_dict['t'], save_dict['X_NN'], save_dict['U_NN'])[0,0]\nLQR_cost = problem.compute_cost(\n save_dict['t'], save_dict['X_LQR'], save_dict['U_LQR'])[0,0]\n\nprint('')\nprint('NN cost: %.2f' % (NN_cost),\n ' (%.2f' % (100.*(NN_cost/save_dict['V_BVP'][0,0] - 1.)), '% suboptimal)')\nprint('LQR cost: %.2f' % (LQR_cost),\n ' (%.2f' % (100.*(LQR_cost/save_dict['V_BVP'][0,0] - 1.)), '% suboptimal)')\nprint('Optimal cost: %.2f' % (save_dict['V_BVP'][0,0]))\n\ntry:\n save_dict.update({'xi': problem.xi})\nexcept:\n pass\n\nscipy.io.savemat('examples/' + system + '/results/sim_data.mat', save_dict)\n","sub_path":"simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"27409347","text":"# // Time Complexity : O(n)\n# // Space Complexity : O(1)\n# // Did this code successfully run on Leetcode : Yes\n# // Any problem you faced while coding this : No\n\n\n# // Your code here along with comments explaining your approach\n# 1. find mid\n# 2. reverse from mid.next - \n# 1. using slow.next\n# 2. slow.next = None to detach\n# 3. 
Merge two - Use chaining\n\nclass Solution:\n def reorderList(self, head: ListNode) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n if head is None or head.next is None: return None\n slow = head\n fast = head\n #reverse function\n def rev(cur):\n prev = None\n fast = cur.next\n while fast:\n cur.next = prev\n prev = cur\n cur = fast \n fast = fast.next\n cur.next = prev\n return cur\n \n # find mid\n while fast.next and fast.next.next:\n fast = fast.next.next\n slow = slow.next\n \n second = rev(slow.next)\n # detach\n slow.next = None\n cur = ListNode(-1)\n \n #merge\n while second and head:\n #taking from first LL\n cur.next = head\n cur = cur.next\n head = head.next\n \n #taking from second LL\n cur.next = second\n cur = cur.next\n second = second.next\n \n #remaining\n while second:\n cur.next = second\n cur = cur.next\n second = second.next\n \n #remaining\n while head:\n cur.next = head\n cur = cur.next\n head = head.next\n \n\n \n \n \n ","sub_path":"ReorderLL.py","file_name":"ReorderLL.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"513825633","text":"# -*- coding:cp936 -*-\n\"\"\"\nWrite a program that reads arbitrary strings from the keyboard in a loop and writes\nthem to a text file \"test.txt\", until the user enters a #, at which point the loop\nexits, no more input is read, and the file is closed.\n\n\"\"\"\n#**********Program**********\nflag = 0\nf = open('test.txt', 'w+')\nwhile flag==0:\n a = input()\n if a == '#':\n f.close()\n flag = 1\n else:\n f.writelines(a)\n#********** End **********\n","sub_path":"第14周/03_设计/04 循环输入文字、写入文件/prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"612730017","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@author = xijue\n\"\"\"\n\nimport requests\nimport re\nimport os\n\ndef getHtmlMessage(url):\n try:\n # fetch the song's main page\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n html = r.text\n # extract the song info with a regex\n pattern = re.compile(r'window.__DATA__.*?kg_nick\":\"(.*?)\",\".*?playurl\":\"(.*?)\",\".*?song_name\":\"(.*?)\"', re.S)\n return pattern.search(html)\n except:\n return (\"failed to fetch the page\")\n\n\ndef getmp3(url):\n \"\"\"\n Download the mp3\n \"\"\"\n message = getHtmlMessage(url)\n # get the singer's name\n singer = message.group(1)\n # get the song title\n song_name = message.group(3)\n # get the song URL\n songurl = message.group(2)\n # set the download path (folder named after the singer); set your own path on Linux\n root = \"E://quanminsongs//\"+singer \n\t\n path = root + \"//\" +song_name + \"-\" + singer + \".m4a\"\n try:\n if not os.path.exists(root):\n os.mkdir(root)\n print(\"Downloading\" +\" \"+ song_name)\n if not os.path.exists(path):\n rqt = requests.get(songurl)\n with open(path, 'wb') as f:\n f.write(rqt.content)\n f.close()\n print(\"Download succeeded\")\n else:\n print(\"File already exists, download skipped\")\n except Exception as c:\n print(\"Scraping failed\")\n\n\n","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"122487456","text":"# The name of this file must match the file name which we import in the __init__.py file\n# -*- coding: utf-8 -*-\nfrom openerp.osv import fields, osv\nfrom openerp import api\nfrom datetime import date,time\nfrom openerp.tools import amount_to_text_en\n\n\nclass mutual_dummy_invoice(osv.osv):\n _name = \"mutual.dummy.invoice\"\n _rec_name = \"partner_id\"\n _columns = {\n 'partner_id': fields.many2one('res.partner','Customer', store=True,required=True),\n 'invoice_line': 
fields.one2many('mutual.dummy.invoice.line','name','Invoice Lines',store=True),\n 'ref_':fields.char('Ref',store=True),\n 'date_invoice':fields.date('Invoice Date',store=True),\n 'due_date': fields.date('Due Date', store=True),\n 'from_': fields.date('From',store=True),\n 'to_': fields.date('To',store=True),\n 'amount_untaxed': fields.float('Subtotal', store=True, readonly=True, default=0.0, compute='cal_tax_and_untaxedamount'),\n 'amount_tax': fields.float('Taxes', store=True, readonly=True, default=0.0, compute='cal_tax_and_untaxedamount'),\n 'amount_total': fields.float('Total', store=True, readonly=True, default=0.0, compute='cal_total_amount'),\n 'comment': fields.text('Comment', store=True)\n }\n\n @api.one\n @api.depends('invoice_line.price_subtotal')\n def cal_tax_and_untaxedamount(self):\n for line in self.invoice_line:\n self.amount_untaxed += line.price_subtotal\n self.amount_tax += (line.tax * line.price_subtotal)/100\n\n @api.one\n @api.depends('amount_untaxed','amount_tax')\n def cal_total_amount(self):\n self.amount_total = self.amount_tax + self.amount_untaxed\n\n @api.multi\n def amount_to_text(self, amount, currency):\n convert_amount_in_words = amount_to_text_en.amount_to_text(amount, lang='en', currency='')\n convert_amount_in_words = convert_amount_in_words.replace(' and Zero Cent', ' Only ')\n return convert_amount_in_words\n\n\nclass mutual_dummy_invoice_lines(osv.osv):\n _name = \"mutual.dummy.invoice.line\"\n _rec_name = 'products'\n _columns = {\n 'name': fields.many2one('mutual.dummy.invoice','Name', store=True),\n 'products': fields.many2one('product.template', 'Products', store=True),\n 'quantity': fields.float('Quantity', store=True,default=0.0),\n 'price_unit': fields.float('Unit Price', store=True,default=0.0),\n 'tax': fields.float('Tax%', store=True,default=0.0),\n 'price_subtotal': fields.float('Amount', store=True, readonly=True,default=0.0, compute='basic_amount'),\n }\n\n @api.one\n @api.depends('quantity','price_unit')\n def basic_amount(self):\n self.price_subtotal = self.quantity * self.price_unit\n","sub_path":"mutual_dummy_invoices/mutual_dummy_invoice.py","file_name":"mutual_dummy_invoice.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"558429057","text":"from bs4 import BeautifulSoup\r\nfrom tkinter import *\r\nfrom urllib.request import urlopen\r\nimport webbrowser\r\nimport lxml\r\n\r\nclass Feed:\r\n def __init__(self, parent):\r\n \r\n parse_xml_url = urlopen(\"http://rss.cnn.com/rss/cnn_topstories.rss\")\r\n xml_page = parse_xml_url.read()\r\n parse_xml_url.close()\r\n \r\n self.source = BeautifulSoup(xml_page, \"xml\")\r\n self.news_list = self.source.findAll(\"item\")\r\n self.entrynum = 0\r\n entry = self.news_list[self.entrynum]\r\n self.button = Button(root, text = 'Click and Quit', command=self.quit)\r\n self.button.pack(side=LEFT)\r\n self.label = Label(parent, text=str(entry.title.contents)[2:-2], font = \"Arial 20\")\r\n self.label.bind(\"<Button-1>\" ,lambda event, link=entry.link.text: self.callback(event, link))\r\n self.label.pack(side=LEFT)\r\n \r\n # start the timer\r\n self.label.after(3000, self.refresh_label)\r\n\r\n def refresh_label(self):\r\n \"\"\" refresh the content of the label every three seconds \"\"\"\r\n #if we've reached the end of our list, cycle back to the start\r\n if(self.entrynum < len(self.news_list)):\r\n # increment the entry\r\n self.entrynum += 1\r\n else:\r\n self.entrynum = 0\r\n \r\n # display the new entry\r\n 
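# (self.entrynum was advanced above, wrapping back to 0 at the end of the list)\r\n 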
entry = self.news_list[self.entrynum]\r\n self.label.config(text=\"%s\" % str(entry.title.contents)[2:-2])\r\n self.label.bind(\"<Button-1>\" ,lambda event, link=entry.link.text: self.callback(event, link))\r\n # request tkinter to call self.refresh_label after 3s (the delay is given in ms)\r\n self.label.after(3000, self.refresh_label)\r\n def quit(self):\r\n root.destroy()\r\n def callback(self, event, article_link):\r\n webbrowser.open_new(article_link)\r\n\r\nif __name__ == \"__main__\":\r\n root = Tk()\r\n timer = Feed(root)\r\n root.overrideredirect(True)\r\n root.mainloop()","sub_path":"RSSFeed.py","file_name":"RSSFeed.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"224053893","text":"'''\nWrites the contents of a text file to the graphics area. The file contains several lines and the function file_list(file_path)\nprints these lines below it in some font. In the global variable canvas, there is a reference to the graphics area.\n'''\n\nimport tkinter\nfile_path = \"text3.txt\"\nwidth,height = \"400\",\"800\"\n\ncanvas = tkinter.Canvas(width= width, height= height)\ncanvas.pack()\n\nx,y = int(width)/2,20\n\nwith open(file_path, \"r\", encoding=\"utf-8\")as t:\n for line in t:\n #canvas.create_text(x,y,text=line)\n\n #right alignment\n canvas.create_text(x-(x/2), y, text=line,anchor='nw')\n y += 20\n\ntkinter.mainloop()","sub_path":"7. Text files/10 Program text_to_graphic.py","file_name":"10 Program text_to_graphic.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"475717056","text":"from os import listdir\nfrom os.path import join\nfrom fnmatch import fnmatch\n\nvals = {}\n\nfor f in listdir(\"./\"):\n if fnmatch(f, \"*.*\"):\n try:\n float(f)\n if float(f)>=7:\n with open(join(\"./\", f, \"na.bcc.scf.{}.out\".format(f))) as scf:\n for line in scf.readlines():\n if \"!\" in line:\n scf_energy = float(line.split()[-2])\n with open(join(\"./\", f, \"na.bcc.fqha.calc\")) as fqha:\n for line in fqha.readlines():\n [temp, phon_energy] = line.split()\n temp = float(temp)\n phon_energy = float(phon_energy)\n if temp not in vals.keys():\n vals[temp] = {}\n vals[temp][float(f)]=phon_energy+scf_energy\n except: pass\n\ndef build_str(t):\n v_str = \"\"\n vol = sorted(vals[t].keys())\n for i in vol:\n v_str += str(i) + \"\\t\" + str(vals[t][i]) + \"\\n\"\n return v_str\nfor temp in vals.keys():\n print(temp)\n with open(\"ev_formatted/na.bcc.\"+str(temp), \"w\") as f:\n f.write(build_str(temp))\n","sub_path":"Na_Phonons/fcc_low_gpa_test/Na_BCC/phonons/ev_format.py","file_name":"ev_format.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"622310112","text":"from typing import Dict, Union\n\nclass Leaf:\n\n def __init__(self, word : str):\n self.str = word\n\n def pop_first(self):\n if self.str:\n out = self.str[0]\n self.str = self.str[1:]\n return out\n else:\n raise IndexError()\n\n def has_leaf(self, leaf):\n return True\n\n @property\n def isnull(self):\n return not self.str\n\nclass Node:\n\n def __init__(self):\n self.children = dict() # type: Dict[str, Union[Node, Leaf]]\n self.isleaf = 
False\n\n def add_leaf(self, leaf : Leaf):\n try:\n leaf_first_letter = leaf.pop_first()\n if leaf_first_letter in self.children:\n child = self.children[leaf_first_letter]\n if isinstance(child, Leaf):\n new_child = Node()\n self.children[leaf_first_letter] = new_child\n new_child.add_leaf(child)\n new_child.add_leaf(leaf)\n elif isinstance(child, Node):\n child.add_leaf(leaf)\n\n else:\n self.children[leaf_first_letter] = leaf\n except IndexError:\n self.isleaf = True\n\n def has_leaf(self, leaf : Leaf) -> bool:\n if leaf.isnull:\n return self.isleaf\n else:\n leaf_first_letter = leaf.pop_first()\n if leaf_first_letter in self.children:\n return self.children[leaf_first_letter].has_leaf(leaf)\n else:\n return False\n\n\n\n","sub_path":"tools/effects/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"513078264","text":"import time\nimport random\nimport multiprocessing\n\n#Worker function\ndef worker():\n crunch(10000,1000,2000,2)\n print (\"worker finished\")\n return\n\n#Crunch generator\ndef crunch (amountOfWork, rangeLower, rangeUpper, exponent):\n #Variables\n input = []\n output = []\n srandom = time.time()\n\n #Generate random numbers\n for x in range(amountOfWork):\n input.append(random.randint(rangeLower,rangeUpper))\n erandom = time.time()\n\n #Loop to Double exponent\n for x in input:\n output.append(exponize(x,exponent))\n eoutput = time.time()\n\n #Output\n a = erandom-srandom\n b = eoutput-erandom\n with open('your_file.txt', 'w') as f:\n for item in output:\n f.write(\"%s\\n\" % item)\n print (\"Randomtime: \" + str(a))\n print (\"Exponenttime: \" + str(b))\n\n#Double Exponent calculator\ndef exponize (number, exponent):\n res = number\n while (exponent > 1):\n res = res**number\n exponent = exponent-1\n #print(res)\n return(res)\n\n#Main thread\nif __name__ == '__main__':\n print(\"Program Started\")\n jobs = []\n for i in range(3):\n p = multiprocessing.Process(target=worker)\n jobs.append(p)\n p.start()","sub_path":"Projects/Python @ wurk/Crunch Multiprocessing.py","file_name":"Crunch Multiprocessing.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"594217745","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"SNLLS_1_IRISCLASS.py\n#------------------------------------------------------------------------------\n# Script to test 6 algorithms on a least-squares classification problem.\n# The full problem is \n#\n# minimize f(w) = ||r(w)||^2 / 2K\n# = ( sum ri^2 ) / 2K\n# = ( sum (model(x_i)-y_i)^2 ) / 2K, (1)\n# \n# where f: R^n->R (nonlinear least-squares NLLS) objective/loss, \n# r: R^n->R^N and \"K\" is a constant for averaging. Typically not all of\n# the data (x_i,y_i) i \\in [0,N] is available at once, because of large \n# sizes. Instead smaller \"batched\" portions of the data are used.\n# The data is divided into N=L*M with separation\n# \n# f(w) = ( sum fj(w) ), j \\in [1,M]\n# \n# where\n#\n# fj(w) = ( sum (model(x_l)-y_l)^2 ) / 2K, \n# \n# and l \\in [(j-1)*L,j*L]. One evalution of a loss function \n# (using e.g., TensorFlow) is\n# \n# 2K/L * fj(w) = \"loss_value\" = ( sum (model(x_l)-y_l)^2 ) / L (2)\n#\n# \n# This script compares \"stochastic\" algorithms using the information in\n# (2) to solve the problem in (1). 
The algorithms are\n# \n# NLLS (Full Jacobian nonlinear least-squares)\n# SNLLS1 (rank-1 stochastic jacobian least-squares)\n# SNLLSL (rank-L stochastic jacobian least-squares)\n# SGD \n# ADAM \n# ADAGRAD\n#------------------------------------------------------------------------------\n# MODIFICATIONS: \n# 01/18/21, X.X., Including \"MeanSquaredError()\"\n# 01/19/21, X.X., Using \"softmax\", and \"one_hot\" from TF\n# 02/03/21, X.X., Using the TF jacobian\n# 03/01/21, X.X., Using a non-linear least squares update\n# 03/09/21, X.X., Comparing outcomes from SGD and NLLS solver\n# 03/12/21, X.X., Trying Stochastic Jacobian\n# 03/15/21, X.X., Switching-off experimental \"parfor\" for speed-up \n# 06/21/21, X.X., Writing results to files\n# 07/01/21, X.X., Modifications for consistency \n\"\"\"\n\n#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n## Setup program\n\n### General imports\nimport time\nimport os\nimport tensorflow as tf\nimport numpy as np\n\n### Solver imports\nimport NLLS\nimport SNLLS1\nimport SNLLSL\n\n# Setup for storing data\ndataPath = './DATA/'\nfilePrefix = 'EX_1'\nnumRuns = 5 # 10\n\n# Setting logging level\n#tf.get_logger().setLevel('ERROR')\n\n## DATA\n# [dataset of 120 Iris flowers](https://en.wikipedia.org/wiki/Iris_flower_data_set) \ntrain_dataset_url = \"https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv\"\ntrain_dataset_fp = tf.keras.utils.get_file(fname=os.path.basename(train_dataset_url),\n origin=train_dataset_url)\n# column order in CSV file\ncolumn_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']\n\nfeature_names = column_names[:-1]\nlabel_name = column_names[-1]\n\nbatch_size = 32\n\ntrain_dataset = tf.data.experimental.make_csv_dataset(\n train_dataset_fp,\n batch_size,\n column_names=column_names,\n label_name=label_name,\n num_epochs=1)\n\nfeatures, labels = next(iter(train_dataset))\n\ndef pack_features_vector(features, labels):\n \"\"\"Pack the features into a single array.\"\"\"\n features = tf.stack(list(features.values()), axis=1)\n return features, labels\n\ntrain_dataset = train_dataset.map(pack_features_vector)\n\nfeatures, labels_ = next(iter(train_dataset))\n\n# Define labels as \"one_hot\" indicators to use MSE loss\n# 01/19/21, Modification, X.X.\nlabels = tf.one_hot(labels_,3)\n\n## MODEL DEFINITION\n\n# Modification of output layer to include a \"softmax\" probability\n# 01/19/21, Modification, X.X.\n\n# Number of algorithms in this experiment\n# Algs. 
1--3 correspond to NLLS methods, and 4--6 correspond to known methods\nnumAlgs = 6\n\n# Initialize one model per algorithm\nmodels = []\nfor i in range(numAlgs):\n models.append(tf.keras.Sequential([\n tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4,)),\n tf.keras.layers.Dense(10, activation=tf.nn.relu),\n tf.keras.layers.Dense(3, activation=tf.nn.softmax)]))\n\n\n# Initialize \"model0\" by calling on it\npredictions = models[0](features)\n\n# Using a mean squared loss function, 01/18/21\nloss_object = tf.keras.losses.MeanSquaredError()\n\ndef loss(model, x, y, training):\n # training=training is needed only if there are layers with different\n # behavior during training versus inference (e.g. Dropout).\n y_ = model(x, training=training)\n return loss_object(y_true=y, y_pred=y_)\n\n# Assign same initial variables to all models and\n# print losses \nlossesL = [] #np.zeros((numAlgs,1))\nfor m in range(numAlgs):\n for i in range(len(models[m].trainable_variables)):\n models[m].trainable_variables[i].assign(models[0].trainable_variables[i])\n lossesL.append(loss(models[m],features,labels,training=True))\n #losses[m] = loss(models[m],features,labels,training=True)\n \n#l1 = loss(model1, features, labels, training=True)\nprint(\"\\nLoss test (Initial): \\n\")\n \nsolverFormat = \"NLLS SNLLS1 SNLLSL SGD ADAM ADAGRAD \\n\"\nlossFormat = (\"{0[0]:.4f} {0[1]:.4f} {0[2]:.4f} {0[3]:.4f}\"\n \"{0[4]:.4f} {0[5]:.4f} \\n\")\n \n#print(\"NLLS (New Alg.) \\t SGD (Old Alg.) \\n\")\nprint(\"Epoch \\t\"+solverFormat)\nprint(\"Init. \\t\"+lossFormat.format(lossesL))\n#print(\"{} \\t {} \\n\".format(l,l1))\n\n# Gradient to define the training algorithm\n# The nonlinear least-squares methods will have additional\n# gradient computation functions, because Jacobians (or --\n# stochastic approximations) are also included\ndef grad(model, inputs, targets):\n with tf.GradientTape() as tape:\n loss_value = loss(model, inputs, targets, training=True)\n return loss_value, tape.gradient(loss_value, model.trainable_variables)\n\n# This part defines the gradient and Jacobian computation \n# (with initial evaluation at iteration k = 0)\n# To be used by model0\nk = 0\n\n# Functions \"gradJac\" and \"update_step\"\n# This function call makes sure that all is in order\nbatch_size_Jac = 96 # 32 (Value 96 corresponds to the full data)\nloss_value, grads, errsV, jacV, idxJac = NLLS.gradJac(models[0],features,labels,batch_size_Jac)\n\n\n### PROBLEM DIMENSION\n# Counter for number of derivative computations, gradients and Jacobians\nnumSolv = 0 \n\nnumData = len(idxJac)\nnumLays = len(grads)\nvarDims = np.zeros((numLays,2))\nnumVars = 0\nshps = []\nfor i in range(numLays):\n shpi = grads[i].shape\n shps.append(shpi)\n numVars = numVars + np.prod(shpi)\n\n# Print size of variables and Data (for batch)\nprint(\"\\nSize variables: {}, Size data: {}\".format(numVars,\n numData))\n\n## KNOWN OPTIMIZER\nlearning_rate = 1.0 \n# SGD\noptimizerSG = tf.keras.optimizers.SGD(learning_rate=learning_rate)#(learning_rate=0.01)\n\n# ADAM\noptimizerAD = tf.keras.optimizers.Adam()#(learning_rate=0.01)\n\n# ADAGRAD\noptimizerAG = tf.keras.optimizers.Adagrad(learning_rate=learning_rate)#(learning_rate=0.01)\n \n\n# PARAMTERS\n\n# SNLLS1 (model2)\n\n# Indices for stoch. 
Jacobian\nnumData0 = numData\nidxE = tf.constant(range(numData0))\nidxJ = tf.constant(range(numVars))\nnDm1 = numData0-1\n\n# Jacobian accumulation\njk = tf.Variable(np.zeros(numVars),dtype=tf.float32)\nlk = 0\ndelta = 0.8 # np.sqrt(numData/(2*(numSamples/batchSize)))\ndelta1 = 1.0\n\nrho = 0.0 # 0.0005 # 0.0005 Parameter for error terms added to the diagonal\nek = np.zeros(numVars)\ngDiag2 = tf.ones(numVars)\ngDiag2 = 0.0000000001 + tf.zeros(numVars)\n#beta2 = 1 #7\nbeta2 = 0.05\n\n# SNLLSL (model3)\n\n#beta3 = 1#learning_rate # 0.5 0.8 #learning_rate #1#learning_rate\n\nbeta3 = 0.05\ngDiag3 = 0.0000000001 + tf.zeros(numVars)\njacs3 = tf.Variable(np.zeros(numVars),dtype=tf.float32)\n\n# Storing intermediate data values\ntrain_loss_results = []\ntrain_accuracy_results = []\ntrain_loss_results1 = []\ntrain_accuracy_results1 = []\n\n# Timings\ntimes_ave_Jacs = []\ntimes_ave_Updates = []\n\nnum_epochs = 50\n\nfor run in range(numRuns):\n \n print(\"\\nRun:\"+str(run)+\"\\n\")\n print(\"Epoch \\t\"+solverFormat)\n \n # Store loss values\n fileName = filePrefix+'_LOSS_RUN_'+str(run)\n fileW = open((dataPath+fileName+'.txt'),'w')\n \n # Reinitialize variables\n if run > 0:\n models.clear()\n for i in range(numAlgs):\n models.append(tf.keras.Sequential([\n tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4,)),\n tf.keras.layers.Dense(10, activation=tf.nn.relu),\n tf.keras.layers.Dense(3, activation=tf.nn.softmax)]))\n \n \n idxE = tf.constant(range(numData0))\n idxJ = tf.constant(range(numVars))\n nDm1 = numData0-1\n \n # Variables being accumulated\n jk = tf.Variable(np.zeros(numVars),dtype=tf.float32)\n lk = 0\n ek = np.zeros(numVars) \n gDiag2 = 0.0000000001 + tf.zeros(numVars) #tf.ones(numVars)\n gDiag3 = 0.0000000001 + tf.zeros(numVars) #tf.ones(numVars)\n jacs3 = tf.Variable(np.zeros(numVars),dtype=tf.float32)\n\n for epoch in range(num_epochs):\n epoch_loss_avg = tf.keras.metrics.Mean()\n # Modification of loss measure\n # epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n \n # For model1\n epoch_loss_avg1 = tf.keras.metrics.Mean()\n epoch_accuracy1 = tf.keras.metrics.SparseCategoricalAccuracy() \n \n # Loss values for storings\n losses = np.zeros((numAlgs))\n \n # Training loop - using batches of 32\n # Timing data\n tJs = 0.0\n tUs = 0.0\n denum = 0\n for x, y in train_dataset:\n # Convert labels \n y_one = tf.one_hot(y,3) \n numData = np.prod(y_one.shape)\n \n #---------------------- Optimize the model---------------------------------\n # This uses the \"nonlinear least squares (NLLS)\" update_step function\n # and gradient and Jacobian computations\n \n # Compute derivatives\n #loss_value, grads, errsV, jacV = gradJac(model,features,labels)\n tsJ = time.time()\n #loss_value, grads, errsV, jacV = gradJac(model,x,y_one)\n loss_value, grads, errsV, jacV, idxJac = NLLS.gradJac(models[0],x,y_one,numData) # batch_size_Jac\n k = k + 1\n teJ = time.time()\n tJ = teJ-tsJ\n \n # Updated step\n tsU = time.time()\n # numData\n NLLS.update_step(models[0],grads,jacV,numData,numVars,shps) # batch_size_Jac\n numSolv = numSolv + 1\n teU = time.time()\n tU = teU-tsU\n \n # Updates of total times\n tJs = tJs + tJ\n tUs = tUs + tU\n denum = denum+1\n \n # SNLLS1\n nDm1 = numData-1\n loss_value2,grads2,errs2 = SNLLS1.gradJacA(models[1], x, y_one) # idxAbsChng,\n \n # Shuffle indices\n idxE = tf.random.shuffle(idxE)\n idxJ = tf.random.shuffle(idxJ)\n idxJs = idxJ[0:nDm1]\n \n ser = tf.reduce_sum(errs2)\n \n gk12 = 
tf.concat([tf.reshape(grads2[i],[-1]) for i in range(len(grads2))],axis=0)\n \n g1g1 = gk12*gk12\n gg = tf.reduce_sum(g1g1)\n \n ek = ek + ser*g1g1 # ser, np.abs(ser)\n \n #gDiag2 = gDiag2 + tf.math.abs(gk12) + rho*ek\n gDiag2 = gDiag2 + g1g1\n gDiag2Use = np.sqrt(gDiag2)\n #gDiag = 0.1*ovars + tf.math.abs(gk1) # No accumulation\n \n #beta = beta2/gDiag\n lk = lk + loss_value2\n jk,s2 = SNLLS1.update_step(models[1],jk,gk12,beta2/gDiag2Use,shps,lk,delta,delta1) # gDiag2\n \n # SNLLSL\n ts2 = time.time()\n loss_value3,grads3,errs,errs_unsort,idxAbsChng = SNLLSL.gradJacA(models[2], x, y_one)\n \n gk1 = tf.concat([tf.reshape(grads3[i],[-1]) for i in range(len(grads3))],axis=0)\n \n gDiag3 = gDiag3 + gk1*gk1\n gDiag3Use = np.sqrt(gDiag3)\n #gDiag3 = gDiag3 + tf.math.abs(gk1)\n \n # Approximate Jacobian update\n # Using accumulation\n jacs3 = SNLLSL.update_stepA(models[2],grads3,jacs3,errs_unsort,numData,numVars,shps,(beta3/gDiag3Use),idxAbsChng) # gDiag3\n \n te2 = time.time()\n t2 = te2-ts2\n \n # model4 (SGD)\n loss_value4, grads4 = grad(models[3], x, y_one)\n optimizerSG.apply_gradients(zip(grads4, models[3].trainable_variables))\n \n # model5 (Adam)\n loss_value5, grads5 = grad(models[4], x, y_one)\n optimizerAD.apply_gradients(zip(grads5, models[4].trainable_variables))\n \n # model6 (Adagrad)\n loss_value6, grads6 = grad(models[5], x, y_one)\n optimizerAG.apply_gradients(zip(grads6, models[5].trainable_variables))\n #----------------------- End optimization ---------------------------------\n \n # Store losses\n losses[0] = losses[0] + loss_value\n losses[1] = losses[1] + loss_value2\n losses[2] = losses[2] + loss_value3\n losses[3] = losses[3] + loss_value4\n losses[4] = losses[4] + loss_value5\n losses[5] = losses[5] + loss_value6\n \n # Track progress\n epoch_loss_avg.update_state(loss_value) # Add current batch loss\n # Compare predicted label to actual label\n # training=True is needed only if there are layers with different\n # behavior during training versus inference (e.g. Dropout).\n epoch_accuracy.update_state(y, models[0](x, training=True))\n #epoch_accuracy.update_state(y_one, model(x, training=True))\n \n # For model1\n epoch_loss_avg1.update_state(loss_value4) # Add current batch loss\n # Compare predicted label to actual label\n # training=True is needed only if there are layers with different\n # behavior during training versus inference (e.g. 
Dropout).\n            epoch_accuracy1.update_state(y, models[4](x, training=True))\n        \n        # End epoch\n        train_loss_results.append(epoch_loss_avg.result())\n        train_accuracy_results.append(epoch_accuracy.result())\n        \n        train_loss_results1.append(epoch_loss_avg1.result())\n        train_accuracy_results1.append(epoch_accuracy1.result())\n        \n        # Timing updates\n        times_ave_Jacs.append(tJs/denum)\n        times_ave_Updates.append(tUs/denum)\n        \n        if epoch % 1 == 0: # 50\n            print((\"{:03d} \\t{:.4f}   {:.4f}    {:.4f}   {:.4f}\"\n                   \"   {:.4f}   {:.4f}\").format(epoch,\n                                            losses[0]/denum,\n                                            losses[1]/denum,\n                                            losses[2]/denum,\n                                            losses[3]/denum,\n                                            losses[4]/denum,\n                                            losses[5]/denum))\n            \n            fileW.write((\"{:03d},{:.10f},{:.10f},\"\n                   \"{:.10f},{:.10f},{:.10f},{:.10f} \").format(epoch,\n                                            losses[0]/denum,\n                                            losses[1]/denum,\n                                            losses[2]/denum,\n                                            losses[3]/denum,\n                                            losses[4]/denum,\n                                            losses[5]/denum))\n\n    fileW.close()\n\n\n","sub_path":"SNLLS_1_IRISCLASS.py","file_name":"SNLLS_1_IRISCLASS.py","file_ext":"py","file_size_in_byte":15722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"320014995","text":"import logging\nimport tempfile\nimport mappyfile\nimport pytest\n\n\ndef test_open():\n\n    fn = './tests/sample_maps/256_overlay_res.map'\n    d = mappyfile.open(fn)\n    assert d[\"name\"] == \"TEST\"\n\n    d = mappyfile.open(fn, expand_includes=False)\n    assert d[\"name\"] == \"TEST\"\n\n    d = mappyfile.open(fn, include_position=True)\n    assert d[\"name\"] == \"TEST\"\n\n    d = mappyfile.open(fn, include_comments=True)\n    assert d[\"name\"] == \"TEST\"\n\n\ndef test_loads():\n\n    s = \"\"\"MAP NAME \"TEST\" END\"\"\"\n\n    d = mappyfile.loads(s)\n    assert d[\"name\"] == \"TEST\"\n\n    d = mappyfile.loads(s, expand_includes=False)\n    assert d[\"name\"] == \"TEST\"\n\n    d = mappyfile.loads(s, include_position=True)\n    assert d[\"name\"] == \"TEST\"\n\n    d = mappyfile.loads(s, include_comments=True)\n    assert d[\"name\"] == \"TEST\"\n\n\ndef test_write():\n\n    s = \"\"\"MAP NAME \"TEST\" END\"\"\"\n    fn = tempfile.mktemp()\n    d = mappyfile.loads(s)\n    mappyfile.write(d, fn)\n    d = mappyfile.open(fn)\n    assert d[\"name\"] == \"TEST\"\n\n    mappyfile.write(d, fn, indent=2, spacer=\"\\t\", quote=\"'\", newlinechar=\"\")\n    d = mappyfile.open(fn)\n    assert d[\"name\"] == \"TEST\"\n\n\ndef test_dump():\n\n    s = \"\"\"MAP NAME \"TEST\" END\"\"\"\n    d = mappyfile.loads(s)\n    with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False) as fp:\n        mappyfile.dump(d, fp)\n\n    with open(fp.name) as fp:\n        d = mappyfile.load(fp)\n\n    assert d[\"name\"] == \"TEST\"\n\n\ndef test_dictfind():\n\n    s = \"\"\"\n    MAP\n        LAYER\n            NAME \"Layer1\"\n            TYPE POLYGON\n        END\n        LAYER\n            NAME \"Layer2\"\n            TYPE POLYGON\n            CLASS\n                NAME \"Class1\"\n                COLOR 0 0 -8\n            END\n        END\n    END\n    \"\"\"\n\n    d = mappyfile.loads(s)\n\n    pth = [\"layers\", 1]\n    cmp = mappyfile.dictfind(d, *pth)\n    assert cmp[\"name\"] == \"Layer2\"\n\n    pth = [\"layers\", 1, \"classes\", 0]\n    cmp = mappyfile.dictfind(d, *pth)\n    assert cmp[\"name\"] == \"Class1\"\n\n\ndef run_tests():\n    pytest.main([\"tests/test_utils.py\"])\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(level=logging.INFO)\n    # run_tests()\n    test_dump()\n    print(\"Done!\")\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
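# Editor's hedged usage sketch (not part of the dataset record above): a
# minimal load/modify/write round trip with mappyfile, using only the calls
# already exercised in tests/test_utils.py (loads, dump, load).
import tempfile

import mappyfile

d = mappyfile.loads("""MAP NAME "TEST" END""")
d["name"] = "RENAMED"
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as fp:
    mappyfile.dump(d, fp)
with open(fp.name) as fp:
    assert mappyfile.load(fp)["name"] == "RENAMED"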
+{"seq_id":"270622356","text":"import random\n\n\"\"\"\n    0 do nothing\n    1 forward\n    2 backward\n    3 forward right\n    4 forward left\n    5 back right\n    6 back left\n    7 rotate\n    8 same as before\n\n\"\"\"\ndef StupidActions(floor, sensors):\n    if(sensors[0] or sensors[1] > 0):\n        return 1\n    else:\n        return 4\n\ndef action(floor, sensors):\n    if(not touchFloor(floor)):\n        if(seeEnemyNotFront(sensors)):\n            if(sensors[3]):\n                return 2\n            elif(sensors[4]):\n                return 3\n            elif(sensors[5]):\n                return 7\n            else:\n                return 8\n        elif(seeEnemy(sensors)):\n            if(sensors[0]):\n                return 1\n            elif(sensors[1]):\n                return 6\n            else:\n                return 9\n    else:\n        if(not seeEnemyNotFront(sensors) and seeEnemy(sensors)):\n            if(sensors[0]):\n                return 1\n            else:\n                return 6\n        if(bool(floor[0]) ^ bool(floor[1]) ^ bool(floor[2]) ^ bool(floor[3])):\n            if(floor[0]):\n                return 7\n            if(floor[1]):\n                return 8\n            if(floor[2]):\n                return 3\n            if(floor[3]):\n                return 2\n        elif(floor[0] and floor[1]):\n            return 6\n        elif(floor[2] and floor[3]):\n            return 1\n        elif(floor[0] and (floor[3] or floor[2])):\n            return 2 if dirEnemy(sensors) else 7\n        elif(floor[1] and (floor[2] or floor[3])):\n            return 3 if dirEnemy(sensors) else 8\n        else:\n            return 1\n\ndef touchFloor(floor):\n    for i in floor:\n        if(i == 1):\n            return 1\n    return 0\n\ndef seeEnemy(sensors):\n    for i in sensors:\n        if(i >= 1):\n            return 1\n    return 0\n\ndef seeEnemyNotFront(sensors):\n    if(seeEnemy(sensors) and not (sensors[0] >= 1 or sensors[1] >= 1)):\n        return 1\n    else:\n        return 0\n\ndef dirEnemy(sensors):\n    if(sensors[0] or sensors[2] or sensors[3]):\n        return 0\n    else:\n        return 1\n    \n","sub_path":"robAlgoritm.py","file_name":"robAlgoritm.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"304108443","text":"#imports the modules and dependencies required\r\nfrom tkinter import *\r\nfrom datetime import datetime\r\nimport pandas as pd\r\nimport numpy\r\nimport dateutil\r\nimport pytz\r\n\r\nroot = Tk()#creates a new tkinter (GUI) window\r\nroot.configure(background='light blue')#set background colour\r\nroot.title('menu')# sets the title of this window\r\n\r\n#declares global variables\r\ntkvar=\"\"\r\nnumber=\"\"\r\nresultLabel=\"\"\r\nwd=\"\"\r\nh=0\r\n\r\ndef WT_PerPerson(X):# Function to create a dictionary with key = stall name, and value = corresponding waiting time (in minutes) per person\r\n    stall={'Mini Wok':5,'Western':6,'Japanese and Korean Delights':4,'McDonald\'s':3,'Subway':2}\r\n    return stall[X] #returns that value corresponding to the parameter key (X)\r\n\r\ndef getSDT():# Function to get the system date and time (along with corresponding weekday)\r\n    global wd\r\n    global h\r\n    \r\n    now = datetime.now() # gets the object containing the system date and time\r\n    h = int(now.strftime(\"%H\"))# gets the hour (from the system time)\r\n    today = datetime.today()\r\n    x = today.weekday()# gets the weekday's code corresponding to the system date\r\n    \r\n    weekdays = [\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"]\r\n    wkday_num = [0,1,2,3,4,5,6]\r\n    \r\n    for i in range(7): # looping structure to get the weekday corresponding to the weekday's code\r\n        if(x==wkday_num[i]):\r\n            wd = weekdays[i]\r\n    \r\n    \r\ndef display_menus_info_sun(*args): # Function to display the menu corresponding to the selected stall, if the system date is a sunday\r\n    if(wd==\"Sunday\"):\r\n        if(tkvar.get()=='Subway'):#checks if the selected stall is Subway\r\n            if(h<11 or h>=18): #displays \"CLOSED\" if the current time is not within Subway's operating hours\r\n                rl=Label(root, text=\"CLOSED\",font=('Comic Sans',14,'bold'))\r\n                rl.pack()\r\n                rl.place(x=560,y=500)\r\n            else:\r\n                #displays 
Subway's lunch/dinner menu\r\n data = pd.read_csv(\"Stalls_Menus_LD.csv\",encoding=\"ISO-8859-1\") #'pandas' library used to read the '.csv' file that stores lunch/dinner menus, and store it in 'data' variable\r\n list1 = (str(data.iloc[2,1])).split(\",\")#stores all items from Subway's lunch/dinner menu in a list\r\n for i in range(len(list1)): #looping structure to display each of these items as labels in the tkinter window\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n elif(tkvar.get()==\"McDonald's\"): #checks if the selected stall is McDonald's \r\n if(h<10 or h>=22): #displays \"CLOSED\" if the current time is not within McDonald's operating hours\r\n rl=Label(root, text=\"CLOSED\",font=('Comic Sans',14,'bold'))\r\n rl.pack()\r\n rl.place(x=560,y=500)\r\n else:\r\n if(h<11):\r\n data = pd.read_csv(\"Stalls_Menus_BK.csv\",encoding=\"ISO-8859-1\") #'pandas' library used to read the '.csv' file that stores breakfast menus, and store it in 'data' variable\r\n list1 = (str(data.iloc[4,1])).split(\",\")#stores all items from McDonald's breakfast menu in a list\r\n for i in range(len(list1)):\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n else:\r\n data = pd.read_csv(\"Stalls_Menus_LD.csv\",encoding=\"ISO-8859-1\") #'pandas' library used to read the '.csv' file that stores lunch/dinner menus, and store it in 'data' variable\r\n list1 = (str(data.iloc[4,1])).split(\",\")#stores all items from McDonald's lunch/dinner menu in a list\r\n for i in range(len(list1)):\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n else: #displays \"CLOSED\" if the selected stall is Japanese and Korean Delights/Mini Wok/Western, as they are closed on Sundays\r\n rl=Label(root, text=\"CLOSED\",font=('Comic Sans',14,'bold'))\r\n rl.pack()\r\n rl.place(x=560,y=500)\r\n\r\n\r\ndef display_menus_info_sat(*args): # Function to display the menu corresponding to the selected stall, if the system date is a saturday\r\n if(wd==\"Saturday\" and tkvar.get()!=\"Subway\" and tkvar.get()!=\"McDonald's\"):\r\n # Also checks if the selected stall is Japanese and Korean Delights/Mini Wok/Western (neither Subway nor McDonald's)\r\n if(h<7 or h>=15): #displays \"CLOSED\" if the current time is not within their operating hours\r\n rl=Label(root, text=\"CLOSED\",font=('Comic Sans',14,'bold'))\r\n rl.pack()\r\n rl.place(x=560,y=500)\r\n elif(h<11):\r\n data = pd.read_csv(\"Stalls_Menus_BK.csv\",encoding=\"ISO-8859-1\")\r\n for i in range(5):\r\n if(tkvar.get()==data.iloc[i,0]): \r\n list1 = (str(data.iloc[i,1])).split(\",\") #stores all items from the chosen stall's breakfast menu in a list\r\n for j in range(len(list1)):\r\n rl=Label(root,text=list1[j],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*j))\r\n else:\r\n data = pd.read_csv(\"Stalls_Menus_LD.csv\",encoding=\"ISO-8859-1\")\r\n for i in range(5):\r\n if(tkvar.get()==data.iloc[i,0]):\r\n list1 = (str(data.iloc[i,1])).split(\",\") #stores all items from the chosen stall's lunch/dinner menu in a list\r\n for j in range(len(list1)):\r\n rl=Label(root,text=list1[j],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*j))\r\n elif(wd==\"Saturday\" and tkvar.get()==\"Subway\"): # checks if the selected stall is Subway \r\n if(h<11 or h>=18):\r\n rl=Label(root, text=\"CLOSED\",font=('Comic Sans',14,'bold'))\r\n rl.pack()\r\n rl.place(x=560,y=500)\r\n else:\r\n data = 
pd.read_csv(\"Stalls_Menus_LD.csv\",encoding=\"ISO-8859-1\")\r\n list1 = (str(data.iloc[2,1])).split(\",\")\r\n for i in range(len(list1)):\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n \r\n \r\n elif(wd==\"Saturday\" and tkvar.get()==\"McDonald's\"): # checks if the selected stall is McDonald's\r\n if(h<7):\r\n rl=Label(root, text=\"CLOSED\",font=('Comic Sans',14,'bold'))\r\n rl.pack()\r\n rl.place(x=560,y=500)\r\n elif(h<11):\r\n data = pd.read_csv(\"Stalls_Menus_BK.csv\",encoding=\"ISO-8859-1\")\r\n list1 = (str(data.iloc[4,1])).split(\",\")\r\n for i in range(len(list1)):\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n else:\r\n data = pd.read_csv(\"Stalls_Menus_LD.csv\",encoding=\"ISO-8859-1\")\r\n list1 = (str(data.iloc[4,1])).split(\",\")\r\n for i in range(len(list1)):\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n\r\n\r\ndef display_menus_info_wkday(*args):# Function to display the menu corresponding to the selected stall, if the system date is a weekday (Monday - Friday)\r\n if(tkvar.get()!=\"Subway\" and tkvar.get()!=\"McDonald's\"):\r\n if(h<7 or h>=21):\r\n rl=Label(root, text=\"CLOSED\",font=('Comic Sans',14,'bold'))\r\n rl.pack()\r\n rl.place(x=560,y=500)\r\n elif(h<11):\r\n data = pd.read_csv(\"Stalls_Menus_BK.csv\",encoding=\"ISO-8859-1\")\r\n for i in range(5):\r\n if(tkvar.get()==data.iloc[i,0]):\r\n list1 = (str(data.iloc[i,1])).split(\",\")\r\n for j in range(len(list1)):\r\n rl=Label(root,text=list1[j],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*j))\r\n else:\r\n data = pd.read_csv(\"Stalls_Menus_LD.csv\",encoding=\"ISO-8859-1\")\r\n for i in range(5):\r\n if(tkvar.get()==data.iloc[i,0]):\r\n list1 = (str(data.iloc[i,1])).split(\",\")\r\n for j in range(len(list1)):\r\n rl=Label(root,text=list1[j],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*j))\r\n\r\n elif(tkvar.get()==\"Subway\"):\r\n if(h<8 or h>=21):\r\n rl=Label(root, text=\"CLOSED\",font=('Comic Sans',14,'bold'))\r\n rl.pack()\r\n rl.place(x=560,y=500)\r\n elif(h<11):\r\n data = pd.read_csv(\"Stalls_Menus_BK.csv\",encoding=\"ISO-8859-1\")\r\n list1 = (str(data.iloc[2,1])).split(\",\")\r\n for i in range(len(list1)):\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n else:\r\n data = pd.read_csv(\"Stalls_Menus_LD.csv\",encoding=\"ISO-8859-1\")\r\n list1 = (str(data.iloc[2,1])).split(\",\")\r\n for i in range(len(list1)):\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n elif(tkvar.get()==\"McDonald's\"):\r\n if(h<7):\r\n rl=Label(root, text=\"CLOSED\",font=('Comic Sans',14,'bold'))\r\n rl.pack()\r\n rl.place(x=560,y=500)\r\n elif(h<11):\r\n data = pd.read_csv(\"Stalls_Menus_BK.csv\",encoding=\"ISO-8859-1\")\r\n list1 = (str(data.iloc[4,1])).split(\",\")\r\n for i in range(len(list1)):\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n else:\r\n data = pd.read_csv(\"Stalls_Menus_LD.csv\",encoding=\"ISO-8859-1\")\r\n list1 = (str(data.iloc[4,1])).split(\",\")\r\n for i in range(len(list1)):\r\n rl=Label(root,text=list1[i],font=('Comic Sans',10))\r\n rl.pack()\r\n rl.place(x=775,y=(350+50*i))\r\n\r\n\r\ndef display_stalls_info(*args): # Function to display the selected stall's details/information \r\n global tkvar\r\n info = 
pd.read_csv(\"Stalls_info.csv\",encoding=\"ISO-8859-1\") #'pandas' library used to read the '.csv' file that stores stalls' information, and store it in 'info' variable\r\n for i in range(5): #looping structure to display each of these as labels in the tkinter window\r\n if(tkvar.get()==info.iloc[i,0]):\r\n info_list = (str(info.iloc[i,1])).split(\",\") #stores all data from the chosen stall's information in a list\r\n for j in range(len(info_list)):\r\n rlab=Label(root,text=info_list[j],font=('Comic Sans',10))\r\n rlab.pack()\r\n rlab.place(x=560,y=(350+50*j))\r\n \r\ndef no(*args): #Function to display the estimated waiting time (given the entered no. of people), for the selected stall\r\n try: # 'try' block to perform these operations\r\n global tkvar\r\n global number\r\n global resultLabel\r\n time=WT_PerPerson(tkvar.get()) #stores the waiting time per person for that selected stall, in 'time' variable, by invoking the function 'WT_PerPerson'\r\n try: # 'try' block to get the total estimated waiting time\r\n if(int(number.get())<0): #Checks if the entered no. of people is a negative value\r\n resultLabel.config(text='Please enter a positive integer') # If yes, then an error message is displayed as a label in the new tkinter window\r\n else: \r\n result=int(number.get()) # gets the (entered) no. of people in the queue, before that person\r\n lastresult=int(time)*result # gets the total estimated waiting time\r\n resultLabel.config(text='The waiting time should be {} min'.format(lastresult)) # displays the estimated waiting time as a label in a new tkinter window\r\n except (ValueError): # 'except' block to catch a 'Value Error', if raised by the 'try' block.\r\n # In this case, it will be raised when the (entered) no. of people in the queue, before that person, is not an integer value.\r\n resultLabel.config(text='Please enter a positive integer') #error message displayed as a label in the new tkinter window\r\n \r\n number.delete(0,END) # Clears the entry widget (for no. of people in the queue)\r\n \r\n except(NameError,AttributeError,UnboundLocalError): {} # 'except' block to catch a 'Name Error', 'Attribute Error', or 'Unbound Local Error', if raised by the 'try' block.\r\n # Name Error: raised when a variable or function name has been used based on a previous definition\r\n # Attribute Error: raised when an invalid attribute reference is made, or when an attribute assignment fails.\r\n # Unbound Local Error: raised when a local variable is referenced before it has been assigned.\r\n \r\n \r\ndef wt_setup(): # Function to setup the new tkinter window for getting the estimated waiting time\r\n global tkvar\r\n global number\r\n global resultLabel\r\n newwin = Toplevel(root) # creates a new tkinter (GUI) window\r\n newwin.title('WaitingTime')# sets the title of this window\r\n newwin.geometry('300x300')# sets the geometry of this window\r\n\r\n Label(newwin,text='The number of people in the queue').pack(side='top') # label created to prompt the user to enter the no. 
of people in the queue, before that person\r\n\r\n number=Entry(newwin)# creates the required entry widget\r\n number.focus()\r\n \r\n #number.bind('',no)\r\n number.pack(side='top') # packs this entry widget on the top of this window\r\n\r\n enternumber = Button(newwin,text= \"Enter\", command=no) # creates a button, which when pressed, gets the value entered by the user\r\n enternumber.pack(side='top')# packs this button on the top of this window\r\n\r\n# Create and empty Label to put the result in\r\n resultLabel = Label(newwin, text = \"\")\r\n resultLabel.pack(side='top')\r\n#Creates a refresh button (packed on the left of this window) , named 'CLOSE', to close this new tkinter window\r\n frame2=Frame(newwin)\r\n frame2.pack()\r\n button_refresh = Button(frame2, text = \"close\",command=newwin.destroy)\r\n button_refresh.pack(side = LEFT)\r\n\r\n\r\ndef oh_setup(): # Function to setup the new tkinter window for getting the selected stall's operating hours\r\n global tkvar\r\n n = Toplevel(root) # creates a new tkinter (GUI) window\r\n n.title('WaitingTime')# sets the title of this window\r\n n.geometry('300x300')# sets the geometry of this window\r\n\r\n try: # 'try' block to perform these operations\r\n list_oh=list()\r\n Label(n,text='Operating Hours',font=('Comic Sans',14)).pack(side='top') # label created to display the title: 'Operating Hours'\r\n data_oh = pd.read_csv(\"Stalls_OH.csv\",encoding=\"ISO-8859-1\")#'pandas' library used to read the '.csv' file that stores stalls' operating hours, and store it in 'data_oh' variable\r\n for i in range(5):\r\n name_oh = data_oh.iloc[i,0]\r\n list_oh = (str(data_oh.iloc[i,1])).split(\",\")#stores all data from the chosen stall's operating hours in a list\r\n if(tkvar.get()==name_oh):\r\n for j in range(len(list_oh)):\r\n Label(n,text=list_oh[j],font=('Comic Sans',12)).pack(side='top')\r\n except AttributeError: {} # 'except' block to catch an 'Attribute Error', if raised by the 'try' block.\r\n \r\n \r\n#Creates a refresh button (packed on the left of this window) , named 'CLOSE', to close this new tkinter window\r\n frame2=Frame(n)\r\n frame2.pack()\r\n button_refresh = Button(frame2, text = \"close\",command=n.destroy)\r\n button_refresh.pack(side = LEFT)\r\n\r\n \r\n# creates the drop-down menu structure, to display the stalls available \r\nmainframe=Frame(root)\r\nmainframe.grid(column=0,row=0,sticky=(N,W,E,S))\r\nmainframe.columnconfigure(0,weight = 1)\r\nmainframe.rowconfigure(0, weight=1)\r\nmainframe.pack(pady=1,padx=1)\r\n\r\ntkvar=StringVar(root)# creates a string variable for storing the selected stall's name\r\nchoices={\"McDonald's\",'Subway','Western','Mini Wok','Japanese and Korean Delights'}\r\ntkvar.set('Choose') #setting the default option in dropdown\r\n\r\npopupMenu=OptionMenu(mainframe,tkvar,*choices)\r\nLabel(mainframe, text=\"Choose a food stall\",font=('Comic Sans',14,'bold'),fg='black').grid(row=1,column=1)# label created to prompt the user to choose a stall\r\npopupMenu.grid(row=2,column=1)\r\n\r\n#gets the background image for the root page\r\ncanvas = Canvas(root, width =500, height = 707) \r\ncanvas.pack() \r\nimg = PhotoImage(file=\"P2_img.png\") \r\ncanvas.create_image(0,0, anchor=NW, image=img)\r\n\r\n# creates a button, which when pressed, opens the window for getting the estimated waiting time\r\nWT=Button(text='Estimate waiting time',command=wt_setup)\r\nWT.pack()\r\nWT.place(x=580,y=625)\r\n\r\n# creates a button, which when pressed, opens the window for getting the stall's operating 
hours\r\nOH=Button(text='View Operating Hours',command=oh_setup)\r\nOH.pack()\r\nOH.place(x=580,y=675)\r\n\r\n# Required function calls\r\ngetSDT()\r\ntkvar.trace('w',no)\r\ntkvar.trace('w',display_menus_info_sun)\r\ntkvar.trace('w',display_menus_info_sat)\r\ntkvar.trace('w',display_menus_info_wkday)\r\ntkvar.trace('w',display_stalls_info)\r\n\r\nroot.mainloop()# runs the main loop for execution\r\n\r\n\r\n\r\n","sub_path":"Stalls_SDDT.py","file_name":"Stalls_SDDT.py","file_ext":"py","file_size_in_byte":17850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"227771897","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 06 11:45:26 2016\n\n@author: julien\n\"\"\"\nimport functions as fn\n\nclass Logbook(object):\n def __init__(self, body, is_discovered, is_explored):\n self.instance = [body]\n self.is_discovered = is_discovered\n self.is_explored = is_explored\n self.time_of_exploration = None\n self.travel_time = 0\n self.travel_cost = 0\n \n def get_travel_info(self,planet,travel_bonus):\n self.travel_time = fn.travel_time(fn.dist(self.instance[0].pos,planet.pos),self.instance[0].game.space_travel_unit)/travel_bonus\n self.travel_cost = fn.travel_formula(self.travel_time)\n \n ","sub_path":"logbook.py","file_name":"logbook.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"150812394","text":"import unittest\n\nfrom tests.test_common import create_css_file\n\nfrom icon_font_to_png import load_css\n\nclass TestLoadCSS(unittest.TestCase):\n def test_common_prefix(self):\n css_file = create_css_file(\n \".foo-bar:before { content: '\\\\f001' }\\n\"\n \".foo-xyzzy:before { content: '\\\\f002' }\\n\"\n )\n icons, prefix = load_css(css_file.name, strip_prefix=True)\n self.assertEqual(prefix, \"foo-\")\n\n css_file = create_css_file(\n \".foo:before { content: '\\\\f001' }\\n\"\n \".bar:before { content: '\\\\f002' }\\n\"\n )\n icons, prefix = load_css(css_file.name, strip_prefix=True)\n self.assertEqual(prefix, \"\")\n\nif __name__ == '__main__':\n unittest.main\n","sub_path":"tests/test_load_css.py","file_name":"test_load_css.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"218215394","text":"#!/usr/bin/env python3\n\nfrom zencad import *\nfrom globals import *\n\nfrom room import Room\nfrom rotplate import RotationPlate\n\n#zencad.lazy.fastdo=True\n\nclass Fork(zencad.assemble.unit):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\tdef fork(self):\n\t\th = 15\n\t\thear_x = 95/2\n\t\thear_z = 44\n\t\thear_R = 28/2\n\t\thear_h = 15\n\n\t\tyk = hear_R * 3 / 4\n\t\tR = ROOF_R #- self.t/2\n\t\tiR = ROOF_R - T*2\n\n\t\txk = math.sqrt(R**2 - yk**2)\n\t\tixk = math.sqrt(iR**2 - yk**2)\n\n\t\tbase_cylinder = cylinder(r=R, h=h)\n\t\tibase_cylinder = cylinder(r=iR, h=h - T) \n\t\tbase = (base_cylinder\n\t\t\t- halfspace().rotateY(deg(90)).moveX(-xk)\n\t\t\t- halfspace().rotateY(deg(-90)).moveX(xk)\n\t\t)\n\n\t\ticyl = cylinder(r=R-T, h=T)\n\n\t\tibase_cylinder = (ibase_cylinder\n\t\t\t- halfspace().rotateY(deg(90)).moveX(-ixk)\n\t\t\t- halfspace().rotateY(deg(-90)).moveX(ixk)\n\t\t)\n\n\t\t#face0 = near_face(base, point3(-1000,0,h/2))\n\t\t\t# - ibase_cylinder \n\n\t\thear_c = cylinder(r=hear_R, h=hear_h).rotateY(deg(-90))\n\t\thear = hear_c\n\t\thear = hear.move(-hear_x, 0 , hear_z)\n\t\t\n\t\tbx=hear_h;by=hear_R*6/4;bz=(hear_z-h)/2\n\t\tbbb = 
box(bx,by,bz,center=True).move(-hear_x - bx/2, 0 , hear_z-bz/2)\n\t\tibbb = box(bx-T,by-2*T,bz,center=True).move(-hear_x - bx/2 +T/2, 0 , hear_z-bz/2)\n\n\t\tbotwire1 = near_face(bbb, point(-hear_x-bx/2, 0, -10000)).wires()[0]\n\t\tbotwire2 = botwire1.move(-botwire1.props1().center())\n\t\tbotwire2 = botwire2.moveX(-ROOF_R/2)\n\t\t\n\t\tibotwire1 = near_face(ibbb, point(-hear_x-bx/2, 0, -10000)).wires()[0]\n\t\t#ibotwire2 = ibotwire1.move(-ibotwire1.props1().center())\n\t\t#ibotwire2 = ibotwire2.moveX(-self.roof_r/2 + self.t)\n#\n\t\tbotwire2 = botwire1.move(hear_x - xk + hear_h, 0, -hear_z+bz)\n\t\tibotwire2 = ibotwire1.move(hear_x - ixk + hear_h, 0, -hear_z+bz)\n\t\t\n\t\thear += bbb\n\t\thear += loft([botwire1, botwire2])\n\t\thear += hear.mirrorYZ()\n\n\t\t\n\t\tihear_c = cylinder(r=23/2, h=hear_h - T).rotateY(deg(-90))\n\t\tihear0 = ihear_c\n\t\tihear0 = ihear0.move(-hear_x, 0 , hear_z)\n\t\tihear_c2 = (cylinder(r=6,h=bz,center=True)-halfspace().rotateY(deg(90)).left(bx/2-T)).move(-hear_x - bx/2, 0 , hear_z-bz/2)\n\t\tihear_c2_face0 = near_face(ihear_c2, point(-hear_x-bx/2, 0, -10000))\n\t\tihear_c2_face1 = near_face(ihear_c2, point(-hear_x-bx/2, 0, -10000)).move(hear_x - ixk + hear_h, 0, -hear_z+bz-T*2.5)\n\t\tihear0 += ihear_c2 \n\t\tihear0 += loft([ihear_c2_face1.wires()[0], ihear_c2_face0.wires()[0]])\n\t\tihear0 += sphere(12).move(-iR+10,0,0)\n#\n\t\tihear1 = cylinder(r=20/2, h=hear_h - T)\n\t\tihear1 += box(12*2,T,hear_h-T,center=True).movZ((hear_h-T)/2)\n\t\tihear1 += box(T,12*2,hear_h-T,center=True).movZ((hear_h-T)/2)\n\t\tihear1 = ihear1.rotateY(deg(90)).move(hear_x, 0 , hear_z)\n\n\n\t\tmiddle = unify(\n\t\t\tbase \n\t\t\t+ hear\n\t\t\t- ihear0\n\t\t\t- ihear1\n\t\t\t- ibase_cylinder\n\t\t\t- icyl\n\t\t)\n\n\t\tstlb = (stolb(2.5,1.5,7,h-T*2,[deg(90),deg(180),deg(270)])\n\t\t\t.mirrorXY())#.rotZ(deg(45)))\n\t\tsupports = sqrmirror()(\n\t\t\tstlb.move(ROOF_R-10, 0, h-T).rotZ(deg(45))\n\t\t)\n\n\t\treturn unify(\n\t\t\tmiddle \n\t\t\t+ supports\n\t\t\t#+ supports.mirrorYZ()\n\t\t)\n\nclass Fork0(Fork):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.add(self.model())\n\n\tdef model(self):\n\t\treturn self.fork() - halfspace().rotateY(deg(90))\n\nclass Fork1(Fork):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.add(self.model())\n\n\tdef model(self):\n\t\treturn self.fork() - halfspace().rotateY(deg(-90))\n\nclass ConnectionCylinder(zencad.assemble.unit):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.add(self.model())\n\n\tdef model(self):\n\t\tm = cylinder(ROOF_R-T, T)\n\t\tm -= motor_screw(3,5,3)\n\n\t\tm += sqrmirror()(\n\t\t\t(cylinder(r=4,h=3) - cylinder(r=3,h=3))\n\t\t\t\t.move(ROOF_R-10, 0, T).rotZ(deg(45)) \n\t\t\t)\n\t\tm -= cables_hole().extrude(T).mirrorXZ()\n\t\treturn m\n\n#class RotaryBase:\n#\t\t\n#\tdef roof(self):\n#\t\tr = self.roof_r + self.border_t\n#\t\tr1 = r + 5\n#\t\tt = self.t\n#\t\tc_h = 10\n#\n#\t\tbase = (cylinder(r=r, h=t) \n#\t\t\t- LIGHT_HOLES \n#\t\t\t- self.motor_hole\n#\t\t\t- self.nut_hole2\n#\t\t\t- self.cables_hole()\n#\t\t)\n#\n#\t\t#c = (cone(r1=r, r2=r1, h=c_h) - cone(r1=r-t, r2=r1-t, h=c_h)).up(t)\n#\n#\t\tmh = (\n#\t\t\tbox(31,1,2,center=True) \n#\t\t\t+ box(1,15,2,center=True).moveY(7.5) \n#\t\t\t+ cylinder(r=2.5,h=2,center=True).moveX(31/2)\n#\t\t\t+ cylinder(r=2.5,h=2,center=True).moveX(-31/2)\n#\t\t\t- cylinder(r=1,h=2,center=True).moveX(31/2)\n#\t\t\t- cylinder(r=1,h=2,center=True).moveX(-31/2)\n#\t\t).up(t+1).moveY(5+0.5)\n#\n#\t\treturn unify(base + mh)\n#\n#\tdef 
room(self):\n#\t\ty=100\n#\t\tt=self.t\n#\t\tx_ext = 6*t\n#\n#\t\tadd_z_to_roof = 10\n#\t\tadd_z_to_down = 1\n#\t\t\n#\t\tpanel = self.panel()\n#\t\tz = panel.bbox().ymax - panel.bbox().ymin + 2*t + add_z_to_roof + add_z_to_down\n#\t\tx = panel.bbox().xmax + x_ext\n#\t\tk = panel.bbox().ymax - panel.bbox().ymin - 5 - 1.6 + t + add_z_to_roof\n#\n#\t\tbase = box(x,y,z) - box(x-2*t,y-2*t,z-t).move(t,t,t)\n#\t\tbase = base.moveZ(-t)\n#\n#\t\tpanel = (panel.rotateX(deg(-90)).move(x_ext/2,y-t,z-t + panel.bbox().ymin- add_z_to_down))\n#\t\troof_trans = move(x/2, y/2, -t)\n#\n#\t\t# крепление для stereopi\n#\t\t#print(k)\n#\t\tsrad=2.5\n#\t\tir = 1.8\n#\t\tspi_kreps = union([\n#\t\t\tstolb(srad,ir,7,k,angles=[deg(0),deg(180),deg(270)]).move(x_ext/2+srad,y-3-t*1-1,0),\n#\t\t\tstolb(srad,ir,7,k,angles=[deg(0),deg(180),deg(270)]).move(x-x_ext/2-srad,y-3-t*1-1,0),\n#\t\t\tstolb(srad,ir,7,k).move(x_ext/2+srad,y-3-t*1-1-35,0),\n#\t\t\tstolb(srad,ir,7,k).move(x-x_ext/2-srad,y-3-t*1-1-35,0)\n#\t\t])\n#\n#\t\treturn unify(\n#\t\t\tbase - panel.bbox().shape() + panel\n#\t\t\t- roof_trans(cylinder(r=self.roof_r, h=t))\n#\t\t\t+ roof_trans(self.roof())\n#\t\t\t+ spi_kreps\n#\t\t)\n#\t\n#\tdef crest(self):\n#\t\tr = 20\n#\t\th = 10\n#\t\tm = box(r, self.t, h, center=True) + box(self.t, r, h, center=True) \n#\t\tm = m.moveZ(h/2)\n#\t\treturn m\n#\n#\t\n#\tdef camera_box(self):\n#\t\tx=80\n#\t\t#ch = 4\n#\n#\t\tm = box(x, 30, 30, center=True)\n#\t\tm -= (box(x-self.t*2, 30-self.t, 30-self.t, center=True)\n#\t\t\t.move(0,-self.t,self.t))\n#\n#\n#\t\t#m += cylinder(r=10,h=10).rotateY(deg(-90)).movX(-x/2)\n#\t\tm -= cylinder(r=10,h=self.t).rotateY(deg(-90)).movX(-x/2+self.t)\n#\t\tm -= cylinder(r=10,h=self.t).rotateY(deg(90)).movX(x/2-self.t)\n#\n#\t\treturn m\n\nroom = Room()\nrotplate = RotationPlate()\nfork0 = Fork0()\nfork1 = Fork1()\nconcyl = ConnectionCylinder()\n\nroom.socket.link(rotplate)\nrotplate.socket.link(fork0)\nrotplate.socket.link(fork1)\nrotplate.socket.link(concyl)\n\ndisp(room)\n\n#disp(Room())\n#disp(RotaryBase().room().rotateX(deg(180)).move(-50, 50, -10))\n#disp(RotaryBase().rotation_plate().rotZ(deg(180)).down(RotaryBase().rotation_plate().bbox().zmax))\n#disp(RotaryBase().fork_half0())\n#disp(RotaryBase().fork_half1())\n#disp(RotaryBase().camera_box().moveZ(45))\n#disp(RotaryBase().connection_cylinder())#.up(10))\n\n#disp(RotaryBase().roof())\n\n#to_stl(RotaryBase().rotation_plate(), \"/home/mirmik/models/spi_rotation_plate.stl\", delta=0.1)\n#to_stl(RotaryBase().room(), \"/home/mirmik/models/spi_room.stl\", delta=0.1)\n#to_stl(RotaryBase().fork_half0(), \"/home/mirmik/models/spi_forkhalf0.stl\", delta=0.1)\n#to_stl(RotaryBase().fork_half1(), \"/home/mirmik/models/spi_forkhalf1.stl\", delta=0.1)\n\n#disp(m)\nshow()\n\n","sub_path":"models/head/rotary_base.py","file_name":"rotary_base.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"18796910","text":"import json\n\nfrom fastapi import APIRouter\nfrom sqlalchemy import create_engine, distinct\nfrom sqlalchemy.orm import sessionmaker\n\nfrom dependencies import DB_CONNECTION\nfrom internal.models.db_models import APComponent, APPartnumber, APCategory, APParamlist\n\nrouter = APIRouter(\n prefix=\"/filter\",\n tags=[\"filter\"]\n)\n\nengine = create_engine(DB_CONNECTION)\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\ndef sort_param(data):\n \"\"\" Documentation for a method param_filter. 
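Hedged doctest-style illustration (editor's addition; the objects and
    values are hypothetical -- sort_param only needs items exposing a
    ``params`` dict):

        >>> from types import SimpleNamespace
        >>> comps = [SimpleNamespace(params={"color": "red", "size": 1}),
        ...          SimpleNamespace(params={"color": "blue"})]
        >>> sort_param(comps)
        {'color': ['red', 'blue'], 'size': [1]}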
Added: 02.10.2020 16:12 dmytro.medintsev\n\n\n :param :\n :type :\n\n :return:\n :rtype:\n \"\"\"\n res = {}\n p_list = []\n for i in data:\n for j in i.params:\n p_list.append(i.params)\n if j not in res:\n res[j] = []\n for t in p_list:\n for p in res:\n if p in t:\n if t[p] not in res[p]:\n res[p].append(t[p])\n return res\n\n\n@router.get(\"/component/{filter_json}\", tags=[\"filter\"])\ndef components_filter(filter_json: str):\n c_filter = json.loads(filter_json)\n\n if len(c_filter) > 0:\n dd = session.query(APComponent.params).filter(APComponent.params.contains(c_filter)).all()\n session.close()\n ff = sort_param(dd)\n return {\n \"components\": [i[0] for i in dd],\n \"filters\": ff\n }\n else:\n tree = {}\n tree_query_res = session.query(distinct(APParamlist.category), APParamlist.family).all()\n for branch, leaf in tree_query_res:\n if branch not in tree:\n tree[branch] = []\n tree[branch].append(leaf)\n return tree\n\n\n@router.get(\"/component/table_column_order/{family}\", tags=[\"filter\"])\ndef components_filter(family: str):\n return [i[0] for i in session.query(APParamlist.param_name).filter(APParamlist.family == family).\n filter(APParamlist.index >= 0).order_by(APParamlist.index.asc()).all()]\n\n\n@router.get(\"/article/{filter_json}\", tags=[\"filter\"])\ndef components_filter(filter_json: str):\n a_filter = json.loads(filter_json)\n temp_list = []\n if 'Category' in a_filter:\n category_name = a_filter['Category']\n category = session.query(APCategory).filter(APCategory.name == category_name).first()\n temp_list = session.query(APPartnumber).filter(APPartnumber.name.startswith(f\"11.{category.id}.\")).all()\n if 'Family' in a_filter:\n family = a_filter['Family']\n family_id = category.family_list[family]\n temp_list = session.query(APPartnumber).filter(\n APPartnumber.name.startswith(f\"11.{category.id :02d}.{family_id :02d}\")).all()\n return temp_list\n else:\n return session.query(APPartnumber.name, APPartnumber.Description).all()\n\n pass\n","sub_path":"routers/filter_router.py","file_name":"filter_router.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"477061956","text":"import re\nimport ast\nimport traceback\n\nvariable_regexp = r\"\\$([\\w_]+)\"\n#function_regexp = r\"\\$\\{([\\w_]+\\([\\$\\w\\.\\-/_ =,]*\\))\\}\"\n#function_regexp_compile = re.compile(r\"^([\\w_]+)\\(([\\$\\w\\.\\-/_ =,]*)\\)$\")\nfunction_regexp = r\"\\$\\{([\\w_]+\\([\\$'\\\"\\w\\.\\-/_ =,]*\\))\\}\"\nfunction_regexp_compile = re.compile(r\"^([\\w_]+)\\(([\\$'\\\"\\w\\.\\-/_ =,]*)\\)$\")\n\nnumeric_types = (int, float)\nbasestring = (str, bytes)\n\nclass Utils(object):\n\n @staticmethod\n def get_os_environ(variable_name):\n \"\"\" get value of environment variable.\n\n Args:\n variable_name(str): variable name\n\n Returns:\n value of environment variable.\n\n Raises:\n exceptions.EnvNotFound: If environment variable not found.\n\n \"\"\"\n try:\n return os.environ[variable_name]\n except KeyError:\n raise exceptions.EnvNotFound(variable_name)\n\n @staticmethod\n def ensure_mapping_format(variables):\n \"\"\" ensure variables are in mapping format.\n\n Args:\n variables (list/dict): original variables\n\n Returns:\n dict: ensured variables in dict format\n\n Examples:\n >>> variables = [\n {\"a\": 1},\n {\"b\": 2}\n ]\n >>> print(ensure_mapping_format(variables))\n {\n \"a\": 1,\n \"b\": 2\n }\n\n \"\"\"\n if isinstance(variables, list):\n variables_dict = {}\n for map_dict in variables:\n 
variables_dict.update(map_dict)\n\n return variables_dict\n\n elif isinstance(variables, dict):\n return variables\n\n else:\n raise exceptions.ParamsError(\"variables format error!\")\n\n\nutils = Utils()\n\n\nclass Parser(object):\n \"\"\"变量/函数模板解析器\"\"\"\n\n def parse_func(self, content, func_dt):\n print('[INFO]: parse_func')\n func_lst = self.extract_functions(content)\n parsed = None\n print(\"Vars: %s\" % func_lst)\n for f in func_lst:\n try:\n #res = getattr(f)()\n #res = func_dt.get(f)(*args, **kwargs)\n res = eval(f)\n func_sign = '${%s}' % f\n print(\"replace: %s\" % func_sign)\n parsed = content.replace(func_sign, str(res))\n content = parsed\n except Exception as err:\n print('[Error]: %s' % err)\n #traceback.print_exc()\n if parsed:\n print(parsed)\n return parsed\n else:\n return content\n\n def parse_function(self, content):\n \"\"\" parse function name and args from string content.\n Args:\n content (str): string content\n Returns:\n dict: function meta dict\n {\n \"func_name\": \"xxx\",\n \"args\": [],\n \"kwargs\": {}\n }\n Examples:\n >>> parse_function(\"func()\")\n {'func_name': 'func', 'args': [], 'kwargs': {}}\n >>> parse_function(\"func(5)\")\n {'func_name': 'func', 'args': [5], 'kwargs': {}}\n >>> parse_function(\"func(1, 2)\")\n {'func_name': 'func', 'args': [1, 2], 'kwargs': {}}\n >>> parse_function(\"func(a=1, b=2)\")\n {'func_name': 'func', 'args': [], 'kwargs': {'a': 1, 'b': 2}}\n >>> parse_function(\"func(1, 2, a=3, b=4)\")\n {'func_name': 'func', 'args': [1, 2], 'kwargs': {'a':3, 'b':4}}\n \"\"\"\n matched = function_regexp_compile.match(content)\n if not matched:\n #raise exceptions.FunctionNotFound(\"{} not found!\".format(content))\n print(\"{} not found!\".format(content))\n\n function_meta = {\n \"func_name\": matched.group(1),\n \"args\": [],\n \"kwargs\": {}\n }\n\n args_str = matched.group(2).strip()\n if args_str == \"\":\n return function_meta\n\n args_list = args_str.split(',')\n for arg in args_list:\n arg = arg.strip()\n if '=' in arg:\n key, value = arg.split('=')\n function_meta[\"kwargs\"][key.strip()] = self.parse_string_value(value.strip())\n else:\n function_meta[\"args\"].append(self.parse_string_value(arg))\n\n return function_meta\n\n def parse_string_functions(self, content, variables_mapping, functions_mapping):\n \"\"\" parse string content with functions mapping.\n Args:\n content (str): string content to be parsed.\n variables_mapping (dict): variables mapping.\n functions_mapping (dict): functions mapping.\n Returns:\n str: parsed string content.\n Examples:\n >>> content = \"abc${add_one(3)}def\"\n >>> functions_mapping = {\"add_one\": lambda x: x + 1}\n >>> parse_string_functions(content, functions_mapping)\n \"abc4def\"\n \"\"\"\n print('[INFO]: parse_string_functions')\n functions_list = self.extract_functions(content)\n for func_content in functions_list:\n function_meta = self.parse_function(func_content)\n func_name = function_meta[\"func_name\"]\n\n args = function_meta.get(\"args\", [])\n kwargs = function_meta.get(\"kwargs\", {})\n args = self.parse_data(args, variables_mapping, functions_mapping)\n kwargs = self.parse_data(kwargs, variables_mapping, functions_mapping)\n\n if func_name in [\"parameterize\", \"P\"]:\n if len(args) != 1 or kwargs:\n raise exceptions.ParamsError(\"P() should only pass in one argument!\")\n from httprunner import loader\n eval_value = loader.load_csv_file(args[0])\n elif func_name in [\"environ\", \"ENV\"]:\n if len(args) != 1 or kwargs:\n #raise exceptions.ParamsError(\"ENV() should only 
pass in one argument!\")\n print(\"ENV() should only pass in one argument!\")\n eval_value = utils.get_os_environ(args[0])\n else:\n func = self.get_mapping_function(func_name, functions_mapping)\n eval_value = func(*args, **kwargs)\n\n func_content = \"${\" + func_content + \"}\"\n if func_content == content:\n # content is a function, e.g. \"${add_one(3)}\"\n content = eval_value\n else:\n # content contains one or many functions, e.g. \"abc${add_one(3)}def\"\n content = content.replace(\n func_content,\n str(eval_value), 1\n )\n\n return content\n\n @staticmethod\n def get_mapping_function(function_name, functions_mapping):\n \"\"\" get function from functions_mapping,\n if not found, then try to check if builtin function.\n Args:\n variable_name (str): variable name\n variables_mapping (dict): variables mapping\n Returns:\n mapping function object.\n Raises:\n exceptions.FunctionNotFound: function is neither defined in debugtalk.py nor builtin.\n \"\"\"\n if function_name in functions_mapping:\n return functions_mapping[function_name]\n\n try:\n # check if HttpRunner builtin functions\n from httprunner import loader\n built_in_functions = loader.load_builtin_functions()\n return built_in_functions[function_name]\n except KeyError:\n pass\n\n try:\n # check if Python builtin functions\n item_func = eval(function_name)\n if callable(item_func):\n # is builtin function\n return item_func\n except (NameError, TypeError):\n # is not builtin function\n #raise exceptions.FunctionNotFound(\"{} is not found.\".format(function_name))\n print(\"{} is not found!\".format(function_name))\n\n @staticmethod\n def extract_functions(content):\n \"\"\" extract all functions from string content, which are in format ${fun()}\n Args:\n content (str): string content\n Returns:\n list: functions list extracted from string content\n Examples:\n >>> extract_functions(\"${func(5)}\")\n [\"func(5)\"]\n >>> extract_functions(\"${func(a=1, b=2)}\")\n [\"func(a=1, b=2)\"]\n >>> extract_functions(\"/api/1000?_t=${get_timestamp()}\")\n [\"get_timestamp()\"]\n >>> extract_functions(\"/api/${add(1, 2)}\")\n [\"add(1, 2)\"]\n >>> extract_functions(\"/api/${add(1, 2)}?_t=${get_timestamp()}\")\n [\"add(1, 2)\", \"get_timestamp()\"]\n \"\"\"\n try:\n return re.findall(function_regexp, content)\n except TypeError:\n return []\n\n @staticmethod\n def extract_variables(content):\n \"\"\" extract all variable names from content, which is in format $variable\n Args:\n content (str): string content\n Returns:\n list: variables list extracted from string content\n Examples:\n >>> extract_variables(\"$variable\")\n [\"variable\"]\n >>> extract_variables(\"/blog/$postid\")\n [\"postid\"]\n >>> extract_variables(\"/$var1/$var2\")\n [\"var1\", \"var2\"]\n >>> extract_variables(\"abc\")\n []\n \"\"\"\n # TODO: change variable notation from $var to {{var}}\n try:\n return re.findall(variable_regexp, content)\n except TypeError:\n return []\n\n def parse_data(self, content, variables_mapping=None, functions_mapping=None, raise_if_variable_not_found=True):\n \"\"\" parse content with variables mapping\n Args:\n content (str/dict/list/numeric/bool/type): content to be parsed\n variables_mapping (dict): variables mapping.\n functions_mapping (dict): functions mapping.\n raise_if_variable_not_found (bool): if set False, exception will not raise when VariableNotFound occurred.\n Returns:\n parsed content.\n Examples:\n >>> content = {\n 'request': {\n 'url': '/api/users/$uid',\n 'headers': {'token': '$token'}\n }\n }\n >>> variables_mapping 
= {\"uid\": 1000, \"token\": \"abcdef\"}\n >>> parse_data(content, variables_mapping)\n {\n 'request': {\n 'url': '/api/users/1000',\n 'headers': {'token': 'abcdef'}\n }\n }\n \"\"\"\n # TODO: refactor type check\n if content is None or isinstance(content, (numeric_types, bool, type)):\n return content\n\n if isinstance(content, (list, set, tuple)):\n return [\n self.parse_data(\n item,\n variables_mapping,\n functions_mapping,\n raise_if_variable_not_found\n )\n for item in content\n ]\n\n if isinstance(content, dict):\n parsed_content = {}\n for key, value in content.items():\n parsed_key = self.parse_data(\n key,\n variables_mapping,\n functions_mapping,\n raise_if_variable_not_found\n )\n parsed_value = self.parse_data(\n value,\n variables_mapping,\n functions_mapping,\n raise_if_variable_not_found\n )\n parsed_content[parsed_key] = parsed_value\n\n return parsed_content\n\n if isinstance(content, basestring):\n # content is in string format here\n variables_mapping = utils.ensure_mapping_format(variables_mapping or {})\n functions_mapping = functions_mapping or {}\n content = content.strip()\n\n try:\n # replace functions with evaluated value\n # Notice: parse_string_functions must be called before parse_string_variables\n content = self.parse_string_functions(\n content,\n variables_mapping,\n functions_mapping\n )\n # replace variables with binding value\n content = self.parse_string_variables(\n content,\n variables_mapping,\n functions_mapping\n )\n except Exception as err:\n print('[Error]: %s' % err)\n traceback.print_exc()\n return content\n\n def parse_string_variables(self, content, variables_mapping, functions_mapping):\n \"\"\" parse string content with variables mapping.\n Args:\n content (str): string content to be parsed.\n variables_mapping (dict): variables mapping.\n Returns:\n str: parsed string content.\n Examples:\n >>> content = \"/api/users/$uid\"\n >>> variables_mapping = {\"$uid\": 1000}\n >>> parse_string_variables(content, variables_mapping, {})\n \"/api/users/1000\"\n \"\"\"\n variables_list = self.extract_variables(content)\n for variable_name in variables_list:\n variable_value = self.get_mapping_variable(variable_name, variables_mapping)\n\n if variable_name == \"request\" and isinstance(variable_value, dict) \\\n and \"url\" in variable_value and \"method\" in variable_value:\n # call setup_hooks action with $request\n for key, value in variable_value.items():\n variable_value[key] = parse_data(\n value,\n variables_mapping,\n functions_mapping\n )\n parsed_variable_value = variable_value\n elif \"${}\".format(variable_name) == variable_value:\n # variable_name = \"token\"\n # variables_mapping = {\"token\": \"$token\"}\n parsed_variable_value = variable_value\n else:\n parsed_variable_value = self.parse_data(\n variable_value,\n variables_mapping,\n functions_mapping,\n raise_if_variable_not_found=False\n )\n\n # TODO: replace variable label from $var to {{var}}\n if \"${}\".format(variable_name) == content:\n # content is a variable\n content = parsed_variable_value\n else:\n # content contains one or several variables\n if not isinstance(parsed_variable_value, str):\n parsed_variable_value = builtin_str(parsed_variable_value)\n\n content = content.replace(\n \"${}\".format(variable_name),\n parsed_variable_value, 1\n )\n\n return content\n\n @staticmethod\n def get_mapping_variable(variable_name, variables_mapping):\n \"\"\" get variable from variables_mapping.\n Args:\n variable_name (str): variable name\n variables_mapping (dict): variables 
mapping\n Returns:\n mapping variable value.\n Raises:\n exceptions.VariableNotFound: variable is not found.\n \"\"\"\n try:\n return variables_mapping[variable_name]\n except Exception as err:\n print('[Error]: %s' % err)\n traceback.print_exc()\n\n @staticmethod\n def parse_string_value(str_value):\n \"\"\" parse string to number if possible\n e.g. \"123\" => 123\n \"12.2\" => 12.3\n \"abc\" => \"abc\"\n \"$var\" => \"$var\"\n \"\"\"\n try:\n return ast.literal_eval(str_value)\n except ValueError:\n return str_value\n except SyntaxError:\n # e.g. $var, ${func}\n return str_value\n\nif __name__ == '__main__':\n ps = Parser()\n\n # usage\n def get_name():\n return 'YT'\n\n def get_sum(*args):\n res = 0\n for i in args:\n res += i\n return res\n\n def get_lang(name):\n dt = {'Silly': 'python'}\n return dt.get(name)\n\n def get_info(data):\n info = ''\n for k in data:\n #print('%s: %s' % (k, data[k]))\n pair = '%s: %s' % (k, data[k])\n info = \"%s%s, \" % (info, pair)\n return info[0:-2]\n\n def test1(content):\n print('[INFO]: 参数为实际值')\n fs = ps.extract_functions(content)\n r = ps.parse_string_functions(content, {}, funs_dt)\n print(r)\n\n def test2(content):\n print('[INFO]: 参数为变量')\n fs = ps.extract_functions(content)\n r = ps.parse_string_functions(content, vars_dt, funs_dt)\n print(r)\n\n vars_dt = {\n 'num': '33',\n 'name': 'Silly',\n 'info1': {'name': 'YT', 'height': '173'}\n }\n\n funs_dt = {\n 'get_name': get_name,\n 'get_sum': get_sum,\n 'get_lang': get_lang,\n 'get_info': get_info\n }\n\n st = 'no is ${get_sum(11,22)}'\n st0 = \"my name: ${get_name()}\"\n st1 = \"silly likes ${get_lang('Silly')}\"\n st11 = 'silly likes ${get_lang(\"Silly\")}'\n st2 = \"silly likes ${get_lang($name)}\"\n st3 = \"my info: ${get_info($info)}\"\n #fs = ps.extract_functions(st2)\n #print(\"Func: %s\" % fs)\n #ps.parse_func(st2,funs_dt)\n #r = ps.parse_string_functions(fs[0], {}, funs_dt)\n #r = ps.parse_string_functions(st1, {'name': 'Silly'}, funs_dt)\n #r = ps.parse_string_functions(st2, {}, funs_dt)\n #print(r)\n test1(st11)\n #test2(st2)\n #test1(st)\n #test2(st3)\n #test1(st0)\n","sub_path":"projects/ytools/parse_tpl/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":18129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"274684236","text":"# TOSHIBA - TSDV\n# Team: PHOcr\n# Author:\n# Email:\n# Date create:\n# Last update by: Phung Dinh Tai\n# Date: 01/10/2019\n# Description: This script is used to delete all build and test results from both of\n# Engineering Test and Integration Test and only keep results of latest build\n# on Integration Test when a change was merged into base line of project. This\n# script should be run on master of jenkins where we store all results for jobs.\nimport os\nimport sys\nimport time\nimport argparse\nimport sys_path\nsys_path.insert_sys_path()\nfrom configs.jenkins import JenkinsHelper\nfrom configs.json_key import JobName\nfrom baseapi.file_access import remove_paths\nfrom jenkins.lib_parsers.change_build_mapping_parser import ChangeBuildMappingParser\n\n\nMANUAL = \"\"\"\nThis script is used to delete all build and test results from both of Engineering Test and\nIntegration Test when a change was merged into base line of project. We only keep the latest\nsuccess build as reference. 
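A hedged, editor-added illustration with hypothetical build numbers: if the
merged change produced Integration Test builds 41, 42 and 43 and build 43 is
the latest success, then 41 and 42 are deleted and 43 is kept, i.e.

    builds, latest = ["41", "42", "43"], "43"
    to_delete = [b for b in builds if b != latest]  # -> ["41", "42"]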
Note that there is some specific configuration for the jenkins archive\ndirectory, so this script should only be run on the master node of jenkins in Post Integration.\n\"\"\"\n\n\ndef parse_argument():\n parser = argparse.ArgumentParser(MANUAL)\n parser.add_argument('-g', '--gerrit-number',\n required=True,\n help='Change number whose data will be removed')\n return parser.parse_args()\n\n\ndef main():\n # Parse arguments\n args = parse_argument()\n\n # Calculate time for processes\n start_time = time.time()\n\n # Prepare configuration for deleting. Currently, we will remove all results from Engineering\n # Test and Integration Test of a change when it is merged to the baseline of the project.\n list_job_name = [JobName.ET, JobName.IT]\n change_number = args.gerrit_number\n\n print(\" Deleting old results for jobs: {jobs_list}\"\n \"\".format(jobs_list=\", \".join(list_job_name)))\n\n # Return code when finished\n exit_code = 0\n\n for job_name in list_job_name:\n print(\"\\n+ {job}\".format(job=job_name))\n print(\" Look into: {archive_dir}\"\n \"\".format(archive_dir=os.path.join(JenkinsHelper.ARTIFACT_DIR, job_name)))\n # Get mapping file\n change_build_mapping_file = \\\n JenkinsHelper.get_file_mapping(job_name=job_name,\n file_name=JenkinsHelper.CHANGE_BUILD_MAPPING_FILE)\n\n # Create data parser for change build mapping information\n data_parser = ChangeBuildMappingParser(mapping_file=change_build_mapping_file)\n\n # Get all build numbers corresponding to the change number\n list_build_number = list(data_parser.get_list_build(change_number=change_number))\n\n # For Integration Test, we need to keep the latest build/test result for reference later.\n if job_name == JobName.IT:\n latest_build = data_parser.get_latest_success_build(change_number=change_number)\n list_build_number.remove(latest_build)\n print(\"\\tKeeping {build} in {job}\".format(build=latest_build, job=job_name))\n\n if not list_build_number:\n print(\"\\tWARN: There is no build for {job}!\".format(job=job_name))\n exit_code = 1\n continue\n else:\n print(\"\\tList of {job} builds to be deleted: {build_list}\"\n \"\".format(job=job_name, build_list=\", \".join(list_build_number)))\n\n # Go through all builds in the list and remove them\n for build_number in list_build_number:\n # Get path to archive folder for build of the job\n archive_folder = os.path.join(JenkinsHelper.ARTIFACT_DIR, job_name, build_number)\n # Remove data of old build\n if os.path.exists(archive_folder):\n print(\"Removing {archive_folder}\".format(archive_folder=archive_folder))\n remove_paths(archive_folder)\n else:\n # Notify the user that the result folder for this build number does not exist or was\n # accidentally removed\n print(\"\\tWARN: Archive folder for build {build} of {job} does not exist or was \"\n \"accidentally removed by someone!\".format(build=build_number, job=job_name))\n exit_code = 2\n\n # Calculate execution time\n print(\"\\n Finished in: {execution_time}s\".format(execution_time=time.time()-start_time))\n sys.exit(exit_code)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"utilities/jenkins/delete_old_test_result.py","file_name":"delete_old_test_result.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"344922790","text":"import time\nfrom RPi import GPIO\nimport spidev\nimport math\n\nclass MCP:\n def __init__(self, bus=0, device=0):\n # initialise the spidev object\n self.spi = spidev.SpiDev()\n # open bus 0, device 0\n self.spi.open(bus, device)\n # set 
the SPI clock frequency to 100 kHz\r\n self.spi.max_speed_hz = 10 ** 5\n\n def read_channel(self, ch):\n # assemble the command byte: bit 7 selects single-ended mode, bits 6-4 the channel\n channel = ch << 4 | 128\n # list with the 3 bytes to send (start bit, command byte, padding)\n bytes_out = [0b00000001, channel, 0b00000000]\n # transfer them and get 3 bytes back\n bytes_in = self.spi.xfer2(bytes_out)\n # extract the 10-bit reading; mask the undefined high bits of the first\n # data byte (assuming a 10-bit MCP300x ADC, only its low 2 bits are valid)\n byte1 = bytes_in[1]\n byte2 = bytes_in[2]\n result = (byte1 & 0b11) << 8 | byte2\n # scale or return the raw reading depending on the channel\n if ch == 0:\n result = 100 - (result / 1023 * 100)\n return result\n if ch == 1:\n return result\n if ch == 2:\n return result\n\ndef lees_thermistor():\n print(\"Reading Temperature\")\n old_temp = 24\n while True:\n new_temp = float(MCP().read_channel(1))\n rntc = 10000/((1023/new_temp)-1)\n tkelvin = 1/(1/298.15+1/60000*math.log(rntc/10000))\n tcelsiusraw = tkelvin - 273.15\n global tcelsius\n tcelsius = int(tcelsiusraw)\n print(f\"temp:{tcelsius}\")\n # compare by value; `is not` tests object identity and is unreliable for ints\n if tcelsius != old_temp:\n print(f\"Temperature changed to: {tcelsius}\")\n old_temp = tcelsius\n time.sleep(3)\n elif tcelsius == old_temp:\n print(\"Temperature still consistent\")\n time.sleep(3)\n\n\nlees_thermistor()","sub_path":"Backend/project1/Sensor Testing/Thermistor-test.py","file_name":"Thermistor-test.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"650315728","text":"import whois\nimport requests\nfrom os.path import isfile\nimport argparse\nfrom requests import HTTPError\nfrom requests import ConnectionError\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom requests import ReadTimeout\nfrom socket import timeout\n\n\ndef check_file(file_path):\n if not isfile(file_path):\n message = \"'{}' is not a file or doesn't exist\".format(file_path)\n raise argparse.ArgumentTypeError(message)\n return file_path\n\n\ndef get_args():\n script_usage = \"python check_sites_health.py \"\n parser = argparse.ArgumentParser(\n description=\"How to run check_sites_health.py:\",\n usage=script_usage\n )\n parser.add_argument(\n \"url_list\",\n type=check_file,\n help=\"Specify the path to url_list file\"\n )\n args = parser.parse_args()\n return args\n\n\ndef load_urls4check(path_to_url_list):\n with open(path_to_url_list) as file_handler:\n urls4check = file_handler.read().splitlines()\n return urls4check\n\n\ndef is_server_respond_ok(url):\n user_agent = {'User-agent': 'Mozilla/5.0'}\n try:\n response = requests.get(url, headers=user_agent, timeout=10)\n return response.ok\n except (ConnectionError, HTTPError, timeout, ReadTimeout):\n return False\n\n\ndef get_domain_expiration_date(url):\n try:\n domain_whois = whois.whois(url)\n return domain_whois.expiration_date\n except (timeout, BaseException):\n return None\n\n\ndef is_domain_name_payed(expiration_date, days):\n today = datetime.today()\n until_date = today + timedelta(days=days)\n if isinstance(expiration_date, list):\n return expiration_date[0] >= until_date\n return expiration_date >= until_date\n\n\ndef print_site_status(url, is_server_ok, is_domain_payed):\n separator = \"*\"*40\n print(separator)\n print(\"Site - {}\\nServer is ok - {}\\n\"\n \"Domain paid till next month - {}\".format(\n url,\n is_server_ok,\n is_domain_payed\n ))\n print(separator)\n\n\nif __name__ == \"__main__\":\n args = get_args()\n path_to_url_list = args.url_list\n urls4check = load_urls4check(path_to_url_list)\n for url in urls4check:\n domain_expiration_date = get_domain_expiration_date(url)\n is_server_ok = is_server_respond_ok(url)\n if not 
domain_expiration_date:\n is_domain_payed = False\n else:\n is_domain_payed = is_domain_name_payed(\n domain_expiration_date,\n days=31\n )\n print_site_status(url, is_server_ok, is_domain_payed)","sub_path":"check_sites_health.py","file_name":"check_sites_health.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"286474821","text":"#!/usr/bin/python3\n\"\"\"\n author: Jie Hu\n email: tojiehu@gmail.com\n date: 2020-07-25\n\"\"\"\n\n\n\nimport sys\nimport functools\n\ndef super_split(line):\n s = []\n pre = 0\n inside_quota = False\n for i in range(0, len(line)):\n if line[i] == ',' and inside_quota == False:\n s.append(line[pre:i])\n pre = i + 1\n if line[i] == '\"':\n inside_quota = (not inside_quota)\n s.append(line[pre:])\n return s\n\n\n\"\"\"\n process the complaint and extract the data. \n the product should be low case.\n\"\"\"\ndef process_complaint(line):\n s = super_split(line)\n if len(s) != 18:\n return (-1 , -1, -1, -1)\n idd = s[-1]\n try:\n product = s[1].strip().lower()\n year = int(s[0].strip().split('-')[0])\n company = s[7].strip().lower()\n return (idd, product, year, company)\n except Exception as ex:\n return (-1, -1, -1, -1)\n\n\"\"\"\n 1.read data from the input file\n 2.preprocess the data \n 3.return the product_data\n\"\"\"\ndef process_data(f):\n ids = set()\n ids.add(-1)\n product_data = {}\n with open(f) as complaints:\n complaints.readline()\n for line in complaints:\n line = line.strip()\n (idd, product, year, company) = process_complaint(line)\n # if we get duplicate complaint\n # we should pass\n if idd in ids:\n continue\n ids.add(idd)\n if product not in product_data:\n product_data[product] = {}\n if year not in product_data[product]:\n product_data[product][year] = [0, 0, set(),[]]\n # total number complains from the year and product\n product_data[product][year][0] = product_data[product][year][0] + 1\n # total number of companies receiving at least one complaint for that product and year\n product_data[product][year][2].add(company)\n product_data[product][year][3].append(company)\n product_data[product][year][1] = len(product_data[product][year][2])\n return product_data\n\ndef complain_cmp(a, b):\n pa = a[0]\n pb = b[0]\n if pa[0] == '\"':\n pa = pa[1:-1]\n if pb[0] == '\"':\n pb = pb[1:-1]\n\n if pa > pb:\n return 1\n elif pa < pb:\n return -1\n if a[1] > b[1]:\n return 1\n else:\n return -1\n\ndef process_nums(nums):\n expand_nums = nums * 100\n if expand_nums - int(expand_nums) >= 0.5:\n expand_nums += 1\n return int(expand_nums)\n\n\ndef company_count(total_company):\n company_nums = {}\n n = 0\n for company in total_company:\n company_nums[company] = company_nums.get(company, 0) + 1\n return sorted(company_nums.items(), key=lambda item: item[1])[-1][-1]\n\ndef caculate_and_sort(product_data):\n data = []\n for item in product_data.items():\n product = item[0]\n for year_item in item[1].items():\n year = year_item[0]\n total_complains = year_item[1][0]\n total_company = year_item[1][1]\n max_company = company_count(year_item[1][3])\n percentage = max_company / total_complains\n data.append([product, year, total_complains, total_company, process_nums(percentage)])\n data = sorted(data, key=functools.cmp_to_key(complain_cmp))\n return data\n\n\ndef output(data, output_file_name):\n try:\n # open file and with write\n with open(output_file_name, \"w\") as output_file:\n # write the first line\n for item in data:\n product = item[0]\n year = item[1]\n 
total_complaints = item[2]\n total_company_receive_more = item[3]\n highest_percentage = item[4]\n output_file.writelines(\n \"%s,%d,%d,%d,%d\\n\" % (product, year, total_complaints,total_company_receive_more, highest_percentage)\n )\n except IOError as e:\n print(\"Output Results:I/O error({0}): {1}\".format(e.errno, e.strerror))\n except Exception as ex:\n print(ex)\n pass\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n exit(1)\n complaints_file = sys.argv[1]\n output_file_name = sys.argv[2]\n data = process_data(complaints_file)\n sorted_data = caculate_and_sort(data)\n output(sorted_data, output_file_name)\n","sub_path":"src/consumer_complaints.py","file_name":"consumer_complaints.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"468717180","text":"#!/usr/bin/env python3\n\nimport random\n\nnumber = random.randint(1, 10)\ntries = 1\n\n\n\n\ndef askToUser():\n\tglobal guess, tries\n\tguess = int(input(\"Have a guess: \"))\n\tif guess < number:\n\t\tprint(\"Guess higher...\")\n\tif guess > number:\n\t\tprint(\"Guess lower...\")\n\n\twhile guess != number:\n\t\ttries += 1\n\t\tguess = int(input(\"Try again:\"))\n\t\tif guess < number:\n\t\t\tprint(\"Guess higher\")\n\t\tif guess > number:\n\t\t\tprint(\"Guess lower\")\n\tprint(\"You're right! you win! The number was\", number, \"and it only took\", tries, \"tries!\")\n\nif __name__ == '__main__':\n\tuname = input(\"Hello! What is your username?\")\n\n\tprint(\"Hello\", uname + \".\")\n\n\tquestion = input(\"Would you like to play a game? [Y/n]\")\n\tif question == \"n\":\n\t\tprint(\"oh... okay\")\n\t\tpass\n\telif question == \"Y\":\n\t\tprint(\"I'm thinking of a number between 1 and 10\")\n\t\taskToUser()\n\telse:\n\t\tprint(\"Invalid command\")","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"365187267","text":"def main():\n num = int(input())\n for i in range(0, num):\n length = int(input())\n nums = list(map(int, input().split(\" \")))\n index = int(input())\n print(find(nums, index))\n\n\ndef find(nums, index):\n left = 0\n right = len(nums) - 1\n right_first = True\n while left != right:\n if right_first:\n if nums[left] > nums[right]:\n temp = nums[left]\n nums[left] = nums[right]\n nums[right] = temp\n left += 1\n right_first = False\n else:\n right -= 1\n else:\n if nums[left] > nums[right]:\n temp = nums[left]\n nums[left] = nums[right]\n nums[right] = temp\n right -= 1\n right_first = True\n else:\n left += 1\n\n if len(nums) > 2:\n if left + 1 == index:\n return nums[left]\n elif left + 1 > index:\n return find(nums[:left], index)\n else:\n return find(nums[left + 1:], index - left - 1)\n else:\n return nums[index - 1]\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Code/CodeRecords/2293/60641/279578.py","file_name":"279578.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"258223935","text":"#%%\r\n#pCNN\r\n\r\n#Experiment that concatenates the STFT results\r\n\r\n#Data augmentation is done beforehand\r\n#Improved the data loading ➡ faster\r\n#Also implemented sending data to Unity\r\n#Preprocessing implemented from scratch\r\n#No cross-validation ➡ scores lower than readsum and overfits\r\n#Experiment on the normalization direction ➡ hardly changes anything\r\n#Added window-function processing\r\n#Filtering; changed stft to take the real part (abs barely changes accuracy but cannot be restored by ifft)\r\n#Changed to a module structure\r\n#Fixed parameters are shared between modules\r\n\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom Read_Brain_Data import read_brain_data\r\nfrom Random_Gragh import 
random_gragh\r\nimport param\r\n\r\n#被験者を指定\r\nprint(\"subject\"+str(param.subject))\r\n\r\nprint(\"data_magnigication:\",param.data_magnification)\r\n\r\n#脳波データの型を初期化\r\ndata_x = np.zeros((param.ch_number,param.all_sample_number,param.extraction_freq),dtype=\"float64\")\r\ndata_y = np.zeros(param.all_sample_number,dtype=\"int32\")\r\nprint(\"data_x.shape\",data_x.shape)\r\n\r\nrun_cnt = 0\r\nfor i in range(param.subject_number): #各被験者ごとのループ\r\n read_brain_data(data_x,data_y,run_cnt)\r\n\r\n#グラフ描画\r\nx = np.linspace(0,param.imagination_time,param.extraction_freq)\r\nrandom_gragh(x,data_x)\r\n\r\n\r\n\r\n#%%\r\n#----前処理\r\nfrom scipy.fftpack import fft,fftfreq,ifft\r\n\r\nfft_wave_complex = np.zeros((param.ch_number,param.all_sample_number,param.extraction_freq),dtype=\"complex128\")\r\n\r\n\"\"\"\r\nf = fft(data_x[0][0])\r\ng = fftfreq(n=data_x[0][0].size, d=1/sampling_freq)\r\nprint(f.shape)\r\nprint(g.shape)\r\n\"\"\"\r\n\r\na = 1\r\nplt.plot(x,data_x[0][a])\r\nplt.title(\"label %d\"%a)\r\nplt.show()\r\n\r\n#window function process\r\nwindow_function = param.hamming_window\r\ndata_x *= window_function\r\n#data_x[:][:] /= np.amax(data_x[0][a]) #標準化して窓関数と一致するか検証\r\n\r\n\r\nplt.plot(x,data_x[0][a])\r\nplt.plot(x,window_function)\r\nplt.show()\r\n\r\n\r\n#fft\r\nfft_wave_complex = fft(data_x)\r\nfft_fre = fftfreq(n=data_x[0][0].size, d=1/param.sampling_freq)\r\n\r\n\"\"\"\r\nifft ➡ 復元されるかの実験 ➡ realでやらなければ復元されない\r\nfft_wave_abs = abs(fft_wave_complex) #absはまちがいifftで復元されない\r\ndata_x[:][:] = ifft(fft_wave_abs[:][:])\r\ndata_x[:][:] = np.real(ifft(fft_wave_complex))\r\nplt.plot(x,data_x[0][0])\r\nplt.show()\r\n\"\"\"\r\n\r\n#周波数で正の値をとるインデックスをとる➡フーリエ変換した結果をみるため\r\npidxs = np.where(fft_fre >= 0)\r\n#print(pidxs)\r\n\r\nfreq, power = fft_fre[pidxs], np.abs(fft_wave_complex[0][a])[pidxs]\r\nplt.plot(freq,power)\r\nplt.title(\"no filter\")\r\nplt.show()\r\n\r\n#2~60Hzバンドパスフィルター\r\n#-------------------------------------\r\nmax_freq = 30\r\nmin_freq = 2\r\n#-------------------------------------\r\n#print(\"%d-%dHz band pass filter\"% (min_freq,max_freq))\r\nfft_pass = np.where((min_freq<=np.abs(fft_fre)) & (np.abs(fft_fre)<=max_freq),fft_fre,0)\r\nfft_pass = np.where(fft_pass==0,fft_pass,1)\r\nfft_wave_complex[:][:] *= fft_pass\r\n\r\nfreq, power = fft_fre[pidxs], np.abs(fft_wave_complex[0][a])[pidxs]\r\nplt.plot(freq,power)\r\nplt.title(\"%d-%dHz band pass filter\"% (min_freq,max_freq))\r\nplt.show()\r\n\r\n#50Hz noach filter ➡50Hz付近の値を除去できていない(電源ではないのか)\r\n#print(\"50Hz notch filter\")\r\nfft_pass = np.where(np.abs(fft_fre)!=50,fft_fre,0)\r\n\r\nfft_pass = np.where(fft_pass==0,fft_pass,1)\r\nfft_pass[0] = 1\r\nfft_wave_complex[:][:] *= fft_pass\r\n\r\nfreq, power = fft_fre[pidxs], np.abs(fft_wave_complex[0][a])[pidxs]\r\nplt.plot(freq,power)\r\nplt.title(\"50Hz noach filter\")\r\nplt.show()\r\n\r\n\"\"\"\r\n#0.5Hzハイパスフィルター\r\nprint(\"High-pass filter with a cutoff frequency of 0.5Hz\")\r\nfft_pass = np.where(np.abs(fft_fre)>0.5,fft_fre,0)\r\nfft_pass = np.where(fft_pass==0,fft_pass,1)\r\nfft_wave_complex[:][:] *= fft_pass\r\n\"\"\"\r\n\r\n#ifft\r\nfft_wave_abs = abs(fft_wave_complex)\r\n#data_x[:][:] = ifft(fft_wave_abs[:][:])\r\ndata_x[:][:] = np.real(ifft(fft_wave_complex))\r\n\r\n#μ+-6σの修正外れ値にクリップ\r\n\r\n#各チャンネルで(xi-μi)/σiで標準化\r\nprint(\"Normalized by (xi-μ) / σi for each channel\")\r\navg = np.zeros((param.ch_number),dtype=\"float64\")\r\nstd = np.zeros((param.ch_number),dtype=\"float64\")\r\n###\r\n#列288へ\r\n#data_x = data_x.reshape((ch_number,extraction_freq,all_sample_number))\r\n###\r\n#for 
ch in range(ch_number):\r\n# for i in range(all_sample_number): #行\r\n# for i in range(extraction_freq): #列\r\n# avg = np.average(data_x[ch][i])\r\n# std = np.std(data_x[ch][i])\r\n# data_x[ch][i] = (data_x[ch][i] - avg) / std\r\n# avg[ch] = np.average(data_x[ch]) #chごとの全体\r\n# std[ch] = np.std(data_x[ch]) #chごとの全体\r\n\r\n#for ch in range(ch_number):\r\n# data_x[ch] = (data_x[ch] - avg[ch]) / std[ch]\r\n#print(np.average(data_x[0]))\r\n#print(np.std(data_x[0]))\r\n\r\n#\"\"\"\r\n#列\r\n#data_x = data_x.reshape((ch_number,all_sample_number,extraction_freq))\r\n\r\n#全体\r\ndata_x = ( data_x[:] - np.average(data_x[:]) ) / np.std(data_x[:])\r\n#print(np.average(data_x))\r\n#print(np.std(data_x))\r\n\r\n\"\"\"\r\n#ノイズ除去のためにreshape\r\ndata_x = data_x.reshape((all_sample_number,ch_number,extraction_freq))\r\n\r\n#EOG,EMGアーティファクト除去\r\nprint(\"EOG,EMG artifact removal\")\r\nprint(\"asn:\",all_sample_number)\r\ntmp = np.zeros(ch_number,dtype=\"float64\")\r\n\r\n##max<(x_ne) - min(x_ne)>_N\r\nprint(\"< max(x_ne) - min(x_ne) >_N\")\r\ndelete_list = []\r\nfor asn in range(all_sample_number):\r\n tmp[0] = np.amax(data_x[asn][0]) - np.min(data_x[asn][0])\r\n tmp[1] = np.amax(data_x[asn][1]) - np.min(data_x[asn][1])\r\n tmp[2] = np.amax(data_x[asn][2]) - np.min(data_x[asn][2])\r\n #print(tmp)\r\n z = np.average(tmp)\r\n if z > threshold:\r\n delete_list.append(asn)\r\n all_sample_number -= 1\r\n #print(delete_list)\r\n\r\ndata_x = np.delete(data_x,delete_list,0)\r\ndata_y = np.delete(data_y,delete_list,0)\r\nprint(\"asn:\",all_sample_number)\r\n\r\ndata_x = data_x.reshape(ch_number,all_sample_number,extraction_freq)\r\n\"\"\"\r\n\r\n#グラフ描画\r\ny = data_x[0][a] #ランダムに表示\r\nplt.plot(x,y)\r\nplt.title(\"label %d\"%a)\r\nplt.show()\r\nrandom_gragh(x,data_x)\r\n\r\n\r\n#%%\r\n#----STFT\r\nfrom scipy.signal import stft\r\n\r\nheight = param.input_data_height\r\nwidth = param.input_data_width\r\n\r\n#--------------------------------input_dataの型を計算で求める➡求める必要ないか?\r\ninput_data = np.zeros((param.ch_number,param.all_sample_number,height,width),dtype=\"complex128\")\r\ninput_data_stick = np.zeros((param.all_sample_number,height*3,width),dtype=\"float64\")\r\n\r\nf, t, input_data[0:3,:param.all_sample_number] = stft(data_x[0:3,:param.all_sample_number],fs=param.sampling_freq,nperseg=param.nperseg_number,noverlap=param.over_lap_number)\r\n\r\nprint(\"len(t):\",len(t))\r\nprint(\"len(f:)\",len(f))\r\n\r\nf_stick = f\r\nf_stick = np.append(f_stick,f+128)\r\nf_stick = np.append(f_stick,f+256)\r\n\r\n#print(f_stick)\r\n\r\n#input_data = abs(input_data)\r\ninput_data = np.real(input_data) #➡realでないと復元されないはず,absでも精度は出る\r\n\r\ninput_data_stick[:,0:65,0:65] = input_data[0,:,0:65,0:65]\r\ninput_data_stick[:,65:130,0:65] = input_data[1,:,0:65,0:65]\r\ninput_data_stick[:,130:195,0:65] = input_data[2,:,0:65,0:65]\r\n\r\n#stft描画\r\nfig = 5\r\ni = 0\r\nj = 0\r\n#plt.pcolormesh(t,f,input_data[i][j],vmin=0)\r\nplt.pcolormesh(t,f_stick,input_data_stick[i],vmin=0)\r\nplt.ylim([f[1],f[-1]])\r\nplt.title(\"STFT Magnitude\")\r\nplt.legend(loc=\"upper right\",fontsize=fig,title=\"input_data[%d][%d]\"%(i,j))\r\nplt.xlabel(\"Time[sec]\")\r\nplt.ylabel(\"Frequency[Hz]\")\r\npp=plt.colorbar (orientation=\"vertical\") # カラーバーの表示\r\npp.set_label(\"Label\", fontname=\"Arial\", fontsize=24) #カラーバーのラベル\r\nplt.show()\r\n\r\n\"\"\"\r\n#max-minを確認➡STFTではせいぜい2.0もない\r\nfor ch in range(ch_number):\r\n for asn in range(all_sample_number):\r\n max = np.amax(input_data[ch][asn])\r\n min = np.min(input_data[ch][asn])\r\n 
print(\"max-min\",max-min)\r\n\"\"\"\r\n\"\"\"\r\n#各チャンネルで正規化\r\nfor ch in range(ch_number):\r\n for asn in range(all_sample_number):\r\n max = np.amax(input_data[ch][asn])\r\n input_data[ch][asn] = input_data[ch][asn] / max\r\n\r\nprint(input_data[0][0])\r\n\"\"\"\r\n\r\ninput_data = input_data.reshape(param.all_sample_number,len(f),len(t),param.ch_number)\r\ninput_data_stick = input_data_stick.reshape((param.all_sample_number,len(f)*param.ch_number,len(t),1))\r\nprint(\"input_data.shape:\",input_data.shape)\r\nprint(\"input_data_stick.shape:\",input_data_stick.shape)\r\n\r\n\r\n\r\n#%%\r\n#----データ整理\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.utils.np_utils import to_categorical\r\n\r\n#正解ラベルYを(each_data_number,)へ\r\nprint(data_y.shape)\r\nprint(input_data_stick.shape)\r\n\r\n#出力Yのラベルを1,2,3から0,1,2に変更\r\nfor i in range(len(data_y)):\r\n if data_y[i] == 1:\r\n data_y[i] = 0\r\n elif data_y[i] == 2:\r\n data_y[i] = 1\r\n elif data_y[i] == 3:\r\n data_y[i] = 2\r\n\r\n#正解ラベルYをont-hot表現へ\r\ndata_y = to_categorical(data_y,param.classes)\r\n\r\n#データの分割\r\nx_train, x_test, y_train, y_test = train_test_split(input_data_stick, data_y, test_size=param.test_rate)\r\n\r\nprint(\"x_train:\",x_train.shape)\r\nprint(\"y_train:\",y_train.shape)\r\nprint(\"x_test:\",x_test.shape)\r\nprint(\"y_test:\",y_test.shape)\r\n\r\n\r\n\r\n#%%\r\n#----pCNNモデル構築\r\nfrom keras.optimizers import Adam,RMSprop,Adagrad,Adadelta,Adamax,Nadam\r\nfrom CNN import create_cnn_model\r\n\r\noptimizer = Adam(lr=param.lr)\r\n\r\nmodel = create_cnn_model(optimizer,input_data_stick[0].shape)\r\nmodel.summary()\r\n\r\n\r\n\r\n#%%\r\n#----学習\r\nsave_cnn_number = 1\r\n#------------------------------------------------\r\n#gpuの必要なメモリしか使わない文\r\nimport tensorflow as tf\r\nfrom keras.backend import tensorflow_backend\r\n\r\nconfig = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\r\nsession = tf.Session(config=config)\r\ntensorflow_backend.set_session(session)\r\n#------------------------------------------------\r\n\r\nhistory = model.fit(x_train,\r\n y_train,\r\n epochs=param.epoch,\r\n batch_size=256,\r\n validation_data=(x_test,y_test),\r\n )\r\n\r\n#maxを保存するようにする必要ある\r\n#モデル保存\r\nopen(\"cnn_\"+str(save_cnn_number)+\".json\",\"w\").write(model.to_json())\r\n\r\n#学習済みの重みを保存\r\nmodel.save_weights(\"cnn_\"+str(save_cnn_number)+\"_weight.h5\")\r\n\r\n\r\n\r\n#%%\r\n#----グラフ描画\r\n#loss\r\n#plt.figure(facecolor=\"azure\", edgecolor=\"coral\", linewidth=2)\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\n#plt.plot(all_loss)\r\n#plt.plot(all_val_loss)\r\n\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'test'], loc='upper left')\r\nplt.show()\r\n\r\n#Accuracy\r\n#plt.figure(facecolor=\"azure\", edgecolor=\"coral\", linewidth=2)\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\n#plt.plot(all_acc)\r\n#plt.plot(all_val_acc)\r\n\r\nplt.title('model accuracy')\r\nplt.ylabel('accuracy')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'test'], loc='upper left')\r\nplt.show()\r\n\r\n#print(max(all_val_acc))\r\nprint(max(history.history[\"val_acc\"]))\r\n#print(len(y_test))\r\n\r\n\r\n\r\n#%%\r\n#----交差検証\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.model_selection import train_test_split\r\n#--------------------------------------------\r\n#gpuの必要なメモリしか使わない文\r\nimport tensorflow as tf\r\nfrom keras.backend import tensorflow_backend\r\n\r\nsave_cnn_number = 1\r\n\r\nconfig = 
tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\r\nsession = tf.Session(config=config)\r\ntensorflow_backend.set_session(session)\r\n#--------------------------------------------\r\nkf = KFold(n_splits=5, shuffle=True)\r\nall_loss=[]\r\nall_val_loss=[]\r\nall_acc=[]\r\nall_val_acc=[]\r\nfor train_index, val_index in kf.split(x_train,y_train):\r\n\r\n train_data=x_train[train_index]\r\n train_label=y_train[train_index]\r\n val_data=x_train[val_index]\r\n val_label=y_train[val_index]\r\n\r\n history = model.fit(train_data,\r\n train_label,\r\n epochs=param.epoch,\r\n batch_size=256,\r\n validation_data=(val_data,val_label),\r\n )\r\n\r\n loss=history.history['loss']\r\n val_loss=history.history['val_loss']\r\n acc=history.history['acc']\r\n val_acc=history.history['val_acc']\r\n\r\n all_loss.extend(loss)\r\n all_val_loss.extend(val_loss)\r\n all_acc.extend(acc)\r\n all_val_acc.extend(val_acc)\r\n#----------------------------------\r\n#maxを保存するようにする必要ある\r\n#モデル保存\r\nopen(\"cnn_\"+str(save_cnn_number)+\".json\",\"w\").write(model.to_json())\r\n\r\n#学習済みの重みを保存\r\nmodel.save_weights(\"cnn_\"+str(save_cnn_number)+\"_weight.h5\")\r\n\r\n\r\n\r\n#%%\r\n#----未知データで検証\r\nfrom keras.models import model_from_json\r\n\r\ntest_number = len(y_test)\r\nmodel_number = 1\r\nfile_number = str(1)\r\npredict_y = np.zeros((model_number,param.classes))\r\npredict_label = np.zeros((model_number),dtype=\"int32\")\r\n\r\n#モデル読み込み\r\nmodel = model_from_json(open(\"cnn_\" + file_number + \".json\",\"r\").read())\r\n\r\n#重み読み込み\r\nmodel.load_weights(\"cnn_\" + file_number + \"_weight.h5\")\r\n\r\n#data_yをone-hot表現から戻す➡試験的に必要なだけ\r\n#\r\n#data_y = np.argmax(data_y,axis=1)\r\n#\r\n\r\ncnt = 0\r\nall_cnt = 0\r\n\r\n#読み込んだ学習済みモデルでtest\r\nfor label in range(test_number):\r\n #並列処理で精度が改善するか確認\r\n predict_y[0] = model.predict(x_test[label].reshape(1,len(f),len(t),param.ch_number))\r\n for j in range(model_number):\r\n if predict_y[j][0] > predict_y[j][1]:\r\n predict_label[j] = 0\r\n else:\r\n predict_label[j] = 1\r\n\r\n #多数決\r\n cnt_label_1 = np.count_nonzero(predict_label == 0)\r\n cnt_label_2 = np.count_nonzero(predict_label == 1)\r\n\r\n if cnt_label_1 > cnt_label_2:\r\n determined_label = 0\r\n else:\r\n determined_label = 1\r\n\r\n if determined_label == data_y[label]:\r\n cnt += 1\r\n all_cnt += 1\r\n\r\nscore = cnt / all_cnt\r\nprint(\"score:\",score)\r\n\r\n\r\n\r\n#%%\r\n#----予測ラベルをunityに送信\r\nimport socket\r\nimport random\r\nimport time\r\nfrom tqdm import tqdm\r\n\r\nHOST = '127.0.0.1'\r\nPORT = 50007\r\n\r\ntest_number = 10000\r\ntest_number = len(y_test)\r\nmodel_number = 1\r\nmodel = []\r\npredict_y = np.zeros((model_number,param.classes))\r\npredict_label = np.zeros((model_number),dtype=\"int32\")\r\n\r\n#data_yをone-hot表現から戻す➡試験的に必要なだけ\r\n#\r\n#data_y = np.argmax(data_y,axis=1)\r\n#\r\n\r\n#モデル読み込み\r\nmodel_1 = model_from_json(open(\"cnn_\"+str(1)+\".json\",\"r\").read())\r\nmodel_2 = model_from_json(open(\"cnn_\"+str(2)+\".json\",\"r\").read())\r\nmodel_3 = model_from_json(open(\"cnn_\"+str(3)+\".json\",\"r\").read())\r\nmodel_4 = model_from_json(open(\"cnn_\"+str(4)+\".json\",\"r\").read())\r\nmodel_5 = model_from_json(open(\"cnn_\"+str(5)+\".json\",\"r\").read())\r\nmodel_6 = 
model_from_json(open(\"cnn_\"+str(6)+\".json\",\"r\").read())\r\n\r\n#重み読み込み\r\nmodel_1.load_weights(\"cnn_\"+str(1)+\"_weight.h5\")\r\nmodel_2.load_weights(\"cnn_\"+str(2)+\"_weight.h5\")\r\nmodel_3.load_weights(\"cnn_\"+str(3)+\"_weight.h5\")\r\nmodel_4.load_weights(\"cnn_\"+str(4)+\"_weight.h5\")\r\nmodel_5.load_weights(\"cnn_\"+str(5)+\"_weight.h5\")\r\nmodel_6.load_weights(\"cnn_\"+str(6)+\"_weight.h5\")\r\n\r\ncnt = 0\r\nall_cnt = 0\r\n\r\n#読み込んだ学習済みモデルで予測\r\n#while True:\r\nfor i in tqdm(range(test_number)):\r\n #並列処理で精度が改善するか確認\r\n label = random.randrange(len(y_test))\r\n predict_y[0] = model_6.predict(x_test[label].reshape(1,len(f),len(t),param.ch_number))\r\n \"\"\"\r\n predict_y[0] = model_1.predict(x_test[label].reshape(1,len(f),len(t),ch_number))\r\n predict_y[1] = model_2.predict(x_test[label].reshape(1,len(f),len(t),ch_number))\r\n predict_y[2] = model_3.predict(x_test[label].reshape(1,len(f),len(t),ch_number))\r\n predict_y[3] = model_4.predict(x_test[label].reshape(1,len(f),len(t),ch_number))\r\n predict_y[4] = model_5.predict(x_test[label].reshape(1,len(f),len(t),ch_number))\r\n \"\"\"\r\n \"\"\"\r\n #print(predict_y)\r\n #print(predict_y.shape)\r\n for i in range(model_number):\r\n if predict_y[i][0] > predict_y[i][1]:\r\n predict_label[i] = 0\r\n else:\r\n predict_label[i] = 1\r\n\r\n cnt_label_1 = np.count_nonzero(predict_label == 0)\r\n cnt_label_2 = np.count_nonzero(predict_label == 1)\r\n\r\n if cnt_label_1 > cnt_label_2:\r\n determined_label = 0\r\n else:\r\n determined_label = 1\r\n\r\n if determined_label == data_y[label]:\r\n cnt += 1\r\n all_cnt += 1\r\n #if all_cnt == test_number:\r\n if i == test_number - 1:\r\n score = cnt / all_cnt\r\n print(\"score:\",score)\r\n break\r\n\"\"\"\r\n prediction_label = str(0)\r\n#\"\"\"\"\r\n #予測ラベルを送信\r\n client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\n print(\"send_label:\",prediction_label)\r\n client.sendto(prediction_label.encode('utf-8'),(HOST,PORT))\r\n time.sleep(1.0)\r\n#\"\"\"\r\n\r\n\r\n\r\n#%%\r\nprint(cnt / all_cnt)\r\n#print(len(y_test))\r\n#print(y_test.shape)\r\n#%%\r\n","sub_path":"pCNN_stick_stft_fft_filter_module.py","file_name":"pCNN_stick_stft_fft_filter_module.py","file_ext":"py","file_size_in_byte":17129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"648333063","text":"# Copyright 2018 Google. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"SSD (via ResNet50) model definition.\n\nDefines the SSD model and loss functions from this paper:\n\nhttps://arxiv.org/pdf/1708.02002\n\nUses the ResNet model as a basis.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\nfrom REDACTED.ssd import ssd_constants\nfrom REDACTED.util import image_util\n\n_NMS_TILE_SIZE = 256\n\nFLAGS = tf.flags.FLAGS\n\n\ndef _bbox_overlap(boxes, gt_boxes):\n \"\"\"Calculates the overlap between proposal and ground truth boxes.\n\n Some `gt_boxes` may have been padded. The returned `iou` tensor for these\n boxes will be -1.\n\n Args:\n boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of\n proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The\n last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.\n gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This\n tensor might have paddings with a negative value.\n\n Returns:\n iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES].\n \"\"\"\n with tf.name_scope('bbox_overlap'):\n bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(\n value=boxes, num_or_size_splits=4, axis=2)\n gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(\n value=gt_boxes, num_or_size_splits=4, axis=2)\n\n # Calculates the intersection area.\n i_xmin = tf.maximum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1]))\n i_xmax = tf.minimum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1]))\n i_ymin = tf.maximum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1]))\n i_ymax = tf.minimum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1]))\n i_area = tf.maximum((i_xmax - i_xmin), 0) * tf.maximum((i_ymax - i_ymin), 0)\n\n # Calculates the union area.\n bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)\n gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)\n # Adds a small epsilon to avoid divide-by-zero.\n u_area = bb_area + tf.transpose(gt_area, [0, 2, 1]) - i_area + 1e-8\n\n # Calculates IoU.\n iou = i_area / u_area\n\n return iou\n\n\ndef _self_suppression(iou, _, iou_sum):\n batch_size = tf.shape(iou)[0]\n can_suppress_others = tf.cast(\n tf.reshape(tf.reduce_max(iou, 1) <= 0.5, [batch_size, -1, 1]), iou.dtype)\n iou_suppressed = tf.reshape(\n tf.cast(tf.reduce_max(can_suppress_others * iou, 1) <= 0.5, iou.dtype),\n [batch_size, -1, 1]) * iou\n iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2])\n return [\n iou_suppressed,\n tf.reduce_any(iou_sum - iou_sum_new > 0.5), iou_sum_new\n ]\n\n\ndef _cross_suppression(boxes, box_slice, iou_threshold, inner_idx):\n batch_size = tf.shape(boxes)[0]\n new_slice = tf.slice(boxes, [0, inner_idx * _NMS_TILE_SIZE, 0],\n [batch_size, _NMS_TILE_SIZE, 4])\n iou = _bbox_overlap(new_slice, box_slice)\n ret_slice = tf.expand_dims(\n tf.cast(tf.reduce_all(iou < iou_threshold, [1]), 
box_slice.dtype),\n 2) * box_slice\n return boxes, ret_slice, iou_threshold, inner_idx + 1\n\n\ndef _suppression_loop_body(boxes, iou_threshold, output_size, idx):\n \"\"\"Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE).\n\n Args:\n boxes: a tensor with a shape of [batch_size, anchors, 4].\n iou_threshold: a float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n output_size: an int32 tensor of size [batch_size]. Representing the number\n of selected boxes for each batch.\n idx: an integer scalar representing induction variable.\n\n Returns:\n boxes: updated boxes.\n iou_threshold: pass down iou_threshold to the next iteration.\n output_size: the updated output_size.\n idx: the updated induction variable.\n \"\"\"\n num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE\n batch_size = tf.shape(boxes)[0]\n\n # Iterates over tiles that can possibly suppress the current tile.\n box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0],\n [batch_size, _NMS_TILE_SIZE, 4])\n _, box_slice, _, _ = tf.while_loop(\n lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,\n _cross_suppression, [boxes, box_slice, iou_threshold,\n tf.constant(0)])\n\n # Iterates over the current tile to compute self-suppression.\n iou = _bbox_overlap(box_slice, box_slice)\n mask = tf.expand_dims(\n tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape(\n tf.range(_NMS_TILE_SIZE), [-1, 1]), 0)\n iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype)\n suppressed_iou, _, _ = tf.while_loop(\n lambda _iou, loop_condition, _iou_sum: loop_condition, _self_suppression,\n [iou, tf.constant(True),\n tf.reduce_sum(iou, [1, 2])])\n suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0\n box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2)\n\n # Uses box_slice to update the input boxes.\n mask = tf.reshape(\n tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])\n boxes = tf.tile(tf.expand_dims(\n box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape(\n boxes, [batch_size, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask)\n boxes = tf.reshape(boxes, [batch_size, -1, 4])\n\n # Updates output_size.\n output_size += tf.reduce_sum(\n tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1])\n return boxes, iou_threshold, output_size, idx + 1\n\n\ndef non_max_suppression_padded(scores, boxes, max_output_size, iou_threshold):\n \"\"\"A wrapper that handles non-maximum suppression.\n\n Assumption:\n * The boxes are sorted by scores unless the box is a dot (all coordinates\n are zero).\n * Boxes with higher scores can be used to suppress boxes with lower scores.\n\n The overal design of the algorithm is to handle boxes tile-by-tile:\n\n boxes = boxes.pad_to_multiply_of(tile_size)\n num_tiles = len(boxes) // tile_size\n output_boxes = []\n for i in range(num_tiles):\n box_tile = boxes[i*tile_size : (i+1)*tile_size]\n for j in range(i - 1):\n suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]\n iou = _bbox_overlap(box_tile, suppressing_tile)\n # if the box is suppressed in iou, clear it to a dot\n box_tile *= _update_boxes(iou)\n # Iteratively handle the diagnal tile.\n iou = _box_overlap(box_tile, box_tile)\n iou_changed = True\n while iou_changed:\n # boxes that are not suppressed by anything else\n suppressing_boxes = _get_suppressing_boxes(iou)\n # boxes that are suppressed by suppressing_boxes\n suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)\n # clear iou to 0 for boxes that 
are suppressed, as they cannot be used\n # to suppress other boxes any more\n new_iou = _clear_iou(iou, suppressed_boxes)\n iou_changed = (new_iou != iou)\n iou = new_iou\n # remaining boxes that can still suppress others, are selected boxes.\n output_boxes.append(_get_suppressing_boxes(iou))\n if len(output_boxes) >= max_output_size:\n break\n\n Args:\n scores: a tensor with a shape of [batch_size, anchors].\n boxes: a tensor with a shape of [batch_size, anchors, 4].\n max_output_size: a scalar integer `Tensor` representing the maximum number\n of boxes to be selected by non max suppression.\n iou_threshold: a float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n\n Returns:\n nms_scores: a tensor with a shape of [batch_size, anchors]. It has same\n dtype as input scores.\n nms_proposals: a tensor with a shape of [batch_size, anchors, 4]. It has\n same dtype as input boxes.\n \"\"\"\n # TODO(wangtao): Filter out score <= ssd_constants.MIN_SCORE.\n with tf.name_scope('nms'):\n batch_size = tf.shape(boxes)[0]\n num_boxes = tf.shape(boxes)[1]\n pad = tf.cast(\n tf.ceil(tf.cast(num_boxes, tf.float32) / _NMS_TILE_SIZE),\n tf.int32) * _NMS_TILE_SIZE - num_boxes\n boxes = tf.pad(tf.cast(boxes, tf.float32), [[0, 0], [0, pad], [0, 0]])\n scores = tf.pad(tf.cast(scores, tf.float32), [[0, 0], [0, pad]])\n num_boxes += pad\n\n def _loop_cond(unused_boxes, unused_threshold, output_size, idx):\n return tf.logical_and(\n tf.reduce_min(output_size) < max_output_size,\n idx < num_boxes // _NMS_TILE_SIZE)\n\n selected_boxes, _, output_size, _ = tf.while_loop(\n _loop_cond, _suppression_loop_body, [\n boxes, iou_threshold,\n tf.zeros([batch_size], tf.int32),\n tf.constant(0)\n ])\n idx = num_boxes - tf.cast(\n tf.nn.top_k(\n tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) *\n tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0],\n tf.int32)\n idx = tf.minimum(idx, num_boxes - 1)\n idx = tf.reshape(\n idx + tf.reshape(tf.range(batch_size) * num_boxes, [-1, 1]), [-1])\n boxes = tf.reshape(\n tf.gather(tf.reshape(boxes, [-1, 4]), idx),\n [batch_size, max_output_size, 4])\n boxes = boxes * tf.cast(\n tf.reshape(tf.range(max_output_size), [1, -1, 1]) < tf.reshape(\n output_size, [-1, 1, 1]), boxes.dtype)\n scores = tf.reshape(\n tf.gather(tf.reshape(scores, [-1, 1]), idx),\n [batch_size, max_output_size])\n scores = scores * tf.cast(\n tf.reshape(tf.range(max_output_size), [1, -1]) < tf.reshape(\n output_size, [-1, 1]), scores.dtype)\n return scores, boxes\n\n\ndef batch_norm_relu(inputs,\n is_training_bn,\n params,\n relu=True,\n init_zero=False,\n data_format='channels_last',\n name=None):\n \"\"\"Performs a batch normalization followed by a ReLU.\n\n Args:\n inputs: `Tensor` of shape `[batch, channels, ...]`.\n is_training_bn: `bool` for whether the model is training.\n params: params of the model, a dict including `distributed_group_size`\n and `num_shards`.\n relu: `bool` if False, omits the ReLU operation.\n init_zero: `bool` if True, initializes scale parameter of batch\n normalization with 0 instead of 1 (default).\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n name: the name of the batch normalization layer\n\n Returns:\n A normalized `Tensor` with the same `data_format`.\n \"\"\"\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n if data_format == 'channels_first':\n 
axis = 1\n else:\n axis = 3\n\n if params['distributed_group_size'] > 0:\n if params['tpu_slice_row'] > 0 and params['tpu_slice_col'] > 0:\n physical_shape = (params['tpu_slice_row'], params['tpu_slice_col'])\n else:\n physical_shape = None\n\n if params['dbn_tile_row'] > 0 and params['dbn_tile_col'] > 0:\n tile_shape = (params['dbn_tile_row'], params['dbn_tile_col'])\n else:\n tile_shape = None\n\n input_partition_dims = FLAGS.input_partition_dims\n inputs = image_util.distributed_batch_norm(\n inputs=inputs,\n decay=ssd_constants.BATCH_NORM_DECAY,\n epsilon=ssd_constants.BATCH_NORM_EPSILON,\n is_training=is_training_bn,\n gamma_initializer=gamma_initializer,\n num_shards=params['num_shards'],\n distributed_group_size=params['distributed_group_size'],\n physical_shape=physical_shape,\n tile_shape=tile_shape,\n input_partition_dims=input_partition_dims)\n else:\n inputs = tf.layers.batch_normalization(\n inputs=inputs,\n axis=axis,\n momentum=ssd_constants.BATCH_NORM_DECAY,\n epsilon=ssd_constants.BATCH_NORM_EPSILON,\n center=True,\n scale=True,\n training=is_training_bn,\n fused=True,\n gamma_initializer=gamma_initializer,\n name=name)\n\n if relu:\n inputs = tf.nn.relu(inputs)\n return inputs\n\n\ndef fixed_padding(inputs, kernel_size, data_format='channels_last'):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]` or\n `[batch, height, width, channels]` depending on `data_format`.\n kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`\n operations. Should be a positive integer.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A padded `Tensor` of the same `data_format` with size either intact\n (if `kernel_size == 1`) or padded (if `kernel_size > 1`).\n \"\"\"\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if data_format == 'channels_first':\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs\n\n\ndef space_to_depth_fixed_padding(inputs,\n kernel_size,\n data_format='channels_last',\n block_size=2):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]` or `[batch,\n height, width, channels]` depending on `data_format`.\n kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`\n operations. 
Should be a positive integer.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n block_size: `int` block size for space-to-depth convolution.\n\n Returns:\n A padded `Tensor` of the same `data_format` with size either intact\n (if `kernel_size == 1`) or padded (if `kernel_size > 1`).\n \"\"\"\n pad_total = kernel_size - 1\n pad_beg = (pad_total // 2 + 1) // block_size\n pad_end = (pad_total // 2) // block_size\n if data_format == 'channels_first':\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs\n\n\ndef conv2d_fixed_padding(inputs,\n filters,\n kernel_size,\n strides,\n data_format='channels_last'):\n \"\"\"Strided 2-D convolution with explicit padding.\n\n The padding is consistent and is based only on `kernel_size`, not on the\n dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.\n filters: `int` number of filters in the convolution.\n kernel_size: `int` size of the kernel to be used in the convolution.\n strides: `int` strides of the convolution.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A `Tensor` of shape `[batch, filters, height_out, width_out]`.\n \"\"\"\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format=data_format)\n\n return tf.compat.v1.layers.conv2d(\n inputs=inputs,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'),\n use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)\n\n\ndef residual_block(inputs,\n filters,\n is_training_bn,\n strides,\n params,\n use_projection=False,\n data_format='channels_last'):\n \"\"\"Standard building block for residual networks with BN after convolutions.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first two convolutions. Note that\n the third and final convolution will use 4 times as many filters.\n is_training_bn: `bool` for whether the model is in training.\n strides: `int` block stride. If greater than 1, this block will ultimately\n downsample the input.\n params: params of the model, a dict.\n use_projection: `bool` for whether this block should use a projection\n shortcut (versus the default identity shortcut). 
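The explicit-padding helpers above exist so that a strided convolution downsamples deterministically, independent of the input size. A quick standalone check of the same arithmetic in plain Python:

```python
def fixed_padding_amounts(kernel_size):
    # Same split as fixed_padding above: for even kernels the extra
    # pixel of padding lands at the end.
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    return pad_beg, pad_end

def valid_conv_out(size, kernel_size, stride):
    # Output length of a VALID convolution applied after explicit padding.
    pad_beg, pad_end = fixed_padding_amounts(kernel_size)
    return (size + pad_beg + pad_end - kernel_size) // stride + 1

print(fixed_padding_amounts(7))   # (3, 3)
print(valid_conv_out(224, 7, 2))  # 112 -- the classic ResNet stem halving
```

This is why `conv2d_fixed_padding` switches to VALID padding whenever `strides > 1`: the explicit pad already reproduces SAME-style behaviour regardless of the input dimensions.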
This is usually `True`\n for the first block of a block group, which may change the number of\n filters and the resolution.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n The output `Tensor` of the block.\n \"\"\"\n shortcut = inputs\n if use_projection:\n # Projection shortcut in first layer to match filters and strides\n shortcut = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=1,\n strides=strides,\n data_format=data_format)\n shortcut = batch_norm_relu(\n shortcut, is_training_bn, params, relu=False, data_format=data_format)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=strides,\n data_format=data_format)\n inputs = batch_norm_relu(\n inputs, is_training_bn, params, data_format=data_format)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=1,\n data_format=data_format)\n inputs = batch_norm_relu(\n inputs,\n is_training_bn,\n params,\n relu=False,\n init_zero=True,\n data_format=data_format)\n\n return tf.nn.relu(inputs + shortcut)\n\n\ndef block_group(inputs,\n filters,\n block_fn,\n blocks,\n strides,\n is_training_bn,\n name,\n params,\n data_format='channels_last',\n use_projection=True):\n \"\"\"Creates one group of blocks for the ResNet model.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first convolution of the layer.\n block_fn: `function` for the block to use within the model\n blocks: `int` number of blocks contained in the layer.\n strides: `int` stride to use for the first convolution of the layer. If\n greater than 1, this layer will downsample the input.\n is_training_bn: `bool` for whether the model is training.\n name: `str`name for the Tensor output of the block layer.\n params: params of the model, a dict.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_projection: `bool` for whether this block should use a projection\n shortcut (versus the default identity shortcut). This is usually `True`\n for the first block of a block group, which may change the number of\n filters and the resolution.\n\n Returns:\n The output `Tensor` of the block layer.\n \"\"\"\n # Only the first block per block_group uses projection shortcut and strides.\n inputs = block_fn(\n inputs,\n filters,\n is_training_bn,\n strides,\n params,\n use_projection=use_projection,\n data_format=data_format)\n\n for _ in range(1, blocks):\n inputs = block_fn(\n inputs, filters, is_training_bn, 1, params, data_format=data_format)\n\n return tf.identity(inputs, name)\n\n\ndef conv0_space_to_depth(inputs, data_format='channels_last'):\n \"\"\"Strided 2-D convolution with explicit padding.\n\n The padding is consistent and is based only on `kernel_size`, not on the\n dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n\n Args:\n inputs: `Tensor` of size `[batch, height_in, width_in, channels]`.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A `Tensor` with the same type as `inputs`.\n \"\"\"\n # Create the conv0 kernel w.r.t. the original image size. 
(no space-to-depth).\n filters = 64\n kernel_size = 7\n space_to_depth_block_size = ssd_constants.SPACE_TO_DEPTH_BLOCK_SIZE\n strides = 2\n conv0 = tf.compat.v1.layers.Conv2D(\n filters=filters,\n kernel_size=kernel_size,\n strides=2,\n padding=('SAME' if strides == 1 else 'VALID'),\n use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)\n # Use the image size without space-to-depth transform as the input of conv0.\n batch_size, h, w, channel = inputs.get_shape().as_list()\n conv0.build([\n batch_size, h * space_to_depth_block_size, w * space_to_depth_block_size,\n channel // (space_to_depth_block_size**2)\n ])\n\n kernel = conv0.weights[0]\n # [7, 7, 3, 64] --> [8, 8, 3, 64]\n kernel = tf.pad(\n kernel,\n paddings=tf.constant([[1, 0], [1, 0], [0, 0], [0, 0]]),\n mode='CONSTANT',\n constant_values=0.)\n # Transform kernel follows the space-to-depth logic: http://shortn/_9YvHW96xPJ\n kernel = tf.reshape(\n kernel,\n [4, space_to_depth_block_size, 4, space_to_depth_block_size, 3, filters])\n kernel = tf.transpose(kernel, [0, 2, 1, 3, 4, 5])\n kernel = tf.reshape(kernel, [4, 4, int(channel), filters])\n kernel = tf.cast(kernel, inputs.dtype)\n\n inputs = space_to_depth_fixed_padding(inputs, kernel_size, data_format,\n space_to_depth_block_size)\n\n return tf.nn.conv2d(\n input=inputs,\n filter=kernel,\n strides=[1, 1, 1, 1],\n padding='VALID',\n data_format='NHWC' if data_format == 'channels_last' else 'NCHW',\n name='conv2d/Conv2D')\n\n\ndef resnet_v1_generator(block_fn, layers, params, data_format='channels_last'):\n \"\"\"Generator of ResNet v1 model with classification layers removed.\n\n Our actual ResNet network. We return the output of c2, c3,c4,c5\n N.B. batch norm is always run with trained parameters, as we use very small\n batches when training the object layers.\n\n Args:\n block_fn: `function` for the block to use within the model. Either\n `residual_block` or `bottleneck_block`.\n layers: list of 4 `int`s denoting the number of blocks to include in each\n of the 4 block groups. 
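The kernel reshuffling in `conv0_space_to_depth` above assumes the input images were rearranged on the host by a block-size-2 space-to-depth transform. A minimal NumPy sketch of that rearrangement, assuming NHWC layout:

```python
import numpy as np

def space_to_depth(x, block=2):
    """Rearrange (N, H, W, C) -> (N, H//block, W//block, C*block*block)."""
    n, h, w, c = x.shape
    x = x.reshape(n, h // block, block, w // block, block, c)
    x = x.transpose(0, 1, 3, 2, 4, 5)
    return x.reshape(n, h // block, w // block, c * block * block)

# A 4x4 RGB image becomes a 2x2 image with 12 channels; no pixels are lost.
img = np.arange(1 * 4 * 4 * 3).reshape(1, 4, 4, 3)
print(space_to_depth(img).shape)  # (1, 2, 2, 12)
```

Paired with the padded 8x8 -> 4x4 kernel reshape above, the convolution stays mathematically equivalent to the original 7x7 stride-2 conv0 while, per the code's own comment, mapping better onto the TPU; the `conv0_space_to_depth` flag in the model generator selects this path.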
Each group consists of blocks that take inputs of\n the same resolution.\n params: params of the model, a dict.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n Model `function` that takes in `inputs` and `is_training` and returns the\n output `Tensor` of the ResNet model.\n \"\"\"\n def model(inputs, is_training_bn=False):\n \"\"\"Creation of the model graph.\"\"\"\n if params['conv0_space_to_depth']:\n # conv0 uses space-to-depth transform for TPU performance.\n inputs = conv0_space_to_depth(inputs=inputs, data_format=data_format)\n else:\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=64,\n kernel_size=7,\n strides=2,\n data_format=data_format)\n inputs = tf.identity(inputs, 'initial_conv')\n inputs = batch_norm_relu(\n inputs, is_training_bn, params, data_format=data_format)\n\n inputs = tf.compat.v1.layers.max_pooling2d(\n inputs=inputs,\n pool_size=3,\n strides=2,\n padding='SAME',\n data_format=data_format)\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n c2 = block_group(\n inputs=inputs,\n filters=64,\n blocks=layers[0],\n strides=1,\n block_fn=block_fn,\n is_training_bn=is_training_bn,\n params=params,\n name='block_group1',\n data_format=data_format,\n use_projection=False)\n c3 = block_group(\n inputs=c2,\n filters=128,\n blocks=layers[1],\n strides=2,\n block_fn=block_fn,\n is_training_bn=is_training_bn,\n params=params,\n name='block_group2',\n data_format=data_format)\n c4 = block_group(\n inputs=c3,\n filters=256,\n blocks=layers[2],\n strides=1,\n block_fn=block_fn,\n is_training_bn=is_training_bn,\n params=params,\n name='block_group3',\n data_format=data_format)\n return c2, c3, c4\n\n return model\n\n\ndef resnet_v1(resnet_depth, params, data_format='channels_last'):\n \"\"\"Returns the ResNet model for a given size and number of output classes.\"\"\"\n model_params = {\n 34: {'block': residual_block, 'layers': [3, 4, 6, 3]}\n }\n\n if resnet_depth not in model_params:\n raise ValueError('Not a valid resnet_depth:', resnet_depth)\n\n resnet_params = model_params[resnet_depth]\n return resnet_v1_generator(resnet_params['block'], resnet_params['layers'],\n params, data_format)\n\n\ndef class_net(images, level, num_classes):\n \"\"\"Class prediction network for SSD.\"\"\"\n return tf.layers.conv2d(\n images,\n num_classes * ssd_constants.NUM_DEFAULTS_BY_LEVEL[level],\n kernel_size=(min(images.shape[1], 3), min(images.shape[2], 3)),\n padding='same',\n activation=None,\n name='class-%d' % (level),\n )\n\n\ndef box_net(images, level):\n \"\"\"Box regression network for SSD.\"\"\"\n return tf.layers.conv2d(\n images,\n 4 * ssd_constants.NUM_DEFAULTS_BY_LEVEL[level],\n kernel_size=(min(images.shape[1], 3), min(images.shape[2], 3)),\n padding='same',\n activation=None,\n name='box-%d' % (level),\n )\n\n\ndef ssd(features, params, is_training_bn=False):\n \"\"\"SSD classification and regression model.\"\"\"\n # upward layers\n with tf.variable_scope(\n 'resnet%s' % ssd_constants.RESNET_DEPTH, reuse=tf.AUTO_REUSE):\n resnet_fn = resnet_v1(ssd_constants.RESNET_DEPTH, params)\n _, _, u4 = resnet_fn(features, is_training_bn)\n\n with tf.variable_scope('ssd', reuse=tf.AUTO_REUSE):\n feats = {}\n # output channels for mlperf logging.\n out_channels = [256]\n feats[3] = u4\n feats[4] = tf.layers.conv2d(\n feats[3],\n filters=256,\n kernel_size=(1, 1),\n padding='same',\n activation=tf.nn.relu,\n name='block7-conv1x1')\n feats[4] = tf.layers.conv2d(\n 
feats[4],\n filters=512,\n strides=(2, 2),\n kernel_size=(3, 3),\n padding='same',\n activation=tf.nn.relu,\n name='block7-conv3x3')\n out_channels.append(512)\n feats[5] = tf.layers.conv2d(\n feats[4],\n filters=256,\n kernel_size=(1, 1),\n padding='same',\n activation=tf.nn.relu,\n name='block8-conv1x1')\n feats[5] = tf.layers.conv2d(\n feats[5],\n filters=512,\n strides=(2, 2),\n kernel_size=(3, 3),\n padding='same',\n activation=tf.nn.relu,\n name='block8-conv3x3')\n out_channels.append(512)\n feats[6] = tf.layers.conv2d(\n feats[5],\n filters=128,\n kernel_size=(1, 1),\n padding='same',\n activation=tf.nn.relu,\n name='block9-conv1x1')\n feats[6] = tf.layers.conv2d(\n feats[6],\n filters=256,\n strides=(2, 2),\n kernel_size=(3, 3),\n padding='same',\n activation=tf.nn.relu,\n name='block9-conv3x3')\n out_channels.append(256)\n feats[7] = tf.layers.conv2d(\n feats[6],\n filters=128,\n kernel_size=(1, 1),\n padding='same',\n activation=tf.nn.relu,\n name='block10-conv1x1')\n feats[7] = tf.layers.conv2d(\n feats[7],\n filters=256,\n kernel_size=(3, 3),\n padding='valid',\n activation=tf.nn.relu,\n name='block10-conv3x3')\n out_channels.append(256)\n feats[8] = tf.layers.conv2d(\n feats[7],\n filters=128,\n kernel_size=(1, 1),\n padding='same',\n activation=tf.nn.relu,\n name='block11-conv1x1')\n feats[8] = tf.layers.conv2d(\n feats[8],\n filters=256,\n kernel_size=(3, 3),\n padding='valid',\n activation=tf.nn.relu,\n name='block11-conv3x3')\n out_channels.append(256)\n\n class_outputs = {}\n box_outputs = {}\n min_level = ssd_constants.MIN_LEVEL\n max_level = ssd_constants.MAX_LEVEL\n num_classes = ssd_constants.NUM_CLASSES\n\n with tf.variable_scope('class_net', reuse=tf.AUTO_REUSE):\n for level in range(min_level, max_level + 1):\n class_outputs[level] = class_net(\n feats[level], level, num_classes)\n\n with tf.variable_scope('box_net', reuse=tf.AUTO_REUSE):\n for level in range(min_level, max_level + 1):\n box_outputs[level] = box_net(\n feats[level], level)\n\n return class_outputs, box_outputs\n","sub_path":"Google/benchmarks/ssd/implementations/ssd-research-TF-tpu-v4-512/ssd_architecture.py","file_name":"ssd_architecture.py","file_ext":"py","file_size_in_byte":29520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"565120865","text":"import json\r\nfrom datetime import date, datetime, time, timedelta\r\nfrom typing import List\r\n\r\nimport pytz\r\nfrom django.core.validators import MaxValueValidator\r\nfrom django.db import models\r\nfrom django.utils.timezone import is_naive, make_aware\r\n\r\nfrom config.constants import ScheduleTypes\r\nfrom database.common_models import TimestampedModel\r\nfrom database.survey_models import Survey, SurveyArchive\r\n\r\n\r\nclass AbsoluteSchedule(TimestampedModel):\r\n survey = models.ForeignKey('Survey', on_delete=models.CASCADE, related_name='absolute_schedules')\r\n scheduled_date = models.DateTimeField()\r\n\r\n @staticmethod\r\n def create_absolute_schedules(timings: List[List[int]], survey: Survey) -> bool:\r\n \"\"\" Creates new AbsoluteSchedule objects from a frontend-style list of dates and times\"\"\"\r\n if not timings:\r\n return False\r\n\r\n survey.absolute_schedules.all().delete()\r\n duplicated = False\r\n for year, month, day, num_seconds in timings:\r\n hour = num_seconds // 3600\r\n minute = num_seconds % 3600 // 60\r\n schedule_date = datetime(year=year, month=month, day=day, hour=hour, minute=minute)\r\n # using get_or_create to catch duplicate schedules\r\n _, created 
= AbsoluteSchedule.objects.get_or_create(survey=survey, scheduled_date=schedule_date)\r\n if not created:\r\n duplicated = True\r\n\r\n return duplicated\r\n\r\n\r\nclass RelativeSchedule(TimestampedModel):\r\n survey = models.ForeignKey('Survey', on_delete=models.CASCADE, related_name='relative_schedules')\r\n intervention = models.ForeignKey('Intervention', on_delete=models.CASCADE, related_name='relative_schedules', null=True)\r\n days_after = models.IntegerField(default=0)\r\n hour = models.PositiveIntegerField(validators=[MaxValueValidator(23)])\r\n minute = models.PositiveIntegerField(validators=[MaxValueValidator(59)])\r\n\r\n def scheduled_time(self, intervention_date: date, tz=None) -> datetime:\r\n # forces the timezone, you can pass in a timezone to reduce overhead.\r\n return make_aware(\r\n datetime.combine(intervention_date, time(self.hour, self.minute)),\r\n tz or self.survey.study.timezone\r\n )\r\n\r\n @staticmethod\r\n def create_relative_schedules(timings: List[List[int]], survey: Survey) -> bool:\r\n \"\"\"\r\n Creates new RelativeSchedule objects from a frontend-style list of interventions and times\r\n \"\"\"\r\n if not timings:\r\n return False\r\n\r\n survey.relative_schedules.all().delete()\r\n duplicated = False\r\n # should be all ints\r\n for intervention_id, days_after, num_seconds in timings:\r\n hour = num_seconds // 3600\r\n minute = num_seconds % 3600 // 60\r\n # using get_or_create to catch duplicate schedules\r\n _, created = RelativeSchedule.objects.get_or_create(\r\n survey=survey,\r\n intervention=Intervention.objects.get(id=intervention_id),\r\n days_after=days_after,\r\n hour=hour,\r\n minute=minute,\r\n )\r\n if not created:\r\n duplicated = True\r\n return duplicated\r\n\r\n\r\nclass WeeklySchedule(TimestampedModel):\r\n \"\"\" Represents an instance of a time of day within a week for the weekly survey schedule.\r\n day_of_week is an integer, day 0 is Sunday.\r\n\r\n The timings schema mimics the Java.util.Calendar.DayOfWeek specification: it is zero-indexed\r\n with day 0 as Sunday.\"\"\"\r\n\r\n survey = models.ForeignKey('Survey', on_delete=models.CASCADE, related_name='weekly_schedules')\r\n day_of_week = models.PositiveIntegerField(validators=[MaxValueValidator(6)])\r\n hour = models.PositiveIntegerField(validators=[MaxValueValidator(23)])\r\n minute = models.PositiveIntegerField(validators=[MaxValueValidator(59)])\r\n\r\n @staticmethod\r\n def create_weekly_schedules(timings: List[List[int]], survey: Survey) -> bool:\r\n \"\"\" Creates new WeeklySchedule objects from a frontend-style list of seconds into the day. 
\"\"\"\r\n if not timings:\r\n return False\r\n \r\n assert len(timings) == 7\r\n survey.weekly_schedules.all().delete()\r\n duplicated = False\r\n for day in range(7):\r\n for seconds in timings[day]:\r\n hour = seconds // 3600\r\n minute = seconds % 3600 // 60\r\n # using get_or_create to catch duplicate schedules\r\n _, created = WeeklySchedule.objects.get_or_create(\r\n survey=survey, day_of_week=day, hour=hour, minute=minute\r\n )\r\n if not created:\r\n duplicated = True\r\n\r\n return duplicated\r\n\r\n @classmethod\r\n def export_survey_timings(cls, survey: Survey) -> List[List[int]]:\r\n \"\"\"Returns a json formatted list of weekly timings for use on the frontend\"\"\"\r\n # this sort order results in nicely ordered output.\r\n fields_ordered = (\"hour\", \"minute\", \"day_of_week\")\r\n timings = [[], [], [], [], [], [], []]\r\n schedule_components = WeeklySchedule.objects.\\\r\n filter(survey=survey).order_by(*fields_ordered).values_list(*fields_ordered)\r\n\r\n # get, calculate, append, dump.\r\n for hour, minute, day in schedule_components:\r\n timings[day].append((hour * 60 * 60) + (minute * 60))\r\n return timings\r\n\r\n def get_prior_and_next_event_times(self, now: datetime=None) -> (datetime, datetime):\r\n \"\"\" Identify the start of the week relative to the current time, use that to determine this\r\n week's (past or present) push notification event time, and the same event for next week.\r\n If now is passed in it must have a UTC timezone. \"\"\"\r\n\r\n if now is None:\r\n # handle case of utc date not matching date of local time.\r\n today = make_aware(datetime.utcnow(), timezone=pytz.utc).date()\r\n elif isinstance(now, datetime) and not is_naive(now) and now.tzinfo.zone == \"UTC\":\r\n # now must be a datetime with a timezone of UTC\r\n today = now.date()\r\n else:\r\n raise TypeError(f\"Datetime must be UTC and timezone aware, received {str(now)}\")\r\n\r\n # today.weekday defines Monday=0, in our schema Sunday=0 so we add 1\r\n start_of_this_week = today - timedelta(days=((today.weekday()+1) % 7))\r\n\r\n event_this_week = make_aware(\r\n datetime(\r\n year=start_of_this_week.year,\r\n month=start_of_this_week.month,\r\n day=start_of_this_week.day,\r\n ) +\r\n timedelta(\r\n days=self.day_of_week,\r\n hours=self.hour,\r\n minutes=self.minute,\r\n ),\r\n timezone=pytz.utc,\r\n )\r\n event_next_week = event_this_week + timedelta(days=7)\r\n return event_this_week, event_next_week\r\n\r\n\r\nclass ScheduledEvent(TimestampedModel):\r\n survey = models.ForeignKey('Survey', on_delete=models.CASCADE, related_name='scheduled_events')\r\n participant = models.ForeignKey('Participant', on_delete=models.PROTECT, related_name='scheduled_events')\r\n weekly_schedule = models.ForeignKey('WeeklySchedule', on_delete=models.CASCADE, related_name='scheduled_events', null=True, blank=True)\r\n relative_schedule = models.ForeignKey('RelativeSchedule', on_delete=models.CASCADE, related_name='scheduled_events', null=True, blank=True)\r\n absolute_schedule = models.ForeignKey('AbsoluteSchedule', on_delete=models.CASCADE, related_name='scheduled_events', null=True, blank=True)\r\n scheduled_time = models.DateTimeField()\r\n\r\n # due to import complexity right here this is the best place to stick this\r\n SCHEDULE_CLASS_LOOKUP = {\r\n ScheduleTypes.absolute: AbsoluteSchedule,\r\n ScheduleTypes.relative: RelativeSchedule,\r\n ScheduleTypes.weekly: WeeklySchedule,\r\n AbsoluteSchedule: ScheduleTypes.absolute,\r\n RelativeSchedule: ScheduleTypes.relative,\r\n WeeklySchedule: 
ScheduleTypes.weekly,\r\n }\r\n\r\n class Meta:\r\n unique_together = ('survey', 'participant', 'scheduled_time',)\r\n\r\n def get_schedule_type(self):\r\n return self.SCHEDULE_CLASS_LOOKUP[self.get_schedule().__class__]\r\n\r\n def get_schedule(self):\r\n number_schedules = sum((\r\n self.weekly_schedule is not None, self.relative_schedule is not None,\r\n self.absolute_schedule is not None\r\n ))\r\n\r\n if number_schedules > 1:\r\n raise Exception(f\"ScheduledEvent had {number_schedules} associated schedules.\")\r\n\r\n if self.weekly_schedule:\r\n return self.weekly_schedule\r\n elif self.relative_schedule:\r\n return self.relative_schedule\r\n elif self.absolute_schedule:\r\n return self.absolute_schedule\r\n else:\r\n raise Exception(\"ScheduledEvent had no associated schedule\")\r\n\r\n def archive(self):\r\n # for stupid reasons involving the legacy mechanism for creating a survey archive we need\r\n # to handle the case where the object does not exist.\r\n try:\r\n survey_archive = self.survey.most_recent_archive()\r\n except SurveyArchive.DoesNotExist:\r\n self.survey.archive() # force create a survey archive\r\n survey_archive = self.survey.most_recent_archive()\r\n\r\n ArchivedEvent.objects.create(\r\n survey_archive=survey_archive,\r\n participant=self.participant,\r\n schedule_type=self.get_schedule_type(),\r\n scheduled_time=self.scheduled_time,\r\n )\r\n self.delete()\r\n\r\n# TODO there is no code that updates the response_time field. That should be rolled into the\r\n# check-for-downloads as an optional parameter passed in. If it doesn't get hit then there is\r\n# no guarantee that the app checked in.\r\nclass ArchivedEvent(TimestampedModel):\r\n survey_archive = models.ForeignKey('SurveyArchive', on_delete=models.PROTECT, related_name='archived_events', db_index=True)\r\n participant = models.ForeignKey('Participant', on_delete=models.PROTECT, related_name='archived_events', db_index=True)\r\n schedule_type = models.CharField(max_length=32, db_index=True)\r\n scheduled_time = models.DateTimeField(db_index=True)\r\n response_time = models.DateTimeField(null=True, blank=True, db_index=True)\r\n\r\n\r\nclass Intervention(models.Model):\r\n name = models.TextField()\r\n study = models.ForeignKey('Study', on_delete=models.PROTECT, related_name='interventions')\r\n\r\n\r\nclass InterventionDate(models.Model):\r\n date = models.DateField(null=True)\r\n participant = models.ForeignKey('Participant', on_delete=models.CASCADE, related_name='intervention_dates')\r\n intervention = models.ForeignKey('Intervention', on_delete=models.CASCADE, related_name='intervention_dates')\r\n\r\n class Meta:\r\n unique_together = ('participant', 'intervention',)\r\n","sub_path":"database/schedule_models.py","file_name":"schedule_models.py","file_ext":"py","file_size_in_byte":11035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"466825164","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport xml.etree.ElementTree as ET \nimport requests, sys, math, datetime, settings, os\nimport chardet\n\nclass Tableau(object):\n def __init__(self, args):\n self.args = args\n self.auth = {\n 'token': '',\n 'site_id': '',\n 'my_user_id': ''\n }\n self.api_version = \"3.4\"\n self.xmlns = {\"t\": 'http://tableau.com/api'}\n\n def sign_in(self):\n url = self.args.server + \"/api/\" + self.api_version + \"/auth/signin\"\n xml_payload_for_request = ET.Element('tsRequest')\n credentials_element = ET.SubElement(xml_payload_for_request, 'credentials', 
name=self.args.username, password=self.args.password)\n site_element = ET.SubElement(credentials_element, 'site', contentUrl=self.args.site)\n xml_payload_for_request = ET.tostring(xml_payload_for_request)\n\n server_response = requests.post(url, data=xml_payload_for_request)\n if server_response.status_code != 200:\n print(server_response.text)\n sys.exit(1)\n xml_response = ET.fromstring(self._encode_for_display(server_response.text))\n \n self.auth['token'] = xml_response.find('t:credentials', namespaces=self.xmlns).attrib.get('token')\n self.auth['site_id'] = xml_response.find('.//t:site', namespaces=self.xmlns).attrib.get('id')\n self.auth['my_user_id'] = xml_response.find('.//t:user', namespaces=self.xmlns).attrib.get('id')\n \n def _encode_for_display(self, text):\n return text.encode('ascii', errors=\"ignore\").decode('utf-8')\n\n def query(self):\n pageNum, pageSize = 1, 1000\n url = \"\"\n\n if (self.args.type in [\"workbook\"]):\n url = self.args.server + \"/api/\" + self.api_version + \"/sites/{0}/users/{1}/workbooks\".format(self.auth['site_id'], self.auth['my_user_id'])\n if (self.args.type == \"datasource\"):\n url = self.args.server + \"/api/\" + self.api_version + \"/sites/{0}/datasources\".format(self.auth['site_id'])\n \n paged_url = url + \"?pageSize={0}&pageNumber={1}\".format(pageSize, pageNum)\n server_response = requests.get(paged_url, headers={\"x-tableau-auth\": self.auth['token']})\n if server_response.status_code != 200:\n print(self._encode_for_display(server_response.text))\n sys.exit(1)\n xml_response = ET.fromstring(self._encode_for_display(server_response.text))\n total_count_of_workbooks = int(xml_response.find('t:pagination', namespaces=self.xmlns).attrib.get('totalAvailable'))\n if total_count_of_workbooks > pageSize:\n response = []\n response.extend(xml_response.findall(settings.options[self.args.type], namespaces=self.xmlns))\n number_of_pages = int(math.ceil(total_count_of_workbooks / pageSize))\n for page in range(2, number_of_pages + 1):\n paged_url = url + \"?pageSize={}&pageNumber={}\".format(pageSize, page)\n server_response = requests.get(paged_url, headers={\"x-tableau-auth\": self.auth['token']})\n if server_response.status_code != 200:\n print(self._encode_for_display(server_response.text))\n sys.exit(1)\n workbooks_from_page = ET.fromstring(self._encode_for_display(server_response.text)).findall(settings.options[self.args.type], namespaces=self.xmlns)\n response.extend(workbooks_from_page)\n else:\n response = xml_response.findall(settings.options[self.args.type], namespaces=self.xmlns)\n\n for r in response:\n # args.name is already a str on Python 3; the old decode('iso-8859-1').encode(...) chain would raise AttributeError\n if r.get('name') == self.args.name:\n return r\n\n def download(self): \n downloadlist = []\n starttimestamp = datetime.datetime.now()\n\n self.sign_in()\n element = self.query()\n \n if (element is None):\n return None\n\n downloadurl = self.args.server + \"/api/\" + self.api_version + \"/sites/{0}/{1}/\".format(self.auth['site_id'], settings.name[self.args.type]) +\"\".join(element.get('id')) + \"/content\"\n downloadlist.append(downloadurl)\n for downloadurl in downloadlist:\n # stream must be requested up front; assigning r.stream after the call has no effect\n r = requests.get(downloadurl, headers={\"x-tableau-auth\": self.auth['token']}, stream=True)\n with open (os.path.join(self.args.output, starttimestamp.strftime(\"%m_%d_%Y_%H_%M_%S\") + '_output' + settings.extension[self.args.type]), \"wb\") as fd:\n for chunk in r.iter_content(chunk_size=128):\n 
fd.write(chunk)","sub_path":"tableau.py","file_name":"tableau.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"65051866","text":"import db\nimport aquariumChem\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import date2num\nfrom datetime import datetime\n\n\n\n\ndef get_CO2(database='aqua.db',table='parameters'):\n con = db.connect(database)\n lst = db.get_column(['time','KH','pH'],con,table)\n lst = [ {'time':item[0], 'KH':float(item[1]), 'pH':float(item[2])} for item in lst ]\n lst = sorted(lst,key=lambda k: k['time'])\n CO2 = []\n for item in lst:\n kh = item['KH']\n ph = item['pH']\n CO2.append(aquariumChem.Water.dissolved_CO2(kh,ph))\n return lst, CO2\n \ndef parsetime(time):\n \"\"\"format will be xxxx-xx-xx xx:xx:xx\"\"\"\n\n year = int(time[0:4])\n mon = int(time[5:7])\n day = int(time[8:10])\n\n hr = int(time[11:13])\n mn = int(time[14:16])\n\n print(year,mon,day,hr,mn)\n\n return datetime(year,mon,day,hr,mn)\n\n\nif __name__=='__main__':\n data, CO2=get_CO2()\n dates = [date2num( parsetime(item['time']) ) for item in data]\n print(dates)\n plt.plot_date(dates,CO2)\n plt.show()\n \n \n \n","sub_path":"CO2_concentration.py","file_name":"CO2_concentration.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"331410220","text":"import os\n\n#base config\n#BASE_PATH = '/'.join(os.getcwd().split('/')[:-1]) # Using ubuntu machine may require removing this -1\nBASE_PATH = '/'.join(os.getcwd().split('/')) # Using ubuntu machine may require removing this -1\nface_describer_input_tensor_names = ['img_inputs:0', 'dropout_rate:0']\nface_describer_output_tensor_names = ['resnet_v1_50/E_BN2/Identity:0']\nface_describer_device = '/cpu:0'\nface_describer_model_fp = '{}/pretrained/insightface.pb'.format(BASE_PATH)\nface_describer_tensor_shape = (112, 112)\nface_describer_drop_out_rate = 0.1\n\n\n#loss_function=\"cos\"\nloss_function=\"distances\"\n#tvm config\ntvm_dtype=\"float32\"\ntvm_model_dim='aid512'\n#tvm_model_dim='512'\ntvm_model_dir=\"{}/model/insightface_{}dim/\".format(BASE_PATH,tvm_model_dim)\n#tvm_model_dir=\"{}/model/arcface_r100_v1/\".format(BASE_PATH)\ntvm_graph_json = \"{}/deploy_graph.json\".format(tvm_model_dir)\ntvm_lib = \"{}/deploy_lib.so\".format(tvm_model_dir)\ntvm_params = \"{}/deploy_param.params\".format(tvm_model_dir)\ntvm_data_shape = (1, 3, 112, 112)\n#512dim\n#tvm_face_similarity_threshold = 24\n#128dim\n#tvm_face_similarity_threshold = 10\n#512dim\n#tvm_face_similarity_threshold = 24\n#128dim\ntvm_face_similarity_threshold = 21\n#tvm_face_similarity_threshold = 1.8\n#tvm known data\ntvm_faces_dir=\"{}/images/\".format(BASE_PATH)\ntvm_faces_feature_dir=\"{}/model/feature_{}dim/\".format(BASE_PATH,tvm_model_dim)\ntvm_faces_descriptions=\"{}/faces_descriptions.npy\".format(tvm_faces_feature_dir)\ntvm_faces=\"{}/faces.npy\".format(tvm_faces_feature_dir)\n\ntvm_fd_dtype=\"float32\"\ntvm_fd_model_name='retinaface_mnet025_v1'\ntvm_fd_model_dir=\"{}/model/{}/\".format(BASE_PATH,tvm_fd_model_name)\ntvm_fd_graph_json = \"{}/deploy_graph.json\".format(tvm_fd_model_dir)\ntvm_fd_lib = \"{}/deploy_lib.so\".format(tvm_fd_model_dir)\ntvm_fd_params = \"{}/deploy_param.params\".format(tvm_fd_model_dir)\ntvm_fd_data_shape = (1, 3, 112, 112)\ntvm_fd_nms = 0.4\ntvm_fd_rac = 
'net3'\n\ntvm_mtcnn_model_name=\"mtcnn/tvm\"\ntvm_mtcnn_dir=\"{}/model/{}/\".format(BASE_PATH,tvm_mtcnn_model_name)\ntvm_mtcnn_graph_json = tvm_mtcnn_dir+\"{}_deploy_graph.json\"\ntvm_mtcnn_lib = tvm_mtcnn_dir+\"{}_deploy_lib.so\"\ntvm_mtcnn_params = tvm_mtcnn_dir+\"{}_deploy_param.params\"\ntvm_mtcnn_data_shape = (1, 3, 112, 112)\n\n#\n#tpu config\ntpu_model_dir = '{}/../all_models/'.format(BASE_PATH)\n#/home/crsarang/works/ml/tpu/project/../all_models/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite\n#tpu_model = '{}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'.format(tpu_model_dir)\ntpu_model = '{}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'.format(tpu_model_dir)\n#tpu_labels = '{}/coco_labels.txt'.format(tpu_model_dir)\ntpu_labels = None\ntpu_threshold=0.1\ntpu_keep_aspect_ratio=False\ntpu_relative_coord=False\ntpu_top_k=10\n\n#cam config\ncam_csi_width=1280\ncam_csi_height=720\n#cam_csi_width=1280#1920#3280#1280\n#cam_csi_height=720#1080#2464#720\ncam_csi_index=\"0\"\n\n\n","sub_path":"configs/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"466705337","text":"import pandas as pd\r\npd.set_option('display.max_columns', None)\r\npd.set_option('display.max_rows', None)\r\n\r\nairquality = pd.read_csv('airquality.csv')\r\n\r\nprint(airquality.info())\r\n\r\n# Calculate the mean of the Ozone column: oz_mean\r\noz_mean = airquality.Ozone.mean()\r\n\r\n# Replace all the missing values in the Ozone column with the mean\r\nairquality['Ozone'] = airquality.Ozone.fillna(oz_mean)\r\n\r\n# Print the info of airquality\r\nprint(airquality.info())\r\n","sub_path":"Pandas/missing_data.py","file_name":"missing_data.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"304405988","text":"from grakn.client import GraknClient\n\nwith GraknClient(uri=\"localhost:48555\") as client:\n with client.session(keyspace = \"globe\") as session:\n with session.transaction().read() as transaction:\n query = [\n 'match $country isa country, has countrycode \"ZWE\";' + \n ' $city isa city, has cityname $cname; $rel (in-country: $city, contains-city: $country)' +\n ' isa has-city; get $cname;'\n ]\n\n print(\"\\nQuery:\\n\", \"\\n\".join(query))\n query = \"\".join(query)\n\n iterator = transaction.query(query)\n\n\n\n\n\n\n # concept_map = next(iterator)\n # print(\"ConceptMap Obj: {}\".format(concept_map))\n # # print(concept_map.asThing().attributes)\n\n # # Get Entity Obj from ConceptMap\n # entity = concept_map.get('city').asAttribute().value().toString()\n\n # print(entity)\n # print(vars(entity))\n # print(\"Entity Obj: {}\".format(entity))\n # print(\"Entity Id: {}\".format(entity.id))\n \n # # Get Entity Attribute Label/Values\n # attrs = entity.attributes()\n # for each in attrs:\n # print(each.type().label())\n # print(each.value())\n\n # print(iterator)\n # answers = [print(ans) for ans in iterator]\n\n\n answers = [ans.get(\"cname\") for ans in iterator]\n print(answers)\n result = [ answer.id for answer in answers ]\n\n for answer in answers:\n print(answer.type().label(), answer.value())\n print(\"\\nResult:\\n\", result)\n\n # answers = [ans.get(\"city\") for ans in iterator]\n # print(answers)\n # result = [ answer.id for answer in answers ]\n\n # for answer in answers:\n # print(answer.type().__getattribute__('cityname'))\n # 
print(answer.type().label())\n # print(\"\\nResult:\\n\", result)","sub_path":"get_cities.py","file_name":"get_cities.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"11917457","text":"# input ..12345678910111213141516171820212223\nimport re\nif __name__ == '__main__':\n s = raw_input()\n an = re.match(r\"([^\\d]*)(\\d*)\", s)\n str = an.group(2)\n #dict = {}\n for i in str:\n b = str.count(i, 0, len(str))\n #dict[i] = b\n if b > 1:\n print (i)\n break\n\n\n","sub_path":"hackerrank/repeat_char.py","file_name":"repeat_char.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"404556136","text":"from __future__ import absolute_import, unicode_literals\n\nimport pytest\nimport spacy\n\nfrom textacy import cache\n\n\ndef test_load_spacy():\n for lang in (\"en\", \"en_core_web_sm\"):\n for disable in (None, (\"parser\", \"ner\")):\n assert isinstance(\n cache.load_spacy(lang, disable=disable), spacy.language.Language\n )\n\n\ndef test_load_spacy_hashability():\n with pytest.raises(TypeError):\n _ = cache.load_spacy(\"en\", disable=[\"tagger\", \"parser\", \"ner\"])\n\n\ndef test_load_pyphen():\n for lang in (\"en\", \"es\"):\n _ = cache.load_hyphenator(lang=lang)\n assert True\n\n\n@pytest.mark.skip(reason=\"We don't download DepecheMood for tests\")\ndef test_load_depechemood():\n for weighting in (\"freq\", \"normfreq\", \"tfidf\"):\n assert isinstance(cache.load_depechemood(weighting=weighting), dict)\n\n\ndef test_cache_clear():\n cache.clear()\n _ = cache.load_hyphenator(lang=\"en\")\n _ = cache.load_spacy(\"en\")\n assert len(cache.LRU_CACHE.keys()) >= 2\n # check cache size; low thresh but still larger than if the size of\n # loaded data was not being correctly assessed\n assert cache.LRU_CACHE.currsize >= 1000\n","sub_path":"tests/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"585388958","text":"import subprocess\n\ntry:\n info = subprocess.run(['cmus-remote', '-Q'],\n capture_output=True).stdout.decode('utf-8')\n\n artist = ''\n title = ''\n\n for line in info.splitlines():\n if line.startswith('tag artist'):\n artist = line.replace('tag artist ', '').strip()\n\n if line.startswith('tag title'):\n title = line.replace('tag title ', '').strip()\n\n if artist and title:\n print(f'{artist} - {title}')\n\n elif title:\n print(f'Unknown artist - {title}')\n\n elif artist:\n print(f'{artist} - Unknown title')\n else:\n print('')\nexcept:\n print('')\n","sub_path":"scripts/.config/scripts/get_track_info_from_cmus.py","file_name":"get_track_info_from_cmus.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"165814282","text":"import numpy as np\nfrom dataclasses import dataclass\n\nfrom kepler.util.base import find_index_above_threshold\n\n@dataclass\nclass PeriodicEvent:\n period: float\n duration: float\n t0: float\n\n def equals(self, other, period_rtol=0.001, t0_durations=1):\n return np.arcsin(np.sin((np.pi * (self.t0 - other.t0) % other.period) / other.period)) * (other.period / np.pi) < t0_durations * other.duration if not np.isclose(self.period, other.period, rtol=period_rtol, atol=1e-8) else False\n\n def count_transit_points(self, time):\n t_min, t_max = 
np.min(time), np.max(time)\n if(t_max - t_min) / self.period <= 10e6:\n t0 = (self.t0 - t_min) % self.period + t_min\n points_in_transit = []\n i = j = 0\n for transit_midpoint in np.arange(t0, t_max, self.period):\n i = find_index_above_threshold(time, transit_midpoint - self.duration/2, i)\n j = find_index_above_threshold(time, transit_midpoint + self.duration/2, j)\n points_in_transit.append(j - i)\n\n return np.array(points_in_transit)","sub_path":"kepler/util/periodic_event.py","file_name":"periodic_event.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"49198728","text":"class TreeNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n\ndef is_complete_bt(tree_node):\n node_queue = []\n if tree_node is None:\n return False\n node_queue.append(tree_node)\n status = False\n while node_queue:\n node = node_queue.pop(0)\n if status and (node.left or node.right): # once a gap has been seen, every later node must be a leaf\n return False\n if node.left:\n node_queue.append(node.left)\n if node.right:\n node_queue.append(node.right)\n if not node.left and node.right: # a right child without a left child: not a complete binary tree\n return False\n if not node.right: # missing right child, so all following nodes must be leaves\n status = True\n return True\n\n\nif __name__ == '__main__':\n node1 = TreeNode(1)\n node2 = TreeNode(2)\n node3 = TreeNode(3)\n node4 = TreeNode(4)\n node5 = TreeNode(5)\n node6 = TreeNode(6)\n node7 = TreeNode(7)\n node8 = TreeNode(8)\n node9 = TreeNode(9)\n node10 = TreeNode(10)\n node11 = TreeNode(11)\n\n node1.left = node2\n node1.right = node3\n node2.left = node4\n node2.right = node5\n node3.left = node6\n node3.right = node7\n node4.left = node8\n node4.right = node9\n node5.left = node10\n node6.right = node11\n\n res = is_complete_bt(node1)\n print(res)\n","sub_path":"23判断是否是完全二叉树.py","file_name":"23判断是否是完全二叉树.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"229389324","text":"# %% [markdown]\n# # North Branch Extreme Precipitation\n# The analysis in this notebook: 1. loads gridded precipitation data, 2. clips it to the spatial domain of the waterboundary data, 3. identifies the maximum precipitation day for each year of data, and 4. writes this data to .shp and .csv files.
\n#
\n# note: running this file can take up to 5 minutes per year of data being processed.\n#
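\n# A minimal, self-contained sketch of step 3 (finding each year's maximum-precipitation day) follows; it is a hypothetical pandas example on synthetic data, not the dataimports.livneh code this notebook actually uses.
\n\n# %%\n# Hypothetical illustration only: build a synthetic daily basin-mean precipitation\n# series, then pick the wettest day of each year with groupby(...).idxmax().\nimport numpy as np\nimport pandas as pd\n\nrng = np.random.default_rng(0)\ndates = pd.date_range('1915-01-01', '1916-12-31', freq='D')\nprec = pd.Series(rng.gamma(2.0, 2.0, len(dates)), index=dates)\nmax_days = prec.groupby(prec.index.year).idxmax()\nprint(max_days)\n\n# %% [markdown]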
\n# #### This analysis is based on two data sets:
\n# 1. Livneh Daily Precipitation Data: this provides daily gridded meteorological observations from 1915 to 2011 with 1/16 degree (latitude or longitude) resolution.$^{1}$
\n# 2. USGS Spatial Waterboundary Data: this provides nested watershed polygons. The analysis in this notebook focuses on the 'North Branch Potomac' 8 digit hydrologic unit code (HUC8) watershed (see the sketch after this list).$^{2}$
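\n#
\n# Below, a hypothetical pandas-only sketch of the HUC8 selection described in item 2; the notebook's real import goes through dataimports.waterboundary and the WBDHU8 shapefile, so the table rows here (including the areas) are made-up placeholders.
\n\n# %%\n# Hypothetical illustration only: filter a stand-in WBDHU8 attribute table by HUC8 code.\nimport pandas as pd\n\nwbd_attrs = pd.DataFrame({'HUC8': ['02070001', '02070002'], 'Name': ['South Branch Potomac', 'North Branch Potomac'], 'AreaSqKm': [3810.0, 3470.0]})\nnb = wbd_attrs[wbd_attrs['HUC8'] == '02070002']\nprint(nb.iloc[0]['Name'], nb.iloc[0]['AreaSqKm'])\n\n# %% [markdown]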
\n#
\n# References:
\n# $^{1}$ Livneh B., E.A. Rosenberg, C. Lin, B. Nijssen, V. Mishra, K.M. Andreadis, E.P. Maurer, and D.P. Lettenmaier, 2013: A Long-Term Hydrologically Based Dataset of Land Surface Fluxes and States for the Conterminous United States: Update and Extensions, Journal of Climate, 26, 9384–9392.
\n# $^{2}$ U.S. Geological Survey, 2020, National Waterboundary Dataset for 2 digit hydrologic Unit (HU) 02 (mid-atlantic), accessed April 11, 2020 at URL http://prd-tnm.s3-website-us-west-2.amazonaws.com/?prefix=StagedProducts/Hydrography/WBD/HU2/Shape/
\n\n# %% [markdown]\n# ## Global Variables\n# 1. Input-Output Directories\n# %%\nimportpath: str = 'C:/Users/q0hecjrk/Documents/_Data/'\noutputpath: str = 'C:/Users/q0hecjrk/Documents/_Projects/northbranch/data/'\n\n# %% [markdown]\n# 2. Livneh Data\n# %%\nyears: list = [*range(1915, 2012, 1)]\nvariables: list = ['prec']\nlivnehdirectory: str = importpath +'Livneh/Daily/'\noutputdirectory: str = outputpath +'livneh/daily/'\nmaxdaydirectory: str = outputdirectory + 'max/1day/'\ncoordinatesystem: str = '4326'\nmeasurementcoordinatesystem: str = '3857'\n\n# %% [markdown]\n# 3. Watershed Boundaries\n# %%\nvariable: str = 'HUC8'\ncode: str = '02070002'\nname: str = 'North Branch Potomac'\nwatershedfile: str = importpath + 'Geospatial/Waterboundary/WBD_02_HU2_Shape/Shape/WBDHU8.shp'\ncoordinatesystem: str = '4326'\n\n# %% [markdown]\n# ## Import and Clip Data \n# %%\nimport dataimports.livneh as livneh\nimport dataimports.waterboundary as wbd \n\n# %% [markdown]\n# 4. Watershed Geometries\n# %%\nnbshape = wbd.importwaterboundary(watershedfile, variable, code)\nnbarea_km2 = nbshape.iloc[0]['AreaSqKm']\nnbbox = wbd.boundaryboxfromshape(nbshape)\nnbshape.head()\n\n# %% [markdown]\n# 5. Import Livneh Data, write out max day files and return max dates\n# %%\ninfiles = livneh.inputfilepaths(livnehdirectory, variables, years)\noutfiles = livneh.outputfilepaths(outputdirectory, infiles)\nmaxdays = livneh.processfiles(infiles, outfiles, nbbox, nbshape, nbarea_km2, maxdaydirectory)\nprint(maxdays)\n\n\n","sub_path":"src/.ipynb_checkpoints/import-checkpoint.py","file_name":"import-checkpoint.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"567075742","text":"from face_activity import face_activity\nimport cv2\n\n\ncamera = face_activity(True,1 )\ncamera.dalmation_dog_face()\nwhile True:\n frame = camera.get_video_frame()\n grey = camera.convert_to_grey(frame)\n people = camera.detect_human(grey)\n\n \n for x,y,w,h in people:\n camera.crop_image(x,y,w,h)\n if camera.is_out_of_frame():\n continue\n\n camera.apply_mask()\n \n camera.show_image(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncamera.end_program()","sub_path":"offline-version/level_1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"122501587","text":"from libs import * \nsys.path.insert(1, '/homecentral/alexandre.mahrach/gdrive/postdoc_IDIBAPS/python/data_analysis') \n\nimport data.constants as gv \nimport data.utils as data \n\nimport plotting as plot \n\ndef unit_vector(vector): \n \"\"\" Returns the unit vector of the vector. 
\"\"\" \n u = vector / np.linalg.norm(vector) \n return u \n\ndef angle_between(v1, v2): \n \"\"\" Returns the angle in radians between vectors 'v1' and 'v2':: \"\"\"\n v1_u = unit_vector(v1) \n v2_u = unit_vector(v2) \n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) \n \ndef coefs_clf(X_trials, y_trials, clf=LogisticRegression(), shuffle=0): \n\n coefs = [] \n for bin in np.arange(0, X_trials.shape[2]): # epochs \n X = X_trials[:,:,bin] \n X = StandardScaler().fit_transform(X) \n \n if bin==0: \n y = y_trials \n if shuffle: \n random.shuffle(y) \n\n clf.fit(X, y)\n \n coefs.append(clf.coef_.flatten() ) \n # coefs.append( np.concatenate( (mean_intercept_CV.flatten() , mean_coefs_CV.flatten() ), axis=0) ) \n \n coefs = np.asarray(coefs) \n return coefs\n\ndef get_cos(coefs): \n \"\"\" Returns the cosine of the angle alpha between vector coefs[0] (early delay) and coefs[1] (late delay) \"\"\"\n alphas = []\n cos_alp=[]\n for j in np.arange(0, coefs.shape[0]): \n alpha = angle_between(coefs[0], coefs[j]) \n alphas.append(alpha) \n cos_alp.append(np.cos(alpha))\n \n return alphas, cos_alp\n\ndef angle_epochs(X_proj, IF_SHUFFLE=0):\n \n clf = LogisticRegression(C=1, solver='liblinear', penalty='l2',tol=1e-6, max_iter=int(1e6)) \n clf = LinearDiscriminantAnalysis() \n # clf = LinearDiscriminantAnalysis(solver='eigen',shrinkage='auto') \n gv.epochs = ['ED','MD','LD'] \n\n for i, gv.trial in enumerate(gv.trials): \n X_S1_trials = X_proj[i,0] \n X_S2_trials = X_proj[i,1] \n X_trials, y_trials = data.get_X_y_epochs(X_S1_trials, X_S2_trials) \n print('X', X_trials.shape,'y', y_trials.shape) \n coefs = coefs_clf(X_trials, y_trials, clf=clf) \n\n print('coefs', coefs.shape) \n alpha, cos_alp = get_cos(coefs) \n print('trial', gv.trial, 'cos_alp', cos_alp) \n\n mean_cos = []\n q1 = []\n q3 = []\n if IF_SHUFFLE: \n mat_cos = [] \n \n for i in range(100): \n coefs_shuffle = coefs_clf(X_trials, y_trials, clf=clf, shuffle=1) \n alpha_shuffle, cos_alp_shuffle = get_cos(coefs_shuffle) \n \n mat_cos.append(cos_alp_shuffle) \n \n mat_cos = np.asarray(mat_cos) \n \n mean_cos = np.mean(mat_cos, axis=0) \n std_cos = np.std(mat_cos, axis=0) \n q1 = np.percentile(mat_cos, 25, axis=0) \n q3 = np.percentile(mat_cos, 75, axis=0) \n \n print('', mean_cos, 'std_cos', std_cos, 'q1', q1, 'q3', q3) \n plot.plot_cosine_bars(cos_alp, mean_cos=[], q1=[], q3=[], IF_SHUFFLE=IF_SHUFFLE) \n","sub_path":"python/data_analysis/dim_red/pca/angle_epochs.py","file_name":"angle_epochs.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"647375658","text":"import torch\nimport torchvision\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import TensorDataset,Dataset, DataLoader\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport random\nimport _pickle as pc\nimport glob\n\nfrom PIL import Image\n\nfrom img_to_vec import Img2Vec\nfrom dset import AgeDataset\n\ntorch.manual_seed(2)\n\n#parameters\nparam = {}\nparam[\"l_rate\"] = 0.001\nparam[\"epoch\"] = 30\nparam[\"l2_rate\"] = 0.0001\nparam[\"layers\"] = [512,128,32]\nparam[\"drop\"] = False\nparam[\"drop_p\"] = 0.2\nparam[\"base_drop\"] = False\nparam[\"base_drop_p\"] = 0.1\nparam[\"batch_size\"] = 8\nparam[\"batch_norm\"] = False\n\ndef default_param(number_of_layers):\n global param\n param[\"epoch\"] = 50\n param[\"l2_rate\"] = 0\n param[\"drop\"] = True\n param[\"base_drop\"] 
= True\n param[\"base_drop_p\"] = 0.2\n param[\"drop_p\"] = 0.2\n param[\"batch_size\"] = 8\n param[\"batch_norm\"] = True\n if number_of_layers==3:\n param[\"drop\"] = True\n param[\"base_drop\"] = True\n param[\"l_rate\"] = 0.0001\n param[\"layers\"] = [512,128,32]\n elif number_of_layers==2:\n param[\"l_rate\"] = 0.0001\n param[\"layers\"] = [256,64]\n param[\"drop\"] = True\n param[\"base_drop\"] = True\n elif number_of_layers==1:\n param[\"l_rate\"] = 0.001\n param[\"layers\"] = [128]\n param[\"drop\"] = True\n param[\"base_drop\"] = True\n elif number_of_layers==0:\n param[\"drop\"] = False\n param[\"base_drop\"] = False\n param[\"base_drop_p\"] = 0\n param[\"drop_p\"] = 0\n param[\"l_rate\"] = 0.001\n param[\"layers\"] = []\n else:\n assert False, \"Unknown number of layers\"\n\n \n\ndef generate_random(a,b,percentage=0,default=-1,intmi=False):\n #returns default with a change of percentage\n if( np.random.ranf() > percentage ):\n retval= (b-a) * np.random.ranf() + a\n else:\n retval = default\n if(intmi==True):\n return int(retval)\n return retval\n\ndef get_random_architecture():\n length = random.choice([0, 1,1, 2,2,3,3,3])\n retval = []\n old = 12\n for i in range(length):\n try:\n old = random.randint(1,old)\n except:\n return [500,200,50]\n retval.append(2**old)\n return retval\n\ndef random_params():\n global param\n param[\"l_rate\"] = 10**(-generate_random(0,6,intmi=True))\n param[\"l2_rate\"] = 10**(-generate_random(0,8,intmi=True))\n param[\"layers\"] = get_random_architecture()\n \n ##init\n param[\"batch_norm\"] = False\n param[\"drop\"] = False\n param[\"drop_p\"] = False\n ##\n param[\"batch_norm\"] = random.choice([True, False])\n\n param[\"drop\"] = random.choice([True, False])\n param[\"drop_p\"] = generate_random(0,0.6)\n param[\"base_drop\"] = random.choice([True, False])\n param[\"base_drop_p\"] = generate_random(0,0.6)\n\n param[\"batch_size\"] = random.choice([16,32,64,128,256,512,1024])\n \n param[\"epoch\"] = 50 if sum(param[\"layers\"]) < 2000 else 30\n\ndef mse_loss(a,b):\n a = a.data.numpy()\n b = b.numpy()\n return ((a - b)**2).mean(axis=None)\n\n\n#auxilary functions\ndef calc_acc(pred,truth):\n pred =pred.data.view(-1).numpy()\n truth=truth.view(-1).numpy()\n assert pred.shape==truth.shape, \"Shapes are not same\"\n return (np.abs(pred-truth) < 10).sum() / pred.shape[0]\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net,self).__init__()\n \n l = param[\"layers\"]\n if(len(l)==0):\n self.fc1 = nn.Linear(512,1)\n elif(len(l)==1):\n self.fc1 = nn.Linear(512,l[0])\n self.fc2 = nn.Linear(l[0],1)\n if param[\"batch_norm\"]==True:\n self.bn1 = nn.BatchNorm1d(l[0])\n elif(len(l)==2):\n self.fc1 = nn.Linear(512,l[0])\n self.fc2 = nn.Linear(l[0],l[1])\n self.fc3 = nn.Linear(l[1],1)\n if param[\"batch_norm\"]==True:\n self.bn1 = nn.BatchNorm1d(l[0])\n self.bn2 = nn.BatchNorm1d(l[1])\n elif(len(l)==3):\n self.fc1 = nn.Linear(512,l[0])\n self.fc2 = nn.Linear(l[0],l[1])\n self.fc3 = nn.Linear(l[1],l[2])\n self.fc4 = nn.Linear(l[2],1)\n if param[\"batch_norm\"]==True:\n self.bn1 = nn.BatchNorm1d(l[0])\n self.bn2 = nn.BatchNorm1d(l[1])\n self.bn3 = nn.BatchNorm1d(l[2])\n\n if param[\"drop\"]==True :\n self.drop = nn.Dropout(p=param[\"drop_p\"])\n \n if param[\"base_drop\"]==True :\n self.base_drop = nn.Dropout(p=param[\"base_drop_p\"])\n\n \n def forward(self,x):\n \n l = param[\"layers\"]\n if param[\"base_drop\"]==True :\n x = self.base_drop(x)\n\n if(len(l)==0):\n x = self.fc1(x)\n elif(len(l)==1):\n x = self.fc1(x)\n if param[\"drop\"]==True :\n 
x=self.drop(x)\n if param[\"batch_norm\"]==True:\n x = self.bn1(x)\n x = F.relu(x)\n x = self.fc2(x)\n elif(len(l)==2):\n x = self.fc1(x)\n if param[\"drop\"]==True :\n x=self.drop(x)\n if param[\"batch_norm\"]==True:\n x = self.bn1(x)\n x = F.relu(x)\n x = self.fc2(x)\n if param[\"batch_norm\"]==True:\n x = self.bn2(x)\n x = F.relu(x)\n x = self.fc3(x)\n elif(len(l)==3):\n x = self.fc1(x)\n if param[\"drop\"]==True :\n x=self.drop(x)\n if param[\"batch_norm\"]==True:\n x = self.bn1(x)\n x = F.relu(x)\n x = self.fc2(x)\n if param[\"batch_norm\"]==True:\n x = self.bn2(x)\n x = F.relu(x)\n x = self.fc3(x)\n if param[\"batch_norm\"]==True:\n x = self.bn3(x)\n x = F.relu(x)\n x = self.fc4(x)\n return x\n\n\n\ndef train(net):\n net.train()\n optimizer = optim.RMSprop(net.parameters(),lr=param[\"l_rate\"], weight_decay=param[\"l2_rate\"])\n criterion = nn.MSELoss()\n log_interval = 5\n train_loss_list = []\n val_loss_list = []\n train_accuracy_list = []\n val_accuracy_list = []\n \n for epoch in range(param[\"epoch\"]):\n net.train()\n for batch_idx, (data,target) in enumerate(dataloader):\n data,target = Variable(data),Variable(target)\n #resize\n data = data.view(-1,512)\n optimizer.zero_grad()\n net_out = net(data)\n loss = criterion(net_out,target)\n \n loss.backward()\n optimizer.step() \n if batch_idx % log_interval == 0:\n pass\n #train_loss_list.append(int(loss[0]))\n \n net.eval()\n net_out_train = net(Variable(x_train))\n train_accuracy = calc_acc(net_out_train,y_train)\n train_loss = int(mse_loss(net_out_train,y_train))\n\n net_out_val = net(Variable(x_val))\n val_accuracy = calc_acc(net_out_val,y_val)\n val_loss = int(mse_loss(net_out_val,y_val))\n\n train_loss_list.append(train_loss)\n train_accuracy_list.append(train_accuracy)\n val_loss_list.append(val_loss)\n val_accuracy_list.append(val_accuracy)\n\n if((epoch+1)%1==0 or epoch==0):\n print('Train Epoch: {}\\tLoss: {:.4f}\\tAcc: {:.4f}'.format(\n epoch, loss.data[0],val_accuracy))\n if val_accuracy > 0.62:\n break\n return train_loss_list, val_loss_list, train_accuracy_list, val_accuracy_list\n \n \n\nx_train = np.load(\"./original/train.npy\")\ny_train = np.load(\"./original/train_gt.npy\")\nx_val = np.load(\"./original/valid.npy\")\ny_val = np.load(\"./original/valid_gt.npy\")\nx_test = np.load(\"./original/test.npy\")\n\n\nx_test = torch.from_numpy(x_test)\nx_train = torch.from_numpy(x_train)\ny_train = torch.from_numpy(y_train).view(-1,1).float()\nx_val = torch.from_numpy(x_val)\ny_val = torch.from_numpy(y_val).view(-1,1).float()\n\n\"\"\"\nage_dataset = TensorDataset(x_train,y_train)\nage_dataset_val = TensorDataset(x_val,y_val)\ndataloader = DataLoader(age_dataset, batch_size=param[\"batch_size\"],\n shuffle=True, num_workers=4)\n\"\"\"\n\nage_dataset = TensorDataset(x_train,y_train)\ndataloader = DataLoader(age_dataset, batch_size=param[\"batch_size\"],\n shuffle=True, num_workers=4)\n\n\ndef run_test():\n #SET PARAMETERS\n default_param(3)\n #\n net=Net()\n train(net)\n net.eval()\n net_out_test = net(Variable(x_test))\n net_out_test=net_out_test.data.view(-1).numpy()\n print()\n with open(\"estimations_test.npy\",\"wb\") as filem:\n np.save(filem,net_out_test)\n print(net_out_test.shape)\n print()\n print(net_out_test)\n\ndef run_pictures():\n #SET PARAMETERS\n default_param(3)\n #\n net=Net()\n train(net)\n net.eval()\n \n pics = glob.glob(\"./pictures/*\")\n for pic in pics:\n fe = Img2Vec(cuda=False) # change this if you use Cuda version of the PyTorch.\n img = Image.open(pic)\n img = img.resize((224, 224))\n feats = 
fe.get_vec(img).reshape(1, -1)\n feats = torch.from_numpy(feats)\n print(pic)\n net_out = net(Variable(feats))\n print(net_out)\n\nrun_pictures()\nexit()\n\n\n\ncounter = 3\nwhile True:\n default_param(counter)\n for i in [0]:\n \n\n param[\"epoch\"] = 100\n print()\n print(param)\n print()\n torch.manual_seed(2)\n net=Net()\n train_loss_list, val_loss_list, train_accuracy_list, val_accuracy_list = train(net)\n\n param[\"max_val_acc\"] = max(val_accuracy_list)\n param[\"min_val_loss\"] = min(val_loss_list)\n param[\"max_train_acc\"] = max(train_accuracy_list)\n param[\"min_train_loss\"] = min(train_loss_list)\n param[\"last_ten_val_acc\"] = sum( val_accuracy_list[-10:] )/10\n param[\"last_five_val_acc\"] = sum( val_accuracy_list[-5:] )/5\n net.eval()\n\n net_out_val = net(Variable(x_val))\n \n print()\n print(\"Avg acc ten: \",param[\"last_ten_val_acc\"])\n print(\"Avg acc five: \",param[\"last_five_val_acc\"] )\n print(\"Max acc: \",param[\"max_val_acc\"])\n print(\"Min loss: \",param[\"min_val_loss\"])\n print(\"Max acc (train): \",param[\"max_train_acc\"])\n print(\"Min loss (train): \",param[\"min_train_loss\"])\n print()\n print(\"---------\")\n \n plt.plot(train_loss_list, label=\"Train. Loss\")\n plt.plot(val_loss_list, label=\"Val. Loss\")\n plt.legend()\n plt.show()\n plt.plot(train_accuracy_list,label=\"Train Acc.\")\n plt.plot(val_accuracy_list,label=\"Val. Acc.\")\n plt.ylim((0.2,0.8))\n plt.legend()\n plt.show()\n \n\n with open(\"logs.p\",\"ab\") as filem:\n pc.dump(param,filem)\n with open(\"logs.txt\",\"a\") as filem:\n filem.write(str(param)+\"\\n\\n\" )\n with open(\"logs.txt\",\"a\") as filem:\n filem.write(str(train_loss_list)+\"\\n\\n\" )\n filem.write(str(val_loss_list)+\"\\n\\n\" )\n filem.write(str(train_accuracy_list)+\"\\n\\n\" )\n filem.write(str(val_accuracy_list)+\"\\n\\n\" )\n with open(\"logs.txt\",\"a\") as filem:\n filem.write(\"--------------------------\"+\"\\n\\n\\n\" )\n counter += 1\n\n\n\"\"\"\n{'l_rate': 0.001, 'epoch': 100, 'l2_rate': 1e-05, 'layers': [1008], 'drop': False, 'drop_p': 0.7110156773892836, 'base_drop': False, 'base_drop_p': 0.6544765857182548, 'batch_size': 256, 'max_val_acc': 0.6335, 'min_val_loss': 158, 'max_train_acc': 0.8918, 'min_train_loss': 40}\n\n\"\"\"\n\n\n\n\n","sub_path":"ceng483/hw2/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":11668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"497825007","text":"#Author: Angel Alejandro Garcia Ramirez\r\n#Group: GIR0142\r\n\r\n#Adding new devices\r\n\r\nfile = open(\"devices.txt\", \"a\")\r\nwhile True:\r\n nuevodis = input(\"Enter the name of the new device (type 'salir' to finish): \")\r\n if nuevodis == \"salir\":\r\n print(\"Finished adding devices! 
\")\r\n break\r\n file.write(nuevodis + \"\\n\")\r\nfile.close()\r\n \r\n\r\n","sub_path":"unidad_1/actividad_7/07_file-access_actvity_Angel.py","file_name":"07_file-access_actvity_Angel.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"554002276","text":"from blue_class import blue_handler\nfrom process import data_make\nclass main:\n def __init__(self):\n self.device_addr={}\n\n self.rough_data={}\n self.masked_data={}\n # key is name and value is list of data\n\n self.blue_handle=blue_handler()\n self.data_handle=data_make()\n\n def initial_process(self):\n #self.blue_handle.find()\n self.blue_handle.device_list={\"SYR_2\":\"98:D3:71:FD:7C:04\"}\n self.device_addr=self.blue_handle.device_list\n # find complete\n\n\n def data_processing(self):\n # get data on device_addr\n for name in self.device_addr.keys():\n self.rough_data[name]= self.blue_handle.receive(self.device_addr.get(name))\n print(self.rough_data)\n self.masked_data[name]=self.data_handle.data_masking(self.rough_data.get(name))\n print(self.masked_data)\n\n def data_save(self):\n for name in self.device_addr.keys():\n file_name=\"/data/\"+name+\".txt\"\n with open(file_name, \"a\") as f:\n f.write(self.masked_data.get(name))\n\n\n\n def start(self):\n\n self.initial_process()\n self.data_processing()\n self.masked_data()\n\na=main()\na.start()\n\n\n ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"225267488","text":"import urllib2\nimport json\n\nGIFY_URL = \"http://api.giphy.com/\"\nGIFY_KEY = \"dc6zaTOxFJmzC\"\n\ndef top_gifs(artist_name):\n\tartist_name_rep=artist_name.replace(' ', '%20')\n\treq_url = GIFY_URL+\"v1/gifs/search?q=\"+artist_name_rep+\"&api_key=\"+GIFY_KEY\n\t\n\tresponse = json.loads(urllib2.urlopen(req_url).read())\n\tgif_list = response['data']\n\tif len(gif_list) < 5:\n\t\tartist_list = artist_name.split(' ')\n\t\tif artist_list[0] != artist_name:\n\t\t\treq_url = GIFY_URL+\"v1/gifs/search?q=\"+artist_list[0]+\"&api_key=\"+GIFY_KEY\n\t\t\tresponse = json.loads(urllib2.urlopen(req_url).read())\n\t\t\tgif_list.extend(response['data'])\n\n\tif len(gif_list) < 5:\n\t\treq_url = GIFY_URL+\"v1/gifs/search?q=doge&api_key=\"+GIFY_KEY\n\t\tresponse = json.loads(urllib2.urlopen(req_url).read())\n\t\tgif_list.extend(response['data'])\n\n\treturn gif_list","sub_path":"gify.py","file_name":"gify.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"560210401","text":"import matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport sys\nimport myutils\n\nif len(sys.argv) < 2:\n print('please give annotated corpus as arg, example:')\n print('python3 scripts/1.distribution.py data/lexnorm2015.annotated')\n exit(1)\n\nplt.style.use('rob.mplstyle')\nfig, ax = plt.subplots(figsize=(8,5), dpi=300)\n\ncatCounts = [0] * 16\ncatUniq = [0] * 16\nseen = set()\ntotal = 0\ntotalUniq = 0\nfor line in open(sys.argv[1]):\n splitted = line.split()\n if len(splitted) < 2:\n continue\n total += 1\n cat = int(splitted[0])\n if len(splitted) > 3:\n repl = splitted[1] + ':' + splitted[3]\n else:\n repl = splitted[1]\n\n if repl not in seen:\n seen.add(repl)\n catUniq[cat] += 1\n totalUniq += 1\n catCounts[cat] += 1 \n\n# FOR PERCENTAGES\n#for i in range(len(catCounts)):\n# catCounts[i] = 
(catCounts[i] * 100) / total\n# catUniq[i] = (catUniq[i] * 100) / totalUniq\n\nbarwidth = 1/3\nidxs = []\nidxs2 = []\nfor i in range(len(catCounts)-1):\n idxs.append(1/6 + i + barwidth * .5)\n idxs2.append(1/6 + i + barwidth * 1.5)\n\nmyutils.setTicks(ax, myutils.cats, 45)\nax.bar(idxs, catCounts[1:], width= barwidth, color = myutils.colors[0], label='Total')\nax.bar(idxs2, catUniq[1:], width=barwidth, color=myutils.colors[1], label='Repl. Types')\n\n\n\n\"\"\"EDIT 100-NUMBERS BENEATH HERE TO EDIT THE YLIMIT RANGE\"\"\"\nax.plot([5,5], [0, 840], color='black', linestyle='dashed', clip_on=False)\nax.text(2, 820, 'Unintended', fontsize=15)\nax.text(5.3, 820, 'Intended', fontsize=15)\n\n\nax.set_ylabel(\"Number of replacements\", color='black')\n#ax.set_xlabel(\"Category\", color='black')\nleg = plt.legend(loc='upper left')\nleg.get_frame().set_linewidth(1.5)\nax.set_ylim(0,800)\nax.set_xlim(0,len(catCounts) - 1)\nfig.savefig('distribution.pdf', bbox_inches='tight')\n#plt.show()\n\n","sub_path":"Distribution/1.distribution.py","file_name":"1.distribution.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"147531284","text":"from distutils.core import setup\n\nwith open(\"requirements.txt\") as reqs_file:\n reqs = reqs_file.readlines()\n\nsetup(\n name='speechless',\n version='0.1.1',\n packages=['speechless'],\n url='https://github.com/JuliusKunze/speechless',\n license='MIT License',\n author='Julius Kunze',\n author_email='juliuskunze@gmail.com',\n description='',\n install_requires=reqs\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"430391995","text":"from flask import Flask,Response,render_template\nfrom flask_socketio import SocketIO, send ,emit\nimport time\nimport threading\nimport argparse\nimport datetime\nimport imutils\n#import RPi.GPIO as GPIO \nfrom time import sleep\nimport json\nimport sys\nimport numpy as np\nimport turtle\nimport subprocess\nimport os\nimport io\nfrom PIL import Image\nimport shutil \n\nfrom turtle import Turtle, Screen\n\noutputFrame = None\nlock = threading.Lock()\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'mysecret'\nsocketio = SocketIO(app)\n\ntime.sleep(0.1)\nglobal making_map\nmaking_map = False\nglobal cleaning\ncleaning = False\n\n#motor pins for movement\nin1 = 22\nin2 = 17\nin3 = 27\nin4 = 25\nen4 = 18\nen8 = 5\n\n#ultrasonic pins\nF_TRIG = 23\nF_ECHO = 24\nL_TRIG = 26\nL_ECHO = 19\nR_TRIG = 13 #grey\nR_ECHO = 16 # yellow\n\nprint(\"Initializing...\")\n# GPIO.setmode(GPIO.BCM)\n# GPIO.setup(in1,GPIO.OUT)\n# GPIO.setup(in2,GPIO.OUT)\n# GPIO.setup(in3,GPIO.OUT)\n# GPIO.setup(in4,GPIO.OUT)\n\n# GPIO.setup(en4,GPIO.OUT)\n# GPIO.setup(en8,GPIO.OUT)\n \n\n# GPIO.setup(F_TRIG,GPIO.OUT)\n# GPIO.setup(F_ECHO,GPIO.IN)\n# GPIO.setup(L_TRIG,GPIO.OUT)\n# GPIO.setup(L_ECHO,GPIO.IN)\n# GPIO.setup(R_TRIG,GPIO.OUT)\n# GPIO.setup(R_ECHO,GPIO.IN)\n\n# GPIO.output(in1,GPIO.LOW)\n# GPIO.output(in2,GPIO.LOW)\n# GPIO.output(in3,GPIO.LOW)\n# GPIO.output(in4,GPIO.LOW)\n\n\n# GPIO.output(F_TRIG, False)\n# GPIO.output(L_TRIG, False)\n# GPIO.output(R_TRIG, False)\n\n\n\n# p=GPIO.PWM(en4,1000)\n# p1=GPIO.PWM(en8,1000)\n\n# p.start(25)\n# p1.start(25)\n\n# p.ChangeDutyCycle(100)\n# p1.ChangeDutyCycle(100)\n\n# start = time.time()\n\n# PERIOD_OF_TIME = 180 # 5min\n\nclass ROBOT():\n\n def __init__(self,direction,rows,cols):\n 
self.direction = \"NORTH\"\n self.rows = rows\n self.cols = cols\n self.map = np.zeros((self.rows,self.cols))\n self.starting_position = [self.rows//2,self.cols//2]\n self.current_position = [self.rows//2,self.cols//2]\n self.map_formed = False\n self.map[self.current_position[0],self.current_position[1]] = 3\n self.save_map()\n self.values = []\n self.leftmost = []\n self.righmost = []\n self.reached = False\n self.map_complete = False\n self.time = 0\n self.distance = 0\n self.percent = 0\n self.movements = []\n self.final = \"NORTH\"\n print(\"[+] Starting position: \",self.current_position)\n\n def save_map(self):\n print(\"Saving Map\")\n a_file = open(\"map.txt\", \"w\")\n for row in self.map:\n np.savetxt(a_file, row)\n a_file.close()\n \n def reset_map(self):\n self.map = np.zeros((self.rows,self.cols))\n self.starting_position = [self.rows//2,self.cols//2]\n self.current_position = [self.rows//2,self.cols//2]\n self.map[self.current_position[0],self.current_position[1]] = 3\n self.map_formed = False\n self.values = []\n self.leftmost = []\n self.righmost = []\n self.reached = False\n self.time = 0\n self.time_diff = 0\n self.distance = 0\n self.percent = 0\n self.map_complete = False\n self.movements = []\n self.save_map()\n self.update_values()\n self.direction = \"NORTH\"\n self.print_map()\n print(\"[+] Starting position: \",self.current_position)\n print(\"Map Reset.\")\n\n def load_map(self):\n print(\"Loading Map..\")\n original_array = np.loadtxt(\"map.txt\").reshape(self.rows, self.cols)\n self.map = original_array\n \n def aload_map(self):\n print(\"Loading Map..\")\n original_array = np.loadtxt(\"temp.txt\").reshape(self.rows, self.cols)\n self.map = original_array\n \n def update_values(self):\n p = str(self.percent)+\"%\"\n val = {\"distance\":self.distance,\"time\":self.time_diff,\"percent\":p,\"map\":self.map_formed,\"map_complete\":self.map_complete}\n with open('values.json', 'w') as outfile:\n json.dump(val, outfile)\n print(\"[+] Updated Values.\")\n\n def calculate_percent(self):\n count_total = 0\n count_done = 0\n for i in range(0,self.rows):\n for j in range(0,self.cols):\n if(self.map[i,j]!=0):\n count_total +=1\n if(self.map[i,j]==7):\n count_done+=1\n percent = (count_done/count_total)*100\n self.percent = percent\n\n def make_image(self):\n print(\"Making image\")\n turtle.Turtle._screen = None # force recreation of singleton Screen object\n turtle.TurtleScreen._RUNNING = True \n m = turtle.Turtle()\n m.color('black', 'light blue')\n m.begin_fill()\n m.penup()\n m.setpos(-100,-100)\n m.pendown()\n #self.current_position = self.starting_position\n m.left(90)\n for i in self.values:\n if(i==1):\n m.forward(50)\n elif(i==4):\n m.forward(50)\n m.left(90)\n elif(i==2):\n m.forward(50)\n m.right(90) \n # print(m.pos())\n\n # if(m.pos()!=(0.00,-0.00)):\n # print(\"kkkk\")\n # m.forward(50)\n # print(m.pos())\n # else:\n # print(m.pos())\n\n m.end_fill()\n time.sleep(0.3)\n m.getscreen().getcanvas().postscript(file='map.ps')\n ps = open('map.ps')\n m.getscreen().bye()\n psimage=Image.open('map.ps')\n psimage.save('map.png')\n source = os.path.join(os.getcwd(),'map.png')\n destination = os.path.join(os.getcwd(),'static','images','map.png')\n if os.path.exists(destination):\n os.remove(destination)\n dest = shutil.copyfile(source, destination)\n print(\"[+] Image Saved.\")\n m = None\n\n def save_movements(self):\n turtle.Turtle._screen = None # force recreation of singleton Screen object\n turtle.TurtleScreen._RUNNING = True \n print(\"Making movement image\")\n b 
= turtle.Turtle()\n b.color('black', 'light blue')\n b.hideturtle() \n b.penup() \n b.setpos(-100,-100) \n b.showturtle() \n b.pendown()\n sleep(3)\n b.begin_fill()\n b.left(90)\n for i in self.values:\n if(i==1):\n b.forward(50)\n elif(i==4):\n b.forward(50)\n b.left(90)\n elif(i==2):\n b.forward(50)\n b.right(90) \n\n # if(b.pos()!=(0,0)):\n # b.forward(50)\n\n b.setpos(-100,-100) \n b.end_fill()\n b.color('red', 'yellow')\n b.hideturtle() \n b.penup() \n b.setpos(-100,-100) \n b.showturtle() \n b.pendown()\n b.pensize(4)\n b.right(90)\n\n print(self.values)\n print(self.movements)\n # if(self.final==\"SOUTH\"):\n # ind = 0\n # for i in range(len(self.movements)-1,-1,-1):\n\n # if(self.movements[i]==7):\n # ind = i\n # break\n # print(ind) \n # self.movements = self.movements[0:ind]\n\n for i in range(len(self.movements),0,-1):\n if(self.movements[i-1]==6 or self.movements[i-1]==7):\n del self.movements[i]\n \n \n print(self.values)\n print(self.movements)\n\n for i in self.movements:\n if(i==1):\n b.forward(50)\n elif(i==4):\n b.forward(50)\n b.left(90)\n elif(i==8):\n b.backward(50)\n elif(i==2):\n b.forward(50)\n b.right(90) \n elif(i==6):\n b.forward(50)\n b.right(90)\n b.forward(50) \n b.right(90) \n elif(i==7):\n b.forward(50)\n b.left(90)\n b.forward(50) \n b.left(90)\n\n b.forward(50) \n\n #b.setpos(0, 0)\n time.sleep(0.3)\n b.getscreen().getcanvas().postscript(file='map1.ps')\n ps = open('map1.ps')\n b.getscreen().bye()\n psimage=Image.open('map1.ps')\n psimage.save('map1.png')\n source = os.path.join(os.getcwd(),'map1.png')\n destination = os.path.join(os.getcwd(),'static','images','map1.png')\n if os.path.exists(destination):\n os.remove(destination)\n dest = shutil.copyfile(source, destination)\n print(\"[+] Movement Map Saved.\")\n b = None\n\n def add_padding(self):\n row_to_be_added = np.zeros((1,self.cols))\n result = np.vstack ((row_to_be_added,self.map) )\n result = np.vstack ((result,row_to_be_added) )\n self.map = result\n self.rows +=2\n self.starting_position[0]+=1\n\n def follow_map(self):\n self.time = datetime.datetime.now()\n self.time_diff = 0\n self.calculate_percent()\n self.distance = 0\n self.map_complete = False\n self.movements = []\n self.update_values()\n self.load_map()\n #self.add_padding()\n self.starting_position = [self.rows//2,self.cols//2]\n self.current_position = self.starting_position\n self.direction = \"NORTH\"\n print(\"[+] Starting position: \",self.current_position)\n self.make_area()\n self.find_leftmost()\n self.find_rightmost()\n self.print_map()\n self.map[self.current_position[0],self.current_position[1]]=7\n sec = 0.5\n rsec = 2.7\n while(not self.leftmost_reached()):\n if(self.direction==\"NORTH\"):\n if(self.map[self.current_position[0]-1,self.current_position[1]]==1):\n self.forward(sec)\n self.movements.append(1)\n self.distance+=7\n self.current_position[0]-=1\n self.map[self.current_position[0],self.current_position[1]]=7\n elif(self.map[self.current_position[0]-1,self.current_position[1]]==4):\n self.current_position[0]-=1\n self.map[self.current_position[0],self.current_position[1]]=7\n if(self.leftmost_reached()):\n break\n self.left(rsec)\n self.movements.append(4)\n self.direction = \"WEST\"\n elif(self.map[self.current_position[0]-1,self.current_position[1]]==2):\n self.current_position[0]-=1\n self.map[self.current_position[0],self.current_position[1]]=7\n if(self.leftmost_reached()):\n break\n self.right(rsec)\n self.movements.append(2)\n self.direction = \"EAST\"\n elif(self.direction==\"WEST\"):\n 
if(self.map[self.current_position[0],self.current_position[1]-1]==1):\n                    self.forward(sec)\n                    self.movements.append(1)\n                    self.distance+=7\n                    self.current_position[1]-=1\n                    self.map[self.current_position[0],self.current_position[1]]=7\n                elif(self.map[self.current_position[0],self.current_position[1]-1]==4):\n                    self.current_position[1]-=1\n                    self.map[self.current_position[0],self.current_position[1]]=7\n                    if(self.leftmost_reached()):\n                        break\n                    self.direction = \"SOUTH\"\n                    self.left(rsec)\n                    self.movements.append(4)\n                elif(self.map[self.current_position[0],self.current_position[1]-1]==2):\n                    self.current_position[1]-=1\n                    self.map[self.current_position[0],self.current_position[1]]=7\n                    if(self.leftmost_reached()):\n                        break\n                    self.right(rsec)\n                    self.movements.append(2)\n                    self.direction = \"NORTH\"\n            elif(self.direction==\"SOUTH\"):\n                if(self.map[self.current_position[0]+1,self.current_position[1]]==1):\n                    self.forward(sec)\n                    self.movements.append(1)\n                    self.distance+=7\n                    self.current_position[0]+=1\n                    self.map[self.current_position[0],self.current_position[1]]=7\n                elif(self.map[self.current_position[0]+1,self.current_position[1]]==4):\n                    self.current_position[0]+=1\n                    self.map[self.current_position[0],self.current_position[1]]=7\n                    if(self.leftmost_reached()):\n                        break\n                    self.left(rsec)\n                    self.movements.append(4)\n                    self.direction = \"EAST\"\n                elif(self.map[self.current_position[0]+1,self.current_position[1]]==2):\n                    self.current_position[0]+=1\n                    self.map[self.current_position[0],self.current_position[1]]=7\n                    if(self.leftmost_reached()):\n                        break\n                    self.right(rsec)\n                    self.movements.append(2)\n                    self.direction = \"WEST\"\n            elif(self.direction==\"EAST\"):\n                if(self.map[self.current_position[0],self.current_position[1]+1]==1):\n                    self.current_position[1]+=1\n                    self.map[self.current_position[0],self.current_position[1]]=7\n                    self.forward(sec)\n                    self.movements.append(1)\n                    self.distance+=7\n                elif(self.map[self.current_position[0],self.current_position[1]+1]==4):\n                    self.current_position[1]+=1\n                    self.map[self.current_position[0],self.current_position[1]]=7\n                    if(self.leftmost_reached()):\n                        break\n                    self.direction = \"NORTH\"\n                    self.left(rsec)\n                    self.movements.append(4)\n                elif(self.map[self.current_position[0],self.current_position[1]+1]==2):\n                    self.current_position[1]+=1\n                    self.map[self.current_position[0],self.current_position[1]]=7\n                    if(self.leftmost_reached()):\n                        break\n                    self.right(rsec)\n                    self.movements.append(2)\n                    self.direction = \"SOUTH\"\n            self.time_diff += (datetime.datetime.now() - self.time).seconds\n            self.time = datetime.datetime.now()\n            self.calculate_percent()\n            self.update_values()\n            \n        print(\"leftmost reached.\")\n        flag = 1\n        self.final = \"NORTH\"\n        while(not self.complete() and (not self.rightmost_reached())):\n            while(self.map[self.current_position[0],self.current_position[1]+1]==0):\n                self.backward(sec)\n                self.movements.append(8)\n                self.distance+=7\n                if(self.direction==\"NORTH\"):\n                    self.current_position[0]+=1\n                elif(self.direction==\"SOUTH\"):\n                    self.current_position[0]-=1\n            self.stop(0.2)\n            dis = self.forward_distance()\n            if(dis<10):\n                self.backward(sec)\n            if(flag==1):\n                self.uturn_right(3.8)\n                self.movements.append(6)\n                flag = 0\n            else:\n                self.uturn_left(3.8)\n                self.movements.append(7)\n                flag = 1\n\n            self.distance+=15\n            self.current_position[1]+=1\n            self.map[self.current_position[0],self.current_position[1]]=7\n            if(self.direction==\"EAST\" or self.direction==\"NORTH\"):\n                self.direction = \"SOUTH\"\n            elif(self.direction==\"SOUTH\"):\n                self.direction = \"NORTH\"\n            if(self.direction==\"NORTH\"):\n                
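# Sweep phase, heading NORTH: drive straight up the current column until the cell ahead is unmapped (0).\n                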
while(self.map[self.current_position[0]-1,self.current_position[1]]!=0):\n self.forward(sec)\n self.movements.append(1)\n self.distance+=7\n self.current_position[0]-=1\n self.map[self.current_position[0],self.current_position[1]]=7\n elif(self.direction==\"SOUTH\"):\n while(self.map[self.current_position[0]+1,self.current_position[1]]!=0):\n self.forward(sec)\n self.movements.append(1)\n self.distance+=7\n self.current_position[0]+=1\n self.map[self.current_position[0],self.current_position[1]]=7\n\n self.time_diff += (datetime.datetime.now() - self.time).seconds\n self.time = datetime.datetime.now()\n self.calculate_percent()\n self.update_values()\n self.final = self.direction\n \n self.print_map()\n self.time_diff += (datetime.datetime.now() - self.time).seconds\n self.time = datetime.datetime.now()\n self.percent = 100\n self.map_complete = True\n self.update_values()\n global making_map\n making_map = False\n self.save_movements()\n print(\"Map completed\")\n\n def find_leftmost(self):\n for i in range(0,self.cols):\n for j in range(0,self.rows):\n if(self.map[j,i]!=0):\n self.leftmost = [j,i]\n print(\"left most: \",self.leftmost)\n return\n \n def leftmost_reached(self):\n if(self.current_position == self.leftmost):\n return True\n else:\n return False\n \n def find_rightmost(self):\n f = False\n for i in range(self.cols-1,-1,-1):\n for j in range(0,self.rows):\n if(self.map[j,i]!=0):\n self.rightmost = [j,i]\n f = True\n if(f==True):\n print(\"rightmost: \",self.rightmost)\n return\n \n def rightmost_reached(self):\n if(self.current_position == self.rightmost):\n print(\"[+] rightmost reached.\")\n return True\n else:\n return False\n\n def complete(self):\n for i in range(0,self.rows):\n for j in range(0,self.cols):\n if(self.map[i,j]!=0 and self.map[i,j]!=7 ):\n return False\n print(\"[+] completed.\")\n return True\n\n def make_area(self):\n #print(self.rows,self.cols)\n # for i in range(0,self.rows-1):\n # for j in range(0,self.cols):\n # if(self.map[i,j] == 0 and self.map[i,j-1]!=0 and self.map[i-1,j]!=0 and self.map[i-1,j-1]!=0 and self.map[i,j-1]!=4 and self.map[i-1,j]!=4 and self.map[i-1,j-1]!=4):\n # self.map[i,j] = 9\n for i in range(0,self.rows-1):\n for j in range(0,self.cols):\n if(self.map[i,j] == 0 and self.map[i,j-1]!=0 and self.map[i-1,j]!=0 and self.map[i-1,j-1]!=0):\n if(self.map[i-1,j-1]==4 and self.map[i,j-1]!=9 and self.map[i-1,j]!=9):\n pass\n else:\n self.map[i,j] = 9\n # flag = False\n # for i in range(0,self.rows):\n # flag = False \n # for j in range(0,self.cols):\n # if(self.map[i,j]!=0 and flag==False):\n # flag = True\n # elif(self.map[i,j]!=0 and flag==True):\n # flag = False\n # elif(self.map[i,j]==0 and flag==True):\n # self.map[i,j] = 9\n self.print_map()\n # for i in range(0,self.rows):\n # for j in range(0,self.cols):\n # if(self.map[i,j]!=0 and self.map[i,j]!=1):\n # self.map[i,j] = 6 \n\n def forward_distance(self):\n print(\"Forward Distance Measurement In Progress\")\n sleep(2)\n distance = 0\n # GPIO.output(F_TRIG, True)\n # time.sleep(0.00001)\n # GPIO.output(F_TRIG, False)\n\n # while GPIO.input(F_ECHO)==0:\n # pulse_start = time.time()\n\n # while GPIO.input(F_ECHO)==1:\n # pulse_end = time.time()\n\n # pulse_duration = pulse_end - pulse_start\n\n # distance = pulse_duration * 17150\n # distance = round(distance, 2)\n # print(\"Forward Distance:\",distance,\"cm\")\n # GPIO.cleanup()\n return distance\n\n def left_distance(self):\n print(\"Left Distance Measurement In Progress\")\n sleep(2)\n distance = 0\n # GPIO.output(L_TRIG, True)\n # 
time.sleep(0.00001)\n        # GPIO.output(L_TRIG, False)\n\n        # while GPIO.input(L_ECHO)==0:\n        #     pulse_start = time.time()\n\n        # while GPIO.input(L_ECHO)==1:\n        #     pulse_end = time.time()\n        # pulse_duration = pulse_end - pulse_start\n\n        # distance = pulse_duration * 17150\n        # distance = round(distance, 2)\n        # print(\"Left Distance:\",distance,\"cm\")\n        # #GPIO.cleanup()\n        return distance\n    \n    def right_distance(self):\n        print(\"Right Distance Measurement In Progress\")\n        sleep(2)\n        distance = 0\n\n        # GPIO.output(R_TRIG, True)\n        # time.sleep(0.00001)\n        # GPIO.output(R_TRIG, False)\n\n        # while GPIO.input(R_ECHO)==0:\n        #     pulse_start = time.time()\n\n        # while GPIO.input(R_ECHO)==1:\n        #     pulse_end = time.time()\n        # pulse_duration = pulse_end - pulse_start\n\n        # distance = pulse_duration * 17150\n        # distance = round(distance, 2)\n        # print(\"Right Distance:\",distance,\"cm\")\n        # #GPIO.cleanup()\n        return distance\n    \n    def update_map(self, move):\n\n        if(self.direction==\"NORTH\"):\n            self.current_position[0]-=1\n        elif(self.direction==\"EAST\"):\n            self.current_position[1]+=1\n        elif(self.direction==\"SOUTH\"):\n            self.current_position[0]+=1\n        elif(self.direction==\"WEST\"):\n            self.current_position[1]-=1\n        \n        if(move==\"forward\"):\n            if(self.map[self.current_position[0],self.current_position[1]]==3):\n                print(\"Starting point reached. Map finished.\")\n                global making_map\n                making_map = False\n                self.values.append(1)\n                self.save_map()\n                self.map_formed = True\n                self.update_values()\n                self.make_image()\n                return\n            self.map[self.current_position[0],self.current_position[1]] = 1\n            self.values.append(1)\n        elif(move==\"left\"):\n            self.map[self.current_position[0],self.current_position[1]] = 4\n            self.values.append(4)\n            self.turn_robot(\"left\")\n        elif(move==\"right\"):\n            self.map[self.current_position[0],self.current_position[1]] = 2\n            self.values.append(2)\n            self.turn_robot(\"right\")\n\n        self.print_map()\n\n    def turn_robot(self,move):\n        if(move==\"left\"):\n            if(self.direction==\"EAST\"):\n                self.direction = \"NORTH\"\n            elif(self.direction==\"SOUTH\"):\n                self.direction = \"EAST\"\n            elif(self.direction==\"WEST\"):\n                self.direction = \"SOUTH\"\n            elif(self.direction==\"NORTH\"):\n                self.direction = \"WEST\"\n        elif(move==\"right\"):\n            if(self.direction==\"EAST\"):\n                self.direction = \"SOUTH\"\n            elif(self.direction==\"SOUTH\"):\n                self.direction = \"WEST\"\n            elif(self.direction==\"WEST\"):\n                self.direction = \"NORTH\"\n            elif(self.direction==\"NORTH\"):\n                self.direction = \"EAST\"\n\n    def uturn_right(self,sec):\n        print(\"right uturn\") \n        # GPIO.output(in1,GPIO.HIGH)\n        # GPIO.output(in2,GPIO.HIGH)\n        # GPIO.output(in3,GPIO.HIGH)\n        # GPIO.output(in4,GPIO.LOW)\n        # sleep(sec)\n        # self.stop(0.4)\n        # self.backward(0.8)\n        # self.stop(0.3)\n        #GPIO.cleanup()\n\n    def uturn_left(self,sec):\n        print(\"left uturn\") \n        # GPIO.output(in1,GPIO.LOW)\n        # GPIO.output(in2,GPIO.HIGH)\n        # GPIO.output(in3,GPIO.HIGH)\n        # GPIO.output(in4,GPIO.HIGH)\n        # sleep(sec)\n        # self.stop(0.4)\n        # self.backward(0.8)\n        # self.stop(0.3)\n        #GPIO.cleanup()\n\n    def forward(self,sec):\n        print(\"forward\") \n        # GPIO.output(in1,GPIO.LOW)\n        # GPIO.output(in2,GPIO.HIGH)\n        # GPIO.output(in3,GPIO.HIGH)\n        # GPIO.output(in4,GPIO.LOW)\n        # sleep(sec)\n        # self.stop(0.3)\n        #GPIO.cleanup()\n\n    def backward(self,sec):\n        print(\"backward\")\n        # GPIO.output(in1,GPIO.HIGH)\n        # GPIO.output(in2,GPIO.LOW)\n        # GPIO.output(in3,GPIO.LOW)\n        # GPIO.output(in4,GPIO.HIGH)\n        # sleep(sec)\n        # self.stop(0.3)\n        #GPIO.cleanup()\n\n    def left(self,sec):\n        print(\"left\") \n        # GPIO.output(in1,GPIO.LOW)\n        # 
GPIO.output(in2,GPIO.HIGH)\n        # GPIO.output(in3,GPIO.HIGH)\n        # GPIO.output(in4,GPIO.HIGH)\n        # sleep(sec)\n        # self.stop(0.3)\n        # self.backward(0.5)\n        # self.stop(0.3)\n        #GPIO.cleanup()\n\n    def back_right(self,sec):\n        print(\"back right\")\n        # GPIO.output(in1,GPIO.HIGH)\n        # GPIO.output(in2,GPIO.HIGH)\n        # GPIO.output(in3,GPIO.LOW)\n        # GPIO.output(in4,GPIO.HIGH)\n        # sleep(sec)\n        # self.stop(0.3)\n        #GPIO.cleanup()\n\n    def back_left(self,sec):\n        print(\"back left\")\n        # GPIO.output(in1,GPIO.HIGH)\n        # GPIO.output(in2,GPIO.LOW)\n        # GPIO.output(in3,GPIO.HIGH)\n        # GPIO.output(in4,GPIO.LOW)\n        # sleep(sec)\n        # self.stop(0.3)\n        #GPIO.cleanup()\n\n    def right(self,sec):\n        print(\"right\")\n        # GPIO.output(in1,GPIO.HIGH)\n        # GPIO.output(in2,GPIO.HIGH)\n        # GPIO.output(in3,GPIO.HIGH)\n        # GPIO.output(in4,GPIO.LOW)\n        # sleep(sec)\n        # self.stop(0.3)\n        # self.backward(0.5)\n        # self.stop(0.3)\n        #GPIO.cleanup()\n\n    def stop(self,sec):\n        print(\"stop\")\n        # GPIO.output(in1,GPIO.LOW)\n        # GPIO.output(in2,GPIO.LOW)\n        # GPIO.output(in3,GPIO.LOW)\n        # GPIO.output(in4,GPIO.LOW)\n        # sleep(sec)\n        #GPIO.cleanup()\n\n    def cleanup(self):\n        print(\"Cleaning up\")\n        # GPIO.cleanup()\n    \n    def print_map(self):\n        print(\"current direction: \", self.direction)\n        print(self.map)\n\n    def comparedistance(self):\n        print(\"Comparing distance\")\n        leftd = self.left_distance()\n        rightd = self.right_distance()\n        if(leftd>rightd):\n            print(\"moving left\")\n            self.left(0.3)\n        elif(rightd>leftd):\n            print(\"moving right\")\n            self.right(0.3)\n        else:\n            print(\"equal\")\n            self.right(0.3)\n        self.stop(0.5)\n    \n    def obstacle_avoidance(self):\n        distance = self.forward_distance()\n        if(distance<20):\n            print(\"changing path\")\n            self.changepath()\n        sleep(0.5)\n\n    def changepath(self):\n        self.stop(0.5)\n        self.backward(0.3)\n        self.stop(0.2)\n        self.right(2)\n        self.stop(0.2)\n        self.forward(0.6)\n        self.stop(0.3)\n        self.forward(0.3)\n        while(True):\n            self.stop(0.2)\n            f = self.forward_distance()\n            l = self.left_distance()\n            if(l>25):\n                print(\"left greater\")\n                self.backward(0.2)\n                self.stop(0.3)\n                self.left(2.5)\n                self.stop(0.2)\n                self.forward(0.6)\n                self.stop(0.2)\n            elif(f>20):\n                print(\"forward greater\")\n                self.forward(0.3)\n                self.stop(0.2)\n            else:\n                print(\"else\")\n                self.backward(0.3)\n                self.stop(0.2)\n                self.right(3)\n                self.stop(0.2)\n                self.forward(0.3)\n                self.stop(0.2)\n\nrobot = ROBOT(\"NORTH\",20,20)\n\n\n@app.route('/')\ndef index():\n    return render_template('index1.html')\n\n############################connect#######\n@socketio.on('connect')\ndef test_connect():\n    print(\"Connecting..\")\n    emit('connection_response','Connection established')\n    @socketio.on ('connection')\n    def on_connection(c_res):\n        print(\"connection made\")\n        global robot\n        if(robot is None):\n            robot = ROBOT(\"NORTH\",20,20)\n            #init()\n        print(c_res)\n\n@socketio.on('alarm')\ndef alarm():\n    print(\"Alarm Reached.\")\n    robot.follow_map()\n\n@socketio.on('start_cleaning')\ndef start_cleaning():\n    print(\"Starting Cleaning.\")\n    robot.follow_map()\n\n@socketio.on('finish_cleaning')\ndef finish_cleaning():\n    print(\"Cleaning Finished.\")\n    robot.stop(0.5)\n    if(robot.map_complete==True):\n        emit('finish_response','true')\n    else:\n        emit('finish_response','false') \n\n@socketio.on('disconnect')\ndef test_disconnect():\n    print('Client disconnected')\n\n@socketio.on('reset')\ndef on_reset(*args):\n    print(\"reset\")\n    robot.reset_map()\n\n@socketio.on('making_map')\ndef on_making_map(*args):\n    print(\"making map\")\n    global making_map\n    making_map = True\n    robot.reset_map()\n\n@socketio.on('saving_map')\ndef 
on_saving_map(*args):\n    print(\"saving map\")\n    global making_map\n    making_map = False\n\n##########Controller Control ###############\n\n@socketio.on('forward_con')\ndef on_forward_con(*args):\n    global making_map\n    if(making_map==True and robot.map_complete==False):\n        print(\"forward\")\n        robot.forward(0.5)\n        robot.update_map(\"forward\") \n        if(robot.map_complete==True):\n            print(\"completed.\")\n            emit('made','true')\n\n@socketio.on('right_con')\ndef on_right_con(*args):\n    global making_map\n    if(making_map==True and robot.map_complete==False):\n        print(\"right\")\n        robot.update_map(\"right\")\n        robot.right(2)\n    else:\n        print(\"no action.\")\n\n@socketio.on('left_con')\ndef on_left_con(*args):\n    global making_map\n    if(making_map==True and robot.map_complete==False):\n        print(\"left\")\n        robot.update_map(\"left\")\n        robot.left(2)\n    else:\n        print(\"no action.\")\n\n@socketio.on('right_uturn_con')\ndef on_right_uturn_con(*args):\n    global making_map\n    if(making_map==True and robot.map_complete==False):\n        print(\"uturn right\")\n        #robot.update_map(\"left\")\n        robot.uturn_right(3.8)\n    else:\n        print(\"no action.\")\n\n@socketio.on('left_uturn_con')\ndef on_left_uturn_con(*args):\n    global making_map\n    if(making_map==True and robot.map_complete==False):\n        print(\"uturn left\")\n        #robot.update_map(\"left\")\n        robot.uturn_left(3.8)\n    else:\n        print(\"no action.\")\n\n@socketio.on('back_con')\ndef on_back_con(*args):\n    global making_map\n    if(making_map==True and robot.map_complete==False):\n        print(\"back\")\n        robot.backward(0.5)\n    else:\n        print(\"no action.\")\n\n@socketio.on('stop_con')\ndef on_stop_con(*args):\n    global making_map\n    if(making_map==True and robot.map_complete==False):\n        print(\"stop\")\n        robot.stop(0.5)\n    else:\n        print(\"no action.\")\n\n@socketio.on('get_info')\ndef info():\n    print(\"Getting info.\")\n    with open(os.path.join(os.getcwd(),'values.json')) as f:\n        data = json.load(f)\n        if(data[\"map_complete\"]==True):\n            data[\"percent\"] = 100\n            emit('cleaning_info',data)\n        else:\n            emit('cleaning_info',data)\n\nif __name__ == '__main__':\n    socketio.run(app,host='0.0.0.0',port=5091)\n","sub_path":"raspberrypi/navigation/mapping/m.py","file_name":"m.py","file_ext":"py","file_size_in_byte":31807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
{"seq_id":"175388981","text":"from ...biotools import annotate_record\n\n\nclass FragmentsMixin:\n    \"\"\"Mixin for AssemblyMix\"\"\"\n\n    fragment_annotation_color = \"white\"\n\n    @property\n    def filtered_fragments(self):\n        \"\"\"Return the fragments of the mix passing all the tests\n\n        Generally used to remove fragments containing a restriction site used\n        in a Type2S assembly.\n        \"\"\"\n        return [\n            f\n            for f in (self.fragments + self.reverse_fragments)\n            if all([fl(f) for fl in self.fragment_filters])\n        ]\n\n    def compute_reverse_fragments(self):\n        \"\"\"Precompute self.reverse_fragments.\n\n        This method also marks all \"direct\" fragments in the mix as\n        `fragment.is_reversed=False` and all \"reverse\" fragments as\n        `fragment.is_reversed=True`.\n        \"\"\"\n        self.reverse_fragments = []\n        for fragment in self.fragments:\n            fragment.is_reversed = False\n            new_fragment = fragment.reverse_complement()\n            new_fragment.is_reversed = True\n            new_fragment.reverse_fragment = fragment\n            fragment.reverse_fragment = new_fragment\n            new_fragment.original_part = fragment.original_part\n            self.reverse_fragments.append(new_fragment)\n\n    def annotate_fragment_with_part(self, fragment):\n        part = fragment.original_part.id\n        if 
self.annotate_fragments_with_parts:\n            annotate_record(\n                fragment,\n                feature_type=\"misc_feature\",\n                source=part,\n                indicates_part=True,\n                note=\"From \" + part,\n                color=self.fragment_annotation_color,\n                ApEinfo_fwdcolor=self.fragment_annotation_color,\n            )\n","sub_path":"dnacauldron/AssemblyMix/mixins/FragmentsMixin.py","file_name":"FragmentsMixin.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
{"seq_id":"269671909","text":"import nltk\nfrom nltk.corpus import conll2000\nimport os\n\nclass UnigramChunker(nltk.ChunkParserI):\n\n    def __init__(self, train_sents):\n        train_data = [[ (t,c) for w,t,c in nltk.chunk.tree2conlltags(sent)]\n                      for sent in train_sents]\n        self.tagger = nltk.UnigramTagger(train_data)\n    def parse(self, sentence):\n        pos_tags = [pos for (word, pos) in sentence]\n        tagged_pos_tags = self.tagger.tag(pos_tags)\n        chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]\n        conlltags = [(word, pos, chunktag) for ((word, pos), chunktag)\n                     in zip (sentence, chunktags)]\n        return nltk.chunk.conlltags2tree(conlltags)\n\ndef tags_since_dt(sentence, i):\n    tags = set()\n    for word, pos in sentence[:i]:\n        if (pos == 'DT'):\n            tags = set()\n        else:\n            tags.add(pos)\n    return '+'.join(sorted(tags))\n\ndef npchunk_features(sentence, i , history):\n    word, pos = sentence[i]\n    if(i==0):\n        prevword, prevpos = \"\", \"\"\n    else:\n        prevword, prevpos = sentence[i-1]\n    if i == len(sentence) - 1:\n        nextword, nextpos = \"\", \"\"\n    else:\n        nextword, nextpos = sentence[i+1]\n    return {\"pos\":pos, \"word\":word, \"prevpos\":prevpos, \"nextpos\":nextpos, \"prevpos+pos\": \"%s+%s\" % (prevpos, pos),\n            \"pos+nextpos\": \"%s+%s\" % (pos, nextpos), \"tags-since-dt\": tags_since_dt(sentence, i)}\n\nclass ConsecutiveNPChunkTagger(nltk.TaggerI):\n\n    def __init__(self, train_sents):\n        train_set = []\n        for tagged_sent in train_sents:\n            untagged_sent = nltk.tag.untag(tagged_sent)\n            history = []\n            for i, (word, tag) in enumerate(tagged_sent):\n                featureset = npchunk_features(untagged_sent, i, history)\n                train_set.append( (featureset, tag) )\n                history.append(tag)\n        self.classifier = nltk.MaxentClassifier.train(train_set, trace=0)\n\n    def tag(self, sentence):\n        history = []\n        for i, word in enumerate(sentence):\n            featureset = npchunk_features(sentence, i, history)\n            tag = self.classifier.classify(featureset)\n            history.append(tag)\n        return zip(sentence, history)\n\n\nclass ConsecutiveNPChunker(nltk.ChunkParserI):\n    def __init__(self, train_sents):\n        tagged_sents = [[((w,t),c) for (w,t,c) in\n                         nltk.chunk.tree2conlltags(sent)]\n                        for sent in train_sents]\n        self.tagger = ConsecutiveNPChunkTagger(tagged_sents)\n\n    def parse(self, sentence):\n        tagged_sents = self.tagger.tag(sentence)\n        conlltags = [(w,t,c) for ((w,t),c) in tagged_sents]\n        return nltk.chunk.conlltags2tree(conlltags)\n\n    def tokenization(self, text):\n        sentences = nltk.sent_tokenize(text)\n        words = [nltk.word_tokenize(sent) for sent in sentences]\n        return words\n\n    def pos_tag_tokenization(self, text):\n        words = self.tokenization(text)\n        return [nltk.pos_tag(sent) for sent in words]\n    \n\nif __name__==\"__main__\":\n    main() ","sub_path":"DataExtraction.py","file_name":"DataExtraction.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
{"seq_id":"57757242","text":"\nfrom operations import LDI,PRN,TEST1,TEST2,TEST3,TEST4,TEST5,CMP,JEQ,JNE,JMP,HLT,NOT\nimport sys\nfrom itertools import 
dropwhile\n\nclass LS8:\n def __init__(self):\n self.pc = 0\n self.ram = [0]*256 #ls8 is a 8 bit processor, at most, it can process a total of 2^8 = 256 bytes in memory\n self.reg = [0]*8 #ls8 has 8 registers for usege\n self.sp = -1 #initial stack pointer at the end of the ram\n self.fl = 0b00000000 #00000LGE L = Less, G = Greater E = Equal\n\n def __validate__(self):\n if len(sys.argv) != 2:\n raise IOError('cannot load file. not specified')\n sys.exit()\n try:\n filename = sys.argv[1]\n program = open(filename, 'r')\n except IOError:\n print('could not open/read file', filename)\n raise IOError\n sys.exit()\n return [*program]\n \n def load(self):\n program = self.__validate__()\n address = 0\n instructions = []\n\n program = [*dropwhile(lambda l: l.startswith('#') or l == '\\n' ,program)]\n bytes = [b.split(' #')[0].strip() if '#' in b else b.strip() for b in program]\n bytes = [*filter(lambda x: '#' not in x, bytes)]\n \n for b in bytes:\n instructions.append(int(b,2))\n \n for byte in instructions:\n self.ram[address] = byte\n address += 1\n\n # def load(self):\n # program = self.__validate__()\n # address = 0\n # instructions = []\n \n # while True:\n # line = program.readline()\n # if not line.startswith('#'):\n # break\n # print('first line',line)\n\n # byte,add = line.split('#')\n # print(byte)\n # instructions.append(int(byte,2))\n # for line in program:\n # print('line', line)\n # if line == '\\n':\n # continue\n # if '#' in line:\n # byte,comment = line.split(' #')\n # instructions.append(int(byte.strip(),2))\n # else:\n # instructions.append(int(line.strip(),2))\n # print('instructions', instructions)\n # for byte in instructions:\n # self.ram[address] = byte\n # address += 1\n # print('ram after loading', self.ram)\n\n # def load(self):\n # program = self.__validate__()\n # address = 0\n # instructions = []\n\n # for line in program:\n # if line.startswith('#'):\n # continue\n # if line == '\\n':\n # continue\n # if '#' in line:\n # byte,comment = line.split(' #')\n # instructions.append(int(byte.strip(),2))\n # else:\n # instructions.append(int(line.strip(),2))\n # print('instructions', instructions)\n # for byte in instructions:\n # self.ram[address] = byte\n # address += 1\n \n def not_bitwise(self,n):\n binary = f\"{n:08b}\"\n bin_list = [int(b) for b in iter(binary)]\n # flipped = [1 for b in bin_list if b == 0 else 0 for b in bin_list]\n flipped = ['1' if b == 0 else '0' for b in bin_list]\n notted = \"\".join(flipped)\n return int(notted,2)\n\n def ldi(self,r_address,val): #load immediate (into a cpu register)\n self.reg[r_address] = val\n # print('LDI complete. 
register:', self.reg)\n \n def alu(self,op,reg_a,reg_b=0):\n if op == 'CMP':\n if self.reg[reg_a] - self.reg[reg_b] < 0: #reg_a < reg_b\n self.fl = 0b00000100 #3\n # print(self.reg[reg_a],' < ', self.reg[reg_b])\n elif self.reg[reg_a] - self.reg[reg_b] > 0: #reg_a > reg_b\n self.fl = 0b00000010 #2\n print(self.reg[reg_a], ' > ', self.reg[reg_b])\n elif self.reg[reg_a] - self.reg[reg_b] == 0:\n self.fl = 0b00000001 #1\n # print(self.reg[reg_a], ' = ', self.reg[reg_b])\n else:\n # print(f'error occured comparing {self.reg[reg_a]} and {self.reg[reg_b]}')\n raise Exception\n sys.exit()\n elif op == 'ADD':\n self.reg[reg_a] += self.reg[reg_b]\n elif op == 'AND':\n self.reg[reg_a] = self.reg[reg_a] & self.reg[reg_b]\n elif op == 'OR':\n self.reg[reg_a] = self.reg[reg_a] | self.reg[reg_b]\n elif op == 'NOT':\n # self.reg[reg_a] = ~self.reg[reg_a]\n self.reg[reg_a] = self.not_bitwise(self.reg[reg_a])\n print(self.reg[reg_a])\n print(self.reg) \n\n def push(self,pc):\n self.ram[self.sp] = pc\n self.sp -= 1\n # print('ram after pushing', self.ram)\n \n def pop(self):\n self.sp += 1\n popped = self.ram[self.sp]\n self.ram[self.sp] = 0\n return popped\n\n def run(self):\n self.load()\n halted = False\n\n while halted == False:\n instruction = self.ram[self.pc]\n # print(f'instruction', instruction, 'pc:', self.pc)\n\n if instruction == LDI:\n r_address = self.ram[self.pc + 1]\n val = self.ram[self.pc + 2]\n self.ldi(r_address,val)\n self.pc += 3\n \n elif instruction == PRN:\n r_address = self.ram[self.pc + 1]\n print(self.reg[r_address])\n self.pc += 2\n \n elif instruction == CMP:\n reg_a = self.ram[self.pc + 1]\n reg_b = self.ram[self.pc + 2]\n self.alu('CMP', reg_a, reg_b)\n self.pc += 3\n \n elif instruction == JEQ: #jump to register if equal\n # print('in JEQ')\n if self.fl == 0b00000001: #equal bit flagged\n self.push(self.pc)\n r_address = self.ram[self.pc + 1]\n self.pc = self.reg[r_address]\n else:\n # print('fl not equal')\n self.pc += 2\n elif instruction == NOT:\n reg_a = self.ram[self.pc + 1]\n self.alu('NOT',reg_a)\n self.pc += 2\n \n elif instruction == TEST1:\n # print('in TEST1')\n self.pc += 1 #evaulate the subroutine\n elif instruction == TEST2:\n # print('in TEST2')\n self.pc += 1\n elif instruction == TEST3:\n self.pc += 1\n elif instruction == TEST4:\n self.pc += 1\n elif instruction == TEST5:\n self.pc += 1\n \n elif instruction == JNE: #jump to register if not equal\n # print('in JNE')\n if self.fl != 0b00000001:\n # print('last comparison values not equal JNE')\n self.push(self.pc)\n r_address = self.ram[self.pc + 1]\n self.pc = self.reg[r_address]\n else:\n self.pc += 2\n \n elif instruction == JMP:\n # print('in JMP')\n self.push(self.pc)\n r_address = self.ram[self.pc + 1]\n self.pc = self.reg[r_address]\n \n elif instruction == HLT:\n self.pc += 1\n halted = True\n sys.exit()\n \n else:\n print(f\"error occured at program counter index: {self.pc}\")\n raise IndexError\n sys.exit()\n\nls8 = LS8()\nls8.run()\n\n\n ","sub_path":"ls8.py","file_name":"ls8.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"634673876","text":"\"\"\"\nThe data generator for the SBAC project.\n\n\"\"\"\n\nimport argparse\nimport datetime\n\nfrom datagen.worker_manager import WorkerManager\n\nif __name__ == '__main__':\n # Argument parsing for task-specific arguments\n parser = argparse.ArgumentParser(description='SBAC data generation utility.',\n epilog='Example arguments:' +\n '\\n --state_type 
devel --gen_iab --gen_item --pkg-source ./in/iabs.csv'\n '\\n --state_type tiny --gen_sum ./in/20*.csv'\n '\\n --state_type california --gen_sum --gen_ica ./in/20*.csv'\n )\n\n parser.add_argument('-sn', '--state_name', dest='state_name', action='store', default='California', help='The name of the state (default=California)')\n parser.add_argument('-sc', '--state_code', dest='state_code', action='store', default='CA', help='The code of the state to generate data for')\n parser.add_argument('-st', '--state_type', dest='state_type', action='store', default='tiny', help='Specify the type of state to generate data for')\n\n parser.add_argument('-hier', '--hier_source', dest='hier_source', action='store', default='generate', help='Source of hierarchy, either \\'generate\\' or a CSV pathname, e.g. ./in/hierarchy.csv')\n parser.add_argument('-sub', '--subject_source', dest='subject_source', action='store', default='generate', help='Source of subject definitions files, either \\'generate\\' or a glob expression matching files, e.g. ./in/*_subject.xml')\n parser.add_argument('-pkg', '--pkg_source', dest='pkg_source', action='store', help='Source of assessment packages, a glob expression matching files, e.g. ./in/20*.csv')\n\n group = parser.add_argument_group('outcomes')\n group.add_argument('-gsum', '--gen_sum', dest='gen_sum', action='store_true', default=False, help='Generate summative outcomes')\n group.add_argument('-gica', '--gen_ica', dest='gen_ica', action='store_true', default=False, help='Generate ICA outcomes')\n group.add_argument('-giab', '--gen_iab', dest='gen_iab', action='store_true', default=False, help='Generate IAB outcomes')\n group.add_argument('-gitem', '--gen_item', dest='gen_item', action='store_true', default=False, help='Generate item level data')\n\n parser.add_argument('-o', '--out_dir', dest='out_dir', action='store', default='out', help='Specify the root directory for writing output files to')\n # since there is only a single output format right now, default it to true for convenience\n parser.add_argument('-xo', '--xml_out', dest='xml_out', action='store_true', default=True, help='Output data to (TRT) XML')\n\n args, unknown = parser.parse_known_args()\n\n if not args.xml_out:\n print('Please specify at least one output format')\n print(' --xml_out Output (TRT) XML')\n exit()\n\n if not args.pkg_source:\n print('Please specify the source for assessment packages, e.g.')\n print(' --pkg_source ./in/*.csv')\n exit()\n\n if not (args.gen_sum or args.gen_ica or args.gen_iab):\n print('No assessment package types selected. 
Please specify at least one')\n print(' --gen_sum Summative assessment package')\n print(' --gen_ica Interim comprehensive assessment (ICA) package')\n print(' --gen_iab Interim assessment block (IAB) package')\n exit()\n\n worker = WorkerManager(args)\n\n # Record current (start) time\n tstart = datetime.datetime.now()\n\n worker.prepare()\n worker.run()\n worker.cleanup()\n\n # Record now current (end) time\n tend = datetime.datetime.now()\n\n # Print statistics\n print()\n print('Run began at: {}'.format(tstart))\n print('Run ended at: {}'.format(tend))\n print('Run run took: {}'.format(tend - tstart))\n print()\n","sub_path":"datagen/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"580953605","text":"#!/usr/bin/env python3\n\nimport os\nimport re\n\n\nclass PageMethods:\n def __init__(self, path):\n self.path = path\n\n def resource(self, resource):\n return os.path.relpath(resource, self.path)\n\n def header(self, title, menu_hl=None, nodiv=False, black_bg=False):\n bodystyle = \" style=\\\"background: #000; color: #fff;\\\"\" if black_bg else \"\"\n\n r = \"\"\"\n\n\n\n\"\"\" + title + \"\"\" | M-Labs\n\n\n\n\n
\n \"\"\"\n if not nodiv:\n r += \"
\"\n\n return r\n\n def footer(self, nodiv=False):\n d = \"\" if nodiv else \"
\\n\"\n return d + \"\"\"\n\n\"\"\"\n\n def get_globals(self):\n return {\n \"header\": self.header,\n \"footer\": self.footer,\n \"resource\": self.resource\n }\n\n\ndef process(path, name_in, name_out):\n fullname = os.path.join(path, name_in)\n print(\"processing\", fullname)\n with open(fullname, \"r\") as infile:\n indata = infile.read()\n\n pm = PageMethods(path)\n outdata = \"\"\n # FIXME: this is just a buggy quick hack\n splits = re.split(\"({{{[^}]+}}})\", indata)\n for split in splits:\n if split and split[:3] == \"{{{\":\n outdata += eval(split[3:-3], pm.get_globals())\n else:\n outdata += split\n\n with open(os.path.join(path, name_out), \"w\") as outfile:\n outfile.write(outdata)\n\n\ndef main():\n for root, dirs, files in os.walk(\".\"):\n for file in files:\n if file.endswith(\".page\"):\n process(root, file, file[:-5] + \".html\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"614798866","text":"import os\nimport shutil\n\nimport cv2\nfrom PIL import Image\n\nDATASET_ROOT_PATH=\"data/dataset/fddb/\"\n\nOUTPUT_LABEL=\"annotations_darknet\"\nOUTPUT_BASE_PATH=DATASET_ROOT_PATH+\"FDDB-folds/\"+OUTPUT_LABEL\n\nTRAIN_FILES_PATH = DATASET_ROOT_PATH+\"/\"+'train'+\".txt\"\nTEST_FILES_PATH = DATASET_ROOT_PATH+\"/\"+'test'+\".txt\"\n\nif(not os.path.exists(OUTPUT_BASE_PATH)):\n\tos.mkdir(OUTPUT_BASE_PATH)\n\nif(os.path.exists(TRAIN_FILES_PATH)):\n\tos.remove(TRAIN_FILES_PATH)\n\tprint(\"usunieto plik train\")\n\nif(os.path.exists(TEST_FILES_PATH)):\n\tos.remove(TEST_FILES_PATH)\n\tprint(\"usunieto plik test\")\n\nfiles_in_OUTPUT_BASE_PATH=os.listdir(OUTPUT_BASE_PATH)\nfor item in files_in_OUTPUT_BASE_PATH:\n if item.endswith(\".jpg\"):\n os.remove(os.path.join(OUTPUT_BASE_PATH, item))\n\nfile_no=0\n\n\nwh = [] #relative box size in comparision to image - it will be used in clusterin after loop is end\nproblem_nr = 0\nfor list in range(1,11):\n\tlist2=str(list)\n\tif list<10:\n\t\tlist2=\"0\"+str(list)\n\tpath=DATASET_ROOT_PATH+\"FDDB-folds/FDDB-fold-\"+str(list2)+\"-ellipseList.txt\"\n\tlines=open(path).readlines()\n\n\tline_no=0\n\t\n\twhile True:\n\t\tif line_no>=len(lines):\n\t\t\tbreak\n\t\t\n\t\tline=lines[line_no]\n\t\tline_no=line_no+1\n\n\t\tfile_path=line.replace(\"\\n\",\"\")\n\t\timage_path=DATASET_ROOT_PATH+file_path+\".jpg\"\n\t\tfile_no=file_no+1\n\t\n\t\timage_test = Image.open(image_path)\t\t\t\n\t\t\n\t\timage=cv2.imread(image_path, cv2.IMREAD_UNCHANGED)\n\t\timagew=image.shape[1]\n\t\timageh=image.shape[0]\n\n\t\tcopy_path=OUTPUT_BASE_PATH+\"/\"+str(file_no)+\".jpg\"\n\t\trelative_path=\"dataset/fddb/FDDB-folds/\"+OUTPUT_LABEL+\"/\"+str(file_no)+\".jpg\"\n\n\t\tif image_test.mode == 'RGB':\n\t\t\tshutil.copyfile(image_path, copy_path)\n\t\t\n\t\tline_n=int(lines[line_no])\n\t\tline_no=line_no+1\n\n\t\tif line_no%10 != 0:\n\t\t\tannotation_path=TRAIN_FILES_PATH\n\t\telse:\n\t\t\tannotation_path=TEST_FILES_PATH\n\n\t\tf_annotation=open(annotation_path,mode=\"a\")\n\t\tstring_to_write = ''\n\t\tnumber_of_pos_to_write = 0\n\t\t\n\t\tfor i in range(line_n):\n\t\t\tline=lines[line_no]\n\t\t\tline_no=line_no+1\n\n\t\t\tdata=line.split(\" 
\")\n\t\t\tmajor_axis_radius=float(data[0])\n\t\t\tminor_axis_radius=float(data[1])\n\t\t\tangle=float(data[2])\n\t\t\tcenter_x=float(data[3])\n\t\t\tcenter_y=float(data[4])\n\t\t\t\n\t\t\tx=center_x\n\t\t\ty=center_y\n\n\t\t\tw=minor_axis_radius*2\n\t\t\th=major_axis_radius*2\n\t\t\tif image_test.mode == 'RGB':\n\t\t\t\twh.append([w/imagew, h/imageh]) # add relative size of bounding boxes for clustering\n\t\t\tcategory=0\n\t\t\tx_min, y_min = int(x-w//2), int(y-h//2)\n\t\t\tx_max, y_max = int(x+w//2), int(y+h//2)\n\t\t\tproblem_encouter = False\n\t\t\tif 0 number:count\n\n dict_count = {}\n for number in A:\n if dict_count.get(number, None) is not None:\n dict_count[number] += 1\n else:\n dict_count[number] = 1\n\n result = []\n for number in dict_count:\n if dict_count[number] == 2:\n result.append(number)\n\n #sum of AP = n(n+1)/2\n sum = 0\n for number in A:\n sum += number\n #remove extra element from sum\n sum = sum - result[0]\n actual_sum = len(A)*(len(A)+1)/2\n\n result.append(actual_sum-sum)\n return result\n\na = Solution()\na.repeatedNumber([3, 1, 2, 5, 3])\n","sub_path":"algorithms_arrays/repeat_and_missing_number_array.py","file_name":"repeat_and_missing_number_array.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"106385347","text":"import os\nimport discord\nfrom discord import Game\nimport discord.ext\nfrom discord.ext import commands\nfrom discord.ext.commands import has_permissions, MissingPermissions\nimport lqtgenerator\n\nTOKEN = os.environ['LUQUITO_BOT']\n\nbot = commands.Bot(command_prefix='!')\n\n\ndef check_message(m):\n if m.author == bot.user:\n return True\n\n if m.content == '!frasetts':\n return True\n\n if m.content == '!frase':\n return True\n\n if m.content == '!jogo':\n return True\n\n if m.content == '!clear':\n return True\n\n return False\n\n\n@bot.command(name='clear')\n@has_permissions(manage_channels=True)\nasync def clear(ctx):\n await ctx.channel.purge(limit=100, check=check_message)\n\n\n@clear.error\nasync def clear_error(ctx, error):\n text = 'Sem permissão, irmão'\n await ctx.send(text)\n\n\n@bot.command(name='frase')\nasync def frase(ctx):\n msg = lqtgenerator.gera_frase()\n await ctx.send(msg)\n\n\n@bot.command(name='frasetts')\nasync def frasetts(ctx):\n msg = lqtgenerator.gera_frase()\n await ctx.send(content=msg, tts=True)\n\n\n@bot.command(name='jogo')\nasync def jogo(ctx):\n game = discord.Game(lqtgenerator.gera_jogo())\n await bot.change_presence(activity=game)\n\nbot.run(TOKEN)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"261334533","text":"import tkinter as tk\nfrom PIL import ImageTk\nfrom PIL import Image\nimport time\n\n\nclass Env:\n def __init__(self):\n self.grid_size = 100\n self.win = tk.Tk()\n self.pic_player, self.pic_diamond, self.pic_boom1, self.pic_boom2, self.pic_boom3, self.pic_boom4,self.pic_boom5,self.pic_boom6,self.pic_boom7,self.pic_boom8,self.pic_boom9,self.pic_boom10= self.__load_img()\n self.__init_win()\n self.canvas = self.__init_rc()\n self.texts = self.__produce_text()\n self.canvas.pack()\n # self._init_test_case()\n # self.win.mainloop()\n\n def __init_win(self):\n self.win.title('Grid World')\n # self.win.geometry(\"500x300\")\n\n def __init_rc(self):\n canvas = tk.Canvas(self.win, width=500, height=720, bg='white')\n for h in range(5):\n for v in range(5):\n 
canvas.create_rectangle(self.grid_size * v, self.grid_size * h, self.grid_size * (v + 1),\n self.grid_size * (h + 1))\n trans_pixel = int(self.grid_size / 2)\n self.player = canvas.create_image(trans_pixel + self.grid_size * 0, trans_pixel + self.grid_size * 0,\n image=self.pic_player)\n self.diamond = canvas.create_image(trans_pixel + self.grid_size * 4, trans_pixel + self.grid_size * 4,\n image=self.pic_diamond)\n self.boom1 = canvas.create_image(trans_pixel + self.grid_size * 1, trans_pixel + self.grid_size * 1,\n image=self.pic_boom1)\n self.boom2 = canvas.create_image(trans_pixel + self.grid_size * 3, trans_pixel + self.grid_size * 1,\n image=self.pic_boom2)\n self.boom3 = canvas.create_image(trans_pixel + self.grid_size * 1, trans_pixel + self.grid_size * 3,\n image=self.pic_boom3)\n self.boom4 = canvas.create_image(trans_pixel + self.grid_size * 3, trans_pixel + self.grid_size * 3,\n image=self.pic_boom4)\n self.boom5 = canvas.create_image(trans_pixel + self.grid_size * 1, trans_pixel + self.grid_size * 2,\n image=self.pic_boom5)\n self.boom6 = canvas.create_image(trans_pixel + self.grid_size * 3, trans_pixel + self.grid_size * 2,\n image=self.pic_boom6)\n self.boom7 = canvas.create_image(trans_pixel + self.grid_size * 0, trans_pixel + self.grid_size * 2,\n image=self.pic_boom7)\n self.boom8 = canvas.create_image(trans_pixel + self.grid_size * 1, trans_pixel + self.grid_size * 4,\n image=self.pic_boom8)\n self.boom9 = canvas.create_image(trans_pixel + self.grid_size * 2, trans_pixel + self.grid_size * 3,\n image=self.pic_boom9)\n self.boom10 = canvas.create_image(trans_pixel + self.grid_size * 3, trans_pixel + self.grid_size * 4,\n image=self.pic_boom10)\n return canvas\n\n def __load_img(self):\n pic_resize = int(self.grid_size / 2)\n player = ImageTk.PhotoImage(Image.open(\"p.png\").resize((pic_resize, pic_resize)))\n diamond = ImageTk.PhotoImage(Image.open(\"captured.png\").resize((pic_resize, pic_resize)))\n boom1 = ImageTk.PhotoImage(Image.open('z.png').resize((pic_resize, pic_resize)))\n boom2 = ImageTk.PhotoImage(Image.open('z.png').resize((pic_resize, pic_resize)))\n boom3 = ImageTk.PhotoImage(Image.open('z.png').resize((pic_resize, pic_resize)))\n boom4 = ImageTk.PhotoImage(Image.open('z.png').resize((pic_resize, pic_resize)))\n boom5 = ImageTk.PhotoImage(Image.open('l.png').resize((pic_resize, pic_resize)))\n boom6= ImageTk.PhotoImage(Image.open('f.png').resize((pic_resize, pic_resize)))\n boom7 = ImageTk.PhotoImage(Image.open('g.png').resize((pic_resize, pic_resize)))\n boom8 = ImageTk.PhotoImage(Image.open('x.png').resize((pic_resize, pic_resize)))\n boom9 = ImageTk.PhotoImage(Image.open('xz.png').resize((pic_resize, pic_resize)))\n boom10 = ImageTk.PhotoImage(Image.open('s.png').resize((pic_resize, pic_resize)))\n return player, diamond, boom1, boom2, boom3, boom4,boom5,boom6,boom7,boom8,boom9,boom10\n\n def __produce_text(self):\n texts = []\n x = self.grid_size / 2\n y = self.grid_size / 6\n for h in range(5):\n for v in range(5):\n up = self.canvas.create_text(x + h * self.grid_size, y + v * self.grid_size, text=0)\n down = self.canvas.create_text(x + h * self.grid_size, self.grid_size - y + v * self.grid_size, text=0)\n left = self.canvas.create_text(y + h * self.grid_size, x + v * self.grid_size, text=0)\n right = self.canvas.create_text(self.grid_size - y + h * self.grid_size, x + v * self.grid_size, text=0)\n texts.append({\"up\": up, \"down\": down, \"left\": left, \"right\": right})\n return texts\n\n def _win_d_update(self):\n self.win.update()\n 
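# brief pause after each redraw so the move stays visible on screen\n        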
time.sleep(0.1)\n\n\nclass GridWorld(Env):\n    def __init__(self):\n        super().__init__()\n        self._win_d_update()\n\n    def player_move(self, x, y):\n        # x moves horizontally to the right, y moves vertically downward\n        self.canvas.move(self.player, x * self.grid_size, y * self.grid_size)\n        self._win_d_update()\n\n    def reset(self):\n        # reset to the starting position\n        x, y = self.canvas.coords(self.player)\n        self.canvas.move(self.player, -x + self.grid_size / 2, -y + self.grid_size / 2)\n        self._win_d_update()\n        return self.get_state(self.player)\n\n    def get_state(self, who):\n        x, y = self.canvas.coords(who)\n        state = [int(x / self.grid_size), int(y / self.grid_size)]\n        return state\n\n    def update_val(self, num, arrow, val):\n        pos = num[0] * 5 + num[1]\n        x, y = self.canvas.coords(self.texts[pos][arrow])\n        self.canvas.delete(self.texts[pos][arrow])\n        self.texts[pos][arrow] = self.canvas.create_text(x, y, text=val)\n        # self._win_d_update()\n\n    def exec_calc(self, action):\n        # execute one decision step\n        feedback = 'alive' # alive, stop, dead correspond to passing, hitting a wall, and being blown up\n        next_state = []\n        next_h, next_v, reward = 0.0, 0.0, 0.0\n        h, v = self.get_state(self.player)\n        if action == 0: # up\n            next_h = h\n            next_v = v - 1\n            # self.player_move(0, -1)\n        elif action == 1: # down\n            next_h = h\n            next_v = v + 1\n            # self.player_move(0, 1)\n        elif action == 2: # left\n            next_h = h - 1\n            next_v = v\n            # self.player_move(-1, 0)\n        elif action == 3: # right\n            next_h = h + 1\n            next_v = v\n            # self.player_move(1, 0)\n        else:\n            print('programmer bug ...')\n        next_state = [next_h, next_v]\n        boom1, boom2, boom3, boom4 = self.get_state(self.boom1), self.get_state(self.boom2), self.get_state(\n            self.boom3), self.get_state(self.boom4)\n        diamond = self.get_state(self.diamond)\n        if next_h < 0 or next_v < 0 or next_h > 4 or next_v > 4: # out of bounds\n            reward = -1\n            feedback = 'stop'\n        elif next_state == boom1 or next_state == boom2 or next_state == boom3 or next_state == boom4: # bomb cell\n            reward = -100\n            feedback = 'dead'\n        elif next_state == diamond: # the item needed to clear the level\n            reward = 500\n        else:\n            reward = 0\n        return feedback, next_state, reward\n\n    def update_view(self, state, action, next_state, q_val):\n        action_list = ['up', 'down', 'left', 'right']\n        self.player_move(next_state[0] - state[0], next_state[1] - state[1])\n        self.update_val(state, action_list[action], round(q_val, 2))\n\n    def attach(self):\n        # returns True when the goal is reached, False otherwise\n        return str(self.get_state(self.player)) == str(self.get_state(self.diamond))\n\n\n","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":8169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
{"seq_id":"65734643","text":"'''Dataset and model definitions'''\r\nimport torch.utils.data as Data\r\nimport torch.nn as nn\r\nimport torch\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n# custom dataset\r\nclass myDataset(Data.Dataset):\r\n    \r\n    def __init__(self, input_lstm, input_2d, output_data):\r\n        self.input_2d = input_2d\r\n        self.input_lstm = input_lstm\r\n        self.output_data = output_data\r\n    \r\n    def __len__(self):\r\n        return self.input_lstm.shape[0]\r\n    \r\n    def __getitem__(self, idx):\r\n        x = self.input_lstm[idx, :, :]\r\n        x_2d = self.input_2d[:, idx, :, :, :]\r\n        y = self.output_data[idx, :]\r\n        sample = {'x': x, 'x_2d': x_2d, 'y': y}\r\n        return sample\r\n    \r\n# custom model\r\nclass myModel(nn.Module):\r\n    def __init__(self, feature_num, hidden_units, variable_num):\r\n        super(myModel, self).__init__()\r\n        self.feature_num = feature_num\r\n        self.hidden_units = hidden_units\r\n        self.variable_num = variable_num\r\n        \r\n        self.conv1 = 
nn.Sequential(nn.Conv2d(self.variable_num, 32, (3,3)),# input num*61*61, output 32*59*59\r\n                                   nn.ReLU())# output 32*59*59\r\n        self.conv2 = nn.Sequential(nn.Conv2d(32, 32, (3,3)),# input 32*59*59, output 32*57*57\r\n                                   nn.ReLU(),\r\n                                   nn.MaxPool2d((2,2), stride=(2,2)))# output 32*28*28\r\n        self.conv3 = nn.Sequential(nn.Conv2d(32, 64, (3,3)),# input 32*28*28, output 64*26*26\r\n                                   nn.ReLU(),\r\n                                   nn.MaxPool2d((2,2), stride=(2,2)))# output 64*13*13\r\n        self.conv4 = nn.Sequential(nn.Conv2d(64, 64, (3,3)),# input 64*13*13, output 64*11*11\r\n                                   nn.ReLU(),\r\n                                   nn.MaxPool2d((2,2), stride=(2,2)))# output 64*5*5\r\n        self.conv5 = nn.Sequential(nn.Conv2d(64, 128, (3,3)),# input 64*5*5, output 128*3*3\r\n                                   nn.ReLU())# output 128*3*3\r\n        self.fc1 = nn.Sequential(nn.Linear(in_features=1152, out_features=256),\r\n                                 nn.ReLU())\r\n        self.fc2 = nn.Sequential(nn.Linear(in_features=256, out_features=64),\r\n                                 nn.Sigmoid())\r\n        \r\n        self.lstm = nn.LSTM(input_size=self.feature_num, hidden_size=self.hidden_units, num_layers=2, batch_first=True)\r\n        \r\n        self.fc01 = nn.Sequential(nn.Linear(in_features=self.hidden_units, out_features=16),\r\n                                  nn.ReLU())\r\n        self.fc02 = nn.Linear(in_features=16, out_features=1)\r\n        \r\n        \r\n    def forward(self, x, x_2d):# x_2d: (batch, time_idx, c, h, w); x: (batch, l, f)\r\n        # 2D convolution\r\n        outputs_conv = torch.zeros((5, x.size(0), 64), requires_grad=True).to(device)\r\n        for time_idx in range(5):\r\n            input_conv = x_2d[:, time_idx, :, :, :]\r\n            output_conv = self.conv1(input_conv)\r\n            output_conv = self.conv2(output_conv)\r\n            output_conv = self.conv3(output_conv)\r\n            output_conv = self.conv4(output_conv)\r\n            output_conv = self.conv5(output_conv)\r\n            output_conv = output_conv.view(output_conv.size(0), -1)\r\n            output_conv = self.fc1(output_conv)\r\n            output_conv = self.fc2(output_conv)# output: (batch, 64)\r\n            outputs_conv[time_idx, :, :] = output_conv# output: (5, batch, 64)\r\n        outputs_conv = outputs_conv.permute(1, 0, 2)# output: (batch, 5, 64)\r\n        # LSTM\r\n        x_cat = torch.cat((x, outputs_conv), 2)\r\n        h0 = torch.zeros((2, x.size(0), self.hidden_units), requires_grad=True).to(device)\r\n        c0 = torch.zeros((2, x.size(0), self.hidden_units), requires_grad=True).to(device)\r\n        x_cat, (hn, cn) = self.lstm(x_cat, (h0, c0))\r\n        # LSTM input x_cat: (batch, seq_len, feature); output x_cat: (batch, seq_len, hidden_size); the output holds every time step of the last LSTM layer\r\n        # h and c: (num_layers, batch, hidden_size)\r\n        out_final = x_cat[:, -1, :]# y output: (batch*256)\r\n        out_final = self.fc01(out_final)\r\n        out_final = self.fc02(out_final)\r\n        \r\n        return out_final","sub_path":"net_2D_1979_2016.py","file_name":"net_2D_1979_2016.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
{"seq_id":"403919675","text":"from odoo import fields,models,api\nfrom dateutil.relativedelta import relativedelta\nfrom odoo.exceptions import UserError\n\nclass PropertyOffer(models.Model):\n    _name = \"estate.property.offer\"\n    _order = \"price desc\"\n\n    price = fields.Float('Price')\n    status = fields.Selection(string=\"Status\", selection=[('accepted','Accepted'),('refused','Refused')], copy=False)\n    partner_id = fields.Many2one(comodel_name=\"res.partner\", required=True)\n    property_id = fields.Many2one(comodel_name=\"estate.property\", required=True)\n    validity = fields.Integer(string=\"Validity(days)\", default=7)\n    date_deadline = fields.Date(string=\"Deadline\", compute=\"_compute_date_deadline\", inverse=\"_inverse_date_deadline\")\n    property_type_id = fields.Many2one(related='property_id.property_type_id', store=True)\n\n    _sql_constraints = [\n        ('check_eprice', 'CHECK(price > 0)',\n         'Offer 
price must be positive'),\n    ]\n\n    @api.depends(\"validity\", \"create_date\")\n    def _compute_date_deadline(self):\n        for record in self:\n            if record.create_date:\n                record.date_deadline = fields.Date.from_string(record.create_date) + relativedelta(days=+record.validity)\n            else:\n                record.date_deadline = fields.Date.from_string(fields.Date.today()) + relativedelta(days=+record.validity)\n\n    def _inverse_date_deadline(self):\n        for record in self:\n            if record.create_date:\n                record.validity = int((fields.Date.from_string(record.date_deadline) - fields.Date.from_string(record.create_date)).days)\n            else:\n                record.validity = int((fields.Date.from_string(record.date_deadline) - fields.Date.from_string(fields.Date.today())).days)\n\n\n    def action_confirm(self):\n        self.ensure_one()\n        if self.property_id.buyer_id:\n            raise UserError('Cannot accept more than 1 offer')\n        self.status = 'accepted'\n        self.property_id.buyer_id = self.partner_id\n        self.property_id.selling_price = self.price\n        return True\n\n    def action_cancel(self):\n        self.ensure_one()\n        self.status = 'refused'\n        return True\n\n\n    @api.model\n    def create(self, vals):\n        #print(vals)\n        property_odj = self.env['estate.property'].browse(vals['property_id'])\n        min_offer = min(property_odj.offer_ids.mapped('price')) if property_odj.offer_ids.mapped('price') else 0\n        if vals['price'] < min_offer:\n            raise UserError(\"Cannot offer a lower price\")\n        #print(vals['price'],min_offer)\n        property_odj.state = 'offer_received'\n        return super().create(vals)\n","sub_path":"models/estate_property_offer.py","file_name":"estate_property_offer.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
{"seq_id":"316061563","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# **\n#\n# ========================= #\n# UI_EDIT_BLOCK_CONTROLLER #\n# ========================= #\n# Class for controlling the block editor GUI.\n#\n# @author ES\n# **\n\nimport logging\nimport os\n\nfrom block_manager.enums.block_enums import SocketType\nfrom es_common.enums.command_enums import ActionCommand\nfrom es_common.enums.module_enums import InteractionModule\nfrom es_common.factory.command_factory import CommandFactory\nfrom es_common.model.design_module import DesignModule\nfrom es_common.model.interaction_block import InteractionBlock\nfrom es_common.model.tablet_page import TabletPage\nfrom es_common.model.topic_tag import TopicTag\nfrom es_common.utils.qt import QtCore, QtWidgets, qtSlot\nfrom es_common.model.speech_act import SpeechAct\nfrom interaction_manager.utils import config_helper\nfrom interaction_manager.view.ui_editblock_dialog import Ui_EditBlockDialog\n\nSELECT_OPTION = \"-- SELECT --\"\n\n\nclass UIEditBlockController(QtWidgets.QDialog):\n    def __init__(self, interaction_block, block_controller, music_controller=None, robot_controller=None, parent=None):\n        super(UIEditBlockController, self).__init__(parent)\n\n        self.logger = logging.getLogger(\"EditBlockController\")\n\n        self.block_controller = block_controller\n        self.interaction_block = interaction_block\n        self.music_controller = music_controller\n        self.robot_controller = robot_controller\n\n        self.pattern_settings, self.pattern = self._get_pattern_settings(self.interaction_block.pattern.lower())\n\n        # init UI elements\n        self.ui = Ui_EditBlockDialog()\n        self.ui.setupUi(self)\n\n        # init ui elements\n        self._init_ui()\n\n        # give it control\n        self.setModal(True)\n\n    def _init_ui(self):\n        if self.interaction_block is 
None:\n self.interaction_block = InteractionBlock()\n\n self.connected_interaction_blocks = self.interaction_block.get_connected_interaction_blocks(\n socket_type=SocketType.OUTPUT\n )\n\n self.setWindowTitle(\"Edit Block\")\n\n # block properties\n self.ui.patternLineEdit.setText(self.pattern)\n self.ui.blockDescriptionLineEdit.setText(self.interaction_block.description)\n\n # Message\n speech_act = self.interaction_block.speech_act\n self.ui.messageTextEdit.setText(speech_act.message)\n # self.ui.messageTypeComboBox.setCurrentIndex(\n # self.ui.messageTypeComboBox.findText(speech_act.message_type.name.title(), QtCore.Qt.MatchFixedString))\n\n # Animation\n self.ui.animationLineEdit.setText(self.interaction_block.animation)\n self.ui.animationsActionComboBox.currentIndexChanged.connect(self.on_animation_change)\n\n # Actions\n self.set_actions()\n\n # Modules\n self.set_modules()\n\n # Gestures tab: remove!\n beh_tab_index = self.ui.tabWidget.indexOf(self.ui.tabWidget.findChild(QtWidgets.QWidget, \"behaviorsTab\"))\n self.ui.tabWidget.removeTab(beh_tab_index)\n\n # tablet page\n self.set_tablet_tab()\n\n # topic tag\n self.toggle_topic_tab()\n\n def _get_pattern_settings(self, pattern_name):\n patterns = config_helper.get_patterns()\n pattern_settings = {}\n for p in patterns.keys():\n if pattern_name in p:\n self.logger.info(\"Pattern found: {}\".format(p))\n return patterns[p], p\n return {}, \"\"\n\n def toggle_topic_tab(self):\n topic_index = self.ui.tabWidget.indexOf(self.ui.tabWidget.findChild(QtWidgets.QWidget, 'topicTab'))\n\n if self.pattern_settings[\"topic\"] == \"\":\n self.ui.tabWidget.setTabEnabled(topic_index, False) # self.ui.topicTab.setEnabled(False)\n self._set_topic_tab(reset=True)\n self.ui.tabWidget.removeTab(topic_index)\n else:\n self.ui.topicTab.setEnabled(True)\n self._set_topic_tab(reset=False)\n\n def set_modules(self):\n if \"module\" in self.pattern_settings.keys():\n try:\n self.toggle_module_tab(enable=True)\n\n # button listeners\n self.ui.moduleFileRadioButton.clicked.connect(lambda: self.enable_module_selection(enable_file=True))\n self.ui.moduleRandomizeRadioButton.clicked.connect(lambda: self.enable_module_selection(False))\n self.ui.moduleSelectFileToolButton.clicked.connect(self.select_file)\n self.ui.moduleSelectFolderToolButton.clicked.connect(self.select_folder)\n\n # TODO: check if needed\n modules = [m for m in InteractionModule.keys()]\n\n self.ui.moduleNameComboBox.addItems([SELECT_OPTION])\n self.ui.moduleNameComboBox.addItems(modules)\n\n # check if the block contains a module\n design_module = self.interaction_block.design_module\n if design_module:\n if design_module.randomize:\n self.ui.moduleRandomizeRadioButton.setChecked(True)\n self.enable_module_selection(enable_file=False)\n self.ui.moduleFolderNameLineEdit.setText(design_module.folder_name)\n else:\n self.ui.moduleFileNameLineEdit.setText(design_module.filename)\n\n if design_module.name:\n # update combobox current item\n self.ui.moduleNameComboBox.setCurrentIndex(\n self.ui.moduleNameComboBox.findText(design_module.name, QtCore.Qt.MatchFixedString))\n\n self.logger.info(\"Module tab is setup\")\n except Exception as e:\n self.logger.error(\"Error while enabling the modules: {}\".format(e))\n else:\n self.toggle_module_tab(enable=False)\n\n def toggle_module_tab(self, enable=False):\n tab_index = self.ui.tabWidget.indexOf(self.ui.tabWidget.findChild(QtWidgets.QWidget, 'moduleTab'))\n self.ui.tabWidget.setTabEnabled(tab_index, enable)\n\n if enable is False:\n 
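# the pattern defines no module step, so remove the tab instead of just disabling it\n            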
self.ui.tabWidget.removeTab(tab_index)\n\n def enable_module_selection(self, enable_file=False):\n self.ui.moduleFileNameLineEdit.setEnabled(enable_file)\n self.ui.moduleSelectFileToolButton.setEnabled(enable_file)\n\n self.ui.moduleFolderNameLineEdit.setEnabled(not enable_file)\n self.ui.moduleSelectFolderToolButton.setEnabled(not enable_file)\n\n def select_file(self):\n options = QtWidgets.QFileDialog.Options()\n options |= QtWidgets.QFileDialog.DontUseNativeDialog\n file_path, _ = QtWidgets.QFileDialog.getOpenFileName(self, \"Select JSON file\", \"\", \"JSON Files (*.json)\",\n options=options)\n self.ui.moduleFileNameLineEdit.setText(file_path)\n\n def select_folder(self):\n folder_name = QtWidgets.QFileDialog.getExistingDirectory(\n self,\n \"Select a folder\",\n os.getcwd(),\n QtWidgets.QFileDialog.ShowDirsOnly\n )\n self.ui.moduleFolderNameLineEdit.setText(folder_name)\n\n def set_actions(self):\n # TODO: Use the action property defined in the pattern\n\n if \"action\" in self.pattern:\n try:\n actions = [a for a in ActionCommand.keys()]\n self.toggle_action_tab(enable=True)\n\n self.ui.actionComboBox.addItems([SELECT_OPTION])\n # Enable music action only!\n self.ui.actionComboBox.addItems([ActionCommand.PLAY_MUSIC.name]) # (actions)\n\n # listeners\n self.ui.actionComboBox.currentIndexChanged.connect(self.on_action_change)\n # music\n self.ui.playlistComboBox.currentIndexChanged.connect(self.update_tracks_combo)\n self.ui.animationsActionCheckBox.stateChanged.connect(lambda: self.ui.animationsActionComboBox.setEnabled(\n self.ui.animationsActionCheckBox.isChecked()))\n self.ui.animationsActionComboBox.addItems([a for a in config_helper.get_animations()])\n\n # check if the block contains an action\n if self.interaction_block.action_command is not None:\n comm_type = self.interaction_block.action_command.command_type\n # update action name\n self.ui.actionComboBox.setCurrentIndex(\n self.ui.actionComboBox.findText(comm_type.name, QtCore.Qt.MatchFixedString))\n # update range\n if comm_type is ActionCommand.CHECK_RESERVATIONS:\n # TODO\n self.logger.info(\"TODO\")\n elif comm_type is ActionCommand.WAIT:\n self.ui.timeSpinBox.setValue(self.interaction_block.action_command.wait_time)\n elif comm_type is ActionCommand.PLAY_MUSIC:\n self.ui.playlistComboBox.setCurrentIndex(\n self.ui.playlistComboBox.findText(self.interaction_block.action_command.playlist,\n QtCore.Qt.MatchFixedString))\n self.ui.tracksComboBox.setCurrentIndex(\n self.ui.tracksComboBox.findText(self.interaction_block.action_command.track,\n QtCore.Qt.MatchFixedString))\n self.ui.playTimeSpinBox.setValue(self.interaction_block.action_command.play_time)\n\n anim = self.interaction_block.action_command.animations_key\n if anim is not None and anim != \"\":\n self.ui.animationsActionCheckBox.setChecked(True)\n self.ui.animationsActionComboBox.setCurrentIndex(\n self.ui.animationsActionComboBox.findText(anim,\n QtCore.Qt.MatchFixedString))\n except Exception as e:\n self.logger.error(\"Error while setting actions! 
{}\".format(e))\n\n else: # otherwise: hide the actions\n self.toggle_action_tab(enable=False)\n\n def toggle_action_tab(self, enable=False):\n tab_index = self.ui.tabWidget.indexOf(self.ui.tabWidget.findChild(QtWidgets.QWidget, 'actionTab'))\n self.ui.tabWidget.setTabEnabled(tab_index, enable)\n\n self.ui.musicGroupBox.setHidden(True)\n self.ui.timeGroupBox.setHidden(True)\n\n if enable is False:\n self.ui.actionComboBox.setHidden(True)\n self.ui.tabWidget.removeTab(tab_index)\n\n def on_action_change(self):\n hide_reservations, hide_music, hide_time = True, True, True\n if self.ui.actionComboBox.currentText() in ActionCommand.CHECK_RESERVATIONS.name:\n hide_reservations = False\n elif self.ui.actionComboBox.currentText() in ActionCommand.WAIT.name:\n hide_time = False\n elif self.ui.actionComboBox.currentText() in ActionCommand.PLAY_MUSIC.name:\n hide_music = False\n self.update_playlist_combo()\n\n self.ui.timeGroupBox.setHidden(hide_time)\n self.ui.musicGroupBox.setHidden(hide_music)\n\n def update_playlist_combo(self):\n self.ui.playlistComboBox.clear()\n if self.music_controller is None:\n return\n\n try:\n if self.music_controller.playlists is None or len(self.music_controller.playlists) == 0:\n return\n self.ui.playlistComboBox.addItems([SELECT_OPTION])\n self.ui.playlistComboBox.addItems([p for p in self.music_controller.playlists.keys()])\n except Exception as e:\n self.logger.error(\"Error while loading the playlists! {}\".format(e))\n\n def update_tracks_combo(self):\n self.ui.tracksComboBox.clear()\n if self.music_controller is None:\n return\n playlist = self.ui.playlistComboBox.currentText()\n try:\n if playlist != SELECT_OPTION:\n self.ui.tracksComboBox.addItems([SELECT_OPTION])\n self.ui.tracksComboBox.addItems([t for t in self.music_controller.playlists[playlist][\"tracks\"]])\n except Exception as e:\n self.logger.error(\"Error while loading tracks for playlist: {}! 
{}\".format(playlist, e))\n\n def set_tablet_tab(self):\n pages = config_helper.get_tablet_properties()[\"pages\"].keys()\n tablet_page = self.interaction_block.tablet_page\n\n self.ui.tabletPageNameComboBox.clear()\n\n if \"input\" in self.pattern.lower():\n self.ui.tabletPageNameComboBox.addItems(config_helper.get_tablet_properties()[\"input_page\"].keys())\n else:\n self.ui.tabletPageNameComboBox.addItems([SELECT_OPTION])\n self.ui.tabletPageNameComboBox.addItems(pages)\n if not tablet_page.name == \"\":\n self.ui.tabletPageNameComboBox.setCurrentIndex(\n self.ui.tabletPageNameComboBox.findText(tablet_page.name, QtCore.Qt.MatchFixedString))\n\n self.ui.tabletImageComboBox.clear()\n self.ui.tabletImageComboBox.addItems(os.listdir(config_helper.get_tablet_properties()[\"pics_folder\"]))\n if not tablet_page.image == \"\":\n self.ui.tabletImageComboBox.setCurrentIndex(\n self.ui.tabletImageComboBox.findText(tablet_page.image, QtCore.Qt.MatchFixedString))\n\n self.ui.tabletHeadingTextEdit.setText(tablet_page.heading)\n self.ui.tabletInfoTextEdit.setText(tablet_page.text)\n\n def _set_topic_tab(self, reset=False):\n # set answers and feedbacks\n if reset is True:\n tag, topic, a1, a2, = (\"\",) * 4\n else:\n topic_tag = self.interaction_block.topic_tag\n\n a1 = '' if len(topic_tag.answers) == 0 else topic_tag.answers[0]\n a2 = '' if len(topic_tag.answers) < 2 else topic_tag.answers[1]\n\n # update the slots\n self.ui.answer1TextEdit.setText(a1)\n self.ui.answer2TextEdit.setText(a2)\n\n self.update_go_to(self.interaction_block.topic_tag.goto_ids,\n self.ui.answer1GoToComboBox, self.ui.answer2GoToComboBox)\n\n def update_go_to(self, goto_ids, combo_box_1, combo_box_2):\n if self.connected_interaction_blocks is not None and len(self.connected_interaction_blocks) > 0:\n items = [SELECT_OPTION]\n items.extend([\"{}: {}\".format(b.title, b.description) for b in self.connected_interaction_blocks])\n\n combo_box_1.addItems(items)\n combo_box_2.addItems(items)\n\n if goto_ids is None or len(goto_ids) == 0:\n return\n\n for i in range(len(goto_ids)):\n b = self.block_controller.get_block_by_parent_id(parent_id=goto_ids[i])\n if b is None: # block is not found!\n self.logger.debug(\"Found 0 blocks for id: {}\".format(goto_ids[i]))\n continue\n\n opt = \"{}: {}\".format(b.title, b.description)\n if opt in items:\n if i == 0:\n combo_box_1.setCurrentIndex(\n combo_box_1.findText(opt, QtCore.Qt.MatchFixedString))\n else:\n combo_box_2.setCurrentIndex(\n combo_box_2.findText(opt, QtCore.Qt.MatchFixedString))\n\n def get_speech_act(self):\n return SpeechAct.create_speech_act({\"message\": \"{}\".format(self.ui.messageTextEdit.toPlainText()).strip(),\n \"message_type\": \"Informal\"})\n\n @qtSlot()\n def on_animation_change(self):\n animation = self.ui.animationComboBox.currentText()\n if animation != SELECT_OPTION:\n self.ui.animationLineEdit.setText(animation)\n\n def get_animation(self):\n animation = self.ui.animationLineEdit.text().strip()\n return None if animation == \"\" else animation\n\n def get_topic_tag(self):\n topic_tag = TopicTag()\n if self.ui.topicTab.isEnabled():\n topic_tag.answers = [\"{}\".format(self.ui.answer1TextEdit.toPlainText()).strip(),\n \"{}\".format(self.ui.answer2TextEdit.toPlainText()).strip()]\n\n # set GoTos\n goto_ids = [-1, -1]\n options = [\"{}\".format(self.ui.answer1GoToComboBox.currentText()),\n \"{}\".format(self.ui.answer2GoToComboBox.currentText())]\n for i in range(len(options)):\n if self._is_valid_option(options[i]):\n b = 
self._get_block_from_details(*options[i].split(\":\"))\n if b is not None:\n goto_ids[i] = b.id\n self.logger.debug(\"GoTo IDS: {} | {}\".format(*goto_ids))\n topic_tag.goto_ids = goto_ids\n\n return topic_tag\n\n def get_tablet_page(self):\n return TabletPage(name=\"{}\".format(self.ui.tabletPageNameComboBox.currentText()),\n heading=\"{}\".format(self.ui.tabletHeadingTextEdit.toPlainText()).strip(),\n text=\"{}\".format(self.ui.tabletInfoTextEdit.toPlainText()).strip(),\n image=\"{}\".format(self.ui.tabletImageComboBox.currentText()),\n )\n\n def get_module(self):\n tab_index = self.ui.tabWidget.indexOf(self.ui.tabWidget.findChild(QtWidgets.QWidget, 'moduleTab'))\n if self.ui.tabWidget.isTabEnabled(tab_index):\n design_module = DesignModule()\n\n # name\n module_name = \"{}\".format(self.ui.moduleNameComboBox.currentText())\n if module_name in InteractionModule.keys():\n self.logger.info(f\"Module name set to: {module_name}\")\n design_module.name = module_name\n\n # filename\n if self.ui.moduleFileRadioButton.isChecked():\n design_module.filename = \"{}\".format(self.ui.moduleFileNameLineEdit.text())\n elif self.ui.moduleRandomizeRadioButton.isChecked():\n design_module.randomize = True\n design_module.folder_name = \"{}\".format(self.ui.moduleFolderNameLineEdit.text())\n\n self.logger.info(design_module.to_dict)\n return design_module\n\n return None\n\n def get_command(self):\n if self.ui.actionComboBox.isHidden():\n return None\n\n comm_name = \"{}\".format(self.ui.actionComboBox.currentText())\n if comm_name in ActionCommand.keys():\n args = self.get_command_arguments(comm_name=comm_name)\n if args is None:\n if comm_name == ActionCommand.PLAY_MUSIC.name:\n return None\n return CommandFactory.create_command(ActionCommand[comm_name])\n else:\n return CommandFactory.create_command(ActionCommand[comm_name], *args)\n\n return None\n\n def get_command_arguments(self, comm_name):\n args = None\n # if comm_name == ActionCommand.DRAW_NUMBER.name:\n # args = [self.ui.rangeMinSpinBox.value(), self.ui.rangeMaxSpinBox.value()]\n if comm_name == ActionCommand.WAIT.name:\n args = [self.ui.timeSpinBox.value()]\n elif comm_name == ActionCommand.PLAY_MUSIC.name:\n args = self.get_music_arguments()\n\n return args\n\n def get_music_arguments(self):\n args = []\n try:\n playlist = self.ui.playlistComboBox.currentText()\n track = self.ui.tracksComboBox.currentText()\n\n if playlist == \"\" or playlist in SELECT_OPTION \\\n or track == \"\" or track in SELECT_OPTION:\n args = None\n else:\n args = [playlist, track, int(self.ui.playTimeSpinBox.value())]\n if self.ui.animationsActionCheckBox.isChecked():\n args.append(\"{}\".format(self.ui.animationsActionComboBox.currentText()))\n except Exception as e:\n self.logger.error(\"Error while loading music arguments! 
{}\".format(e))\n finally:\n self.logger.debug(\"*** ARGS: {}\".format(args))\n return args\n\n def get_interaction_block(self):\n d_block = self.interaction_block.clone()\n d_block.name = \"{}\".format(self.ui.patternLineEdit.text().strip())\n d_block.pattern = \"{}\".format(self.ui.patternLineEdit.text().strip())\n d_block.description = \"{}\".format(self.ui.blockDescriptionLineEdit.text().strip())\n d_block.speech_act = self.get_speech_act()\n d_block.animation = self.get_animation()\n d_block.topic_tag = self.get_topic_tag()\n d_block.tablet_page = self.get_tablet_page()\n d_block.action_command = self.get_command()\n d_block.design_module = self.get_module()\n # d_block.interaction_module_name = self.get_module()\n\n return d_block\n\n def update_interaction_block(self, int_block):\n if int_block is None:\n return\n\n int_block.name = \"{}\".format(self.ui.patternLineEdit.text().strip())\n int_block.pattern = \"{}\".format(self.ui.patternLineEdit.text().strip())\n if int_block.block:\n int_block.block.title = int_block.pattern.title()\n int_block.description = \"{}\".format(self.ui.blockDescriptionLineEdit.text().strip())\n int_block.speech_act = self.get_speech_act()\n int_block.animation = self.get_animation()\n int_block.topic_tag = self.get_topic_tag()\n int_block.tablet_page = self.get_tablet_page()\n int_block.design_module = self.get_module()\n # int_block.interaction_module_name = self.get_module()\n\n # don't update music command if there is no connection to the music service\n if \"{}\".format(self.ui.actionComboBox.currentText()) == ActionCommand.PLAY_MUSIC.name:\n if self.music_controller is not None:\n int_block.action_command = self.get_command()\n else:\n int_block.action_command = self.get_command()\n\n def _is_valid_option(self, option):\n if option is not None and option != \"\" and option != SELECT_OPTION:\n return True\n\n return False\n\n def _get_block_from_details(self, title, desc):\n for b in self.connected_interaction_blocks:\n if b.title == title.strip() and b.description == desc.strip():\n return b\n return None\n\n def _toggle_item(self, item, status):\n if item is None or status is None:\n return\n try:\n item.setEnabled(status)\n self.repaint()\n except Exception as e:\n self.logger.info(\"Error while enabling item: {} | {}\".format(item, e))\n finally:\n return\n","sub_path":"interaction_manager/controller/ui_edit_block_controller.py","file_name":"ui_edit_block_controller.py","file_ext":"py","file_size_in_byte":22814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"353347357","text":"# coding:utf-8\nimport os\nimport base64\nimport json\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.serialization import load_pem_public_key\n\n\nclass EthCert(object):\n\n def __init__(self, username=\"anonymous\"):\n cert_dir = os.path.dirname(os.path.realpath(__file__))\n self.pems = os.path.join(cert_dir, \"pems\")\n self.pems_user_dir = os.path.join(self.pems, username)\n self.thiscert = {\n \"private\": os.path.join(self.pems, username, \"private_key.pem\"),\n \"public\": os.path.join(self.pems, username, \"public_key.pem\"),\n }\n self.private_key_str = None\n self.public_key_str = 
None\n self.private_key = None\n self.public_key = None\n self.style = None\n self.error = \"\"\n\n def init_dir(self, username):\n \"\"\"\n Set the user's key directory\n :param username:\n :return:\n \"\"\"\n self.pems_user_dir = os.path.join(self.pems, username)\n self.thiscert = {\n \"private\": os.path.join(self.pems, username, \"private_key.pem\"),\n \"public\": os.path.join(self.pems, username, \"public_key.pem\"),\n }\n\n def generate(self, size=2048):\n \"\"\"\n Generate the public and private keys\n :param size:\n :return:\n \"\"\"\n # Generate the public/private key pair.\n private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=size,\n backend=default_backend(),\n )\n self.private_key_str = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n )\n self.public_key_str = private_key.public_key().public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n return True\n\n def load_key_from_file(self):\n \"\"\"\n Read the public or private key from the user's directory; the text that is read is kept in instance variables\n :return:\n \"\"\"\n try:\n if os.path.isfile(self.thiscert['private']):\n with open(self.thiscert['private'], 'rb') as kfd:\n self.private_key_str = kfd.read()\n if os.path.isfile(self.thiscert['public']):\n with open(self.thiscert['public'], 'rb') as kfd:\n self.public_key_str = kfd.read()\n if not self.private_key_str and not self.public_key_str:\n self.error = \"provide private or public key\"\n return False\n except Exception as e:\n self.error = f\"{e}\"\n return False\n return True\n\n def init_key(self, private_key_str=None, public_key_str=None):\n if not private_key_str and not public_key_str:\n self.error = \"should provide private or public key\"\n return False\n if private_key_str:\n self.private_key_str = self.convert(private_key_str)\n if public_key_str:\n self.public_key_str = self.convert(public_key_str)\n return True\n\n def serialization(self):\n \"\"\"\n Load the public and private keys, used for encryption, decryption, signing and verification\n :return:\n \"\"\"\n try:\n if self.private_key_str:\n self.private_key = serialization.load_pem_private_key(\n self.private_key_str,\n password=None,\n backend=default_backend()\n )\n if self.public_key_str:\n self.public_key = load_pem_public_key(self.public_key_str, default_backend())\n except Exception as e:\n self.error = f\"serialization error: {e}\"\n return False\n return True\n\n def convert(self, origin_str):\n if isinstance(origin_str, bytes):\n return origin_str\n if isinstance(origin_str, str):\n return bytes(origin_str, encoding='utf8')\n if isinstance(origin_str, (list, dict)):\n return bytes(json.dumps(origin_str, ensure_ascii=False, separators=(',', ':')), encoding='utf8')\n else:\n return bytes(str(origin_str), encoding='utf8')\n\n def save_file(self):\n \"\"\"\n Save the public or private key to the user's directory\n :return:\n \"\"\"\n if not os.path.isdir(self.pems_user_dir):\n os.mkdir(self.pems_user_dir)\n if self.private_key_str:\n try:\n # Save the private key to a file.\n with open(self.thiscert['private'], 'wb') as f:\n f.write(self.private_key_str)\n except Exception as e:\n self.error = f\"{e}\"\n return False\n if self.public_key_str:\n try:\n # Save the public key to a file.\n with open(self.thiscert['public'], 'wb') as f:\n f.write(self.public_key_str)\n except Exception as e:\n self.error = f\"{e}\"\n return False\n return True\n\n def sign2_str(self, origin_data):\n signature = self.sign2(origin_data)\n if signature is not False:\n return signature.decode()\n else:\n return signature\n\n def sign2(self, 
origin_data):\n if self.private_key is None:\n self.error = \"serialization private key first\"\n return False\n signature = base64.b64encode(\n self.private_key.sign(\n self.convert(origin_data),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH,\n ),\n hashes.SHA256(),\n )\n )\n return signature\n\n def sign_str(self, origin_data):\n signature = self.sign(origin_data)\n if signature is not False:\n return signature.decode()\n else:\n return signature\n\n def sign(self, origin_data):\n if self.private_key is None:\n self.error = \"serialization private key first\"\n return False\n signature = base64.b64encode(self.private_key.sign(\n self.convert(origin_data),\n padding.PKCS1v15(),\n hashes.SHA256(),\n )\n )\n return signature\n\n def verify2(self, origin_data, signature):\n if self.public_key is None:\n self.error = \"serialization public key first\"\n return False\n try:\n signature_decode = base64.b64decode(signature)\n self.public_key.verify(\n signature_decode,\n self.convert(origin_data),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH,\n ),\n hashes.SHA256(),\n )\n except InvalidSignature as e:\n self.error = f'ERROR: signature failed verification! {e}'\n return False\n return True\n\n def verify(self, origin_data, signature):\n if self.public_key is None:\n self.error = \"serialization public key first\"\n return False\n try:\n signature_decode = base64.b64decode(signature)\n self.public_key.verify(\n signature_decode,\n self.convert(origin_data),\n padding.PKCS1v15(),\n hashes.SHA256(),\n )\n except InvalidSignature:\n self.error = \"ERROR: signature failed verification!\"\n return False\n return True\n\n def encrypt_str(self, origin_data):\n encrypt_data_encode = self.encrypt(origin_data)\n if encrypt_data_encode is not False:\n return encrypt_data_encode.decode()\n else:\n return encrypt_data_encode\n\n def encrypt(self, origin_data):\n if not self.public_key:\n self.error = \"serialization public key first\"\n return False\n encrypt_length = int(self.public_key.key_size / 8 - 11)\n bytes_data = self.convert(origin_data)\n bytes_len = len(bytes_data)\n offset = 0\n en_res = []\n while bytes_len - offset > 0:\n en_res.append(\n self.public_key.encrypt(\n bytes_data[offset: offset + encrypt_length],\n padding.PKCS1v15(),\n )\n )\n offset += encrypt_length\n if bytes_data[offset:]:\n en_res.append(\n self.public_key.encrypt(\n bytes_data[offset: offset + encrypt_length],\n padding.PKCS1v15(),\n )\n )\n encrypt_data_encode = base64.b64encode(b''.join(en_res))\n return encrypt_data_encode\n\n def get_publickey(self):\n return self.public_key_str.decode()\n\n def get_privatekey(self):\n return self.private_key_str.decode()\n\n def decrypt_str(self, encrypt_data):\n decrypt_data_res = self.decrypt(encrypt_data)\n if decrypt_data_res is not False:\n return decrypt_data_res.decode()\n else:\n return decrypt_data_res\n\n def decrypt(self, encrypt_data):\n if not self.private_key:\n self.error = \"serialization private key first\"\n return False\n try:\n bytes_data = self.convert(encrypt_data)\n decrypt_data = base64.b64decode(bytes_data)\n decrypt_length = int(self.private_key.key_size / 8)\n bytes_len = len(decrypt_data)\n de_res = []\n offset = 0\n while bytes_len - offset > 0:\n de_res.append(\n self.private_key.decrypt(\n decrypt_data[offset: offset + decrypt_length],\n padding.PKCS1v15(),\n )\n )\n offset += decrypt_length\n if decrypt_data[offset: offset + decrypt_length]:\n de_res.append(\n 
self.private_key.decrypt(\n decrypt_data[offset: offset + decrypt_length],\n padding.PKCS1v15(),\n )\n )\n decrypt_data_res = b''.join(de_res)\n except Exception as e:\n self.error = f\"ERROR: Decryption failed! {e}\"\n return False\n return decrypt_data_res\n\n\nif __name__ == \"__main__\":\n ec = EthCert(\"text\")\n # Generate the private and public keys (the size defaults to 2048)\n ec.generate(4096)\n print(ec.get_publickey())\n print(ec.get_privatekey())\n if ec.save_file():\n ec.serialization()\n origin = \"XiaMen City\"\n # Sign and verify data, method one\n sign = ec.sign_str(origin)\n print(ec.verify(origin, sign))\n # Sign and verify data, method two\n sign = ec.sign2_str(origin)\n print(ec.verify2(origin, sign))\n # Encrypt data\n edata = ec.encrypt_str(origin)\n # Decrypt data\n ddata = ec.decrypt_str(edata)\n print(ddata)\n else:\n print(ec.error)\n\n","sub_path":"le.py","file_name":"le.py","file_ext":"py","file_size_in_byte":11494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"611817011","text":"def nge(arr):\n ng = []\n sol = []\n for i in reversed(range(len(arr))):\n if i == len(arr) - 1:\n sol.append(-1)\n ng.append(arr[i])\n continue\n while ng and arr[i] >= ng[-1]:\n ng.pop()\n if ng:\n sol.append(ng[-1])\n else:\n sol.append(-1)\n ng.append(arr[i])\n return sol\n\ntest_number = input()\ntest_number = int(test_number)\n\nfor _ in range(test_number):\n size_sum = input()\n # size_sum = size_sum.split(\" \")\n # size_sum = [int(x) for x in size_sum]\n # val = size_sum[1]\n\n arr = input()\n arr = arr.split()\n arr = [int(x) for x in arr]\n\n res = nge(arr)\n res.reverse()\n res = [str(x) for x in res]\n res = ' '.join(res)\n print(res)","sub_path":"problems/code_submission/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"585228732","text":"# Source: https://oj.leetcode.com/problems/valid-anagram/\n# Author: renzongxian\n# Date: 2016-04-12\n\n\"\"\"\n\nGiven two strings s and t, write a function to determine if t is an anagram of s.\n\nFor example,\ns = \"anagram\", t = \"nagaram\", return true.\ns = \"rat\", t = \"car\", return false.\n\nNote:\nYou may assume the string contains only lowercase alphabets.\n\n\"\"\"\n\nclass Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n if not s and not t:\n return True\n elif not s or not t:\n return False\n elif len(s) != len(t):\n return False\n \n ss = \"\".join(sorted(list(s)))\n tt = \"\".join(sorted(list(t)))\n for i in range(len(s)):\n if ss[i] != tt[i]:\n return False\n return True\n ","sub_path":"src/validAnagram.py","file_name":"validAnagram.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"192628808","text":"import sys\nfrom datetime import datetime\nfrom operator import itemgetter\n\n\ndef time_transfer(t):\n \"\"\"\n Convert time from string type to datetime type\n :param t: time in string format\n :return: datetime type\n \"\"\"\n return datetime.strptime(t, '%Y-%m-%d %H:%M:%S')\n\n\ndef time_diff(first, last):\n \"\"\"\n Calculate the time period of 2 time stamps in seconds\n :param first: first time stamp\n :param last: second time stamp\n :return: time period in seconds\n \"\"\"\n return int((time_transfer(last) - time_transfer(first)).total_seconds())\n\n\ndef get_line(ip, values):\n \"\"\"\n Get the print line string\n :param ip: the client IP address\n :param values: session record for the ip (first/last timestamps and request count)\n 
:return: string line\n \"\"\"\n return '{0},{1},{2},{3},{4}\\n'.format(\n ip,\n values['first_datetime'],\n values['last_datetime'],\n time_diff(values['first_datetime'], values['last_datetime']) + 1,\n values['request'])\n\n\ndef write_line(l, ip_dic, path):\n \"\"\"\n Write lines to file and remove the corresponding ips from ip_dic\n :param l: list of ips sorted in arriving order\n :param ip_dic: dictionary with ip information\n :param path: output file path\n \"\"\"\n with open(path, 'a') as f:\n for each in l:\n ip = each[2]\n f.write(get_line(ip, ip_dic[ip]))\n ip_dic.pop(ip)\n\n\ndef check_session(ip_dic, cur_time, period, session_path):\n \"\"\"\n Check if any ip has reached the end of its session and write it to the file\n :param ip_dic: dictionary with ip information\n :param cur_time: current time as a string\n :param period: session inactivity period in seconds\n :param session_path: output file path\n \"\"\"\n compare_list = []\n\n # Postpone the check by 1 second to keep the chronological record of entries\n # arriving at the end of the file\n if period != 1:\n period += 1\n\n for ip, v in ip_dic.items():\n if time_diff(ip_dic[ip]['last_datetime'], cur_time) >= period:\n tmp_time = time_transfer(ip_dic[ip]['first_datetime'])\n compare_list.append((tmp_time, ip_dic[ip]['index'], ip))\n\n compare_list.sort(key=itemgetter(0, 1))\n write_line(compare_list, ip_dic, session_path)\n\n\ndef main(argv=sys.argv):\n log_path = argv[1]\n period_path = argv[2]\n session_path = argv[3]\n ip_dic = {}\n\n with open(log_path, 'r') as f:\n lines = f.readlines()[1:]\n\n with open(period_path, 'r') as f:\n period = int(f.readline())\n\n last_time = ''\n index = 0\n for line in lines:\n attrs = line.split(',')\n ip = attrs[0]\n cur_time = attrs[1] + ' ' + attrs[2]\n if last_time == '':\n last_time = cur_time\n\n # if time moves forward by 1 second, update the ip sessions\n if time_diff(last_time, cur_time) >= 1:\n check_session(ip_dic, cur_time, period, session_path)\n index = 0\n\n # update ip information in ip_dic\n if ip not in ip_dic:\n ip_dic.update({ip: {\n 'first_datetime': cur_time,\n 'last_datetime': cur_time,\n 'request': 1,\n 'index': index\n }})\n else:\n ip_dic[ip]['last_datetime'] = cur_time\n ip_dic[ip]['request'] += 1\n ip_dic[ip]['index'] = index\n\n last_time = cur_time\n index += 1\n\n # Retrieve the information of remaining IPs in ip_dic\n if ip_dic:\n compare_list = []\n for ip, v in ip_dic.items():\n tmp_time = time_transfer(ip_dic[ip]['first_datetime'])\n compare_list.append((tmp_time, ip_dic[ip]['index'], ip))\n\n compare_list.sort(key=itemgetter(0, 1))\n write_line(compare_list, ip_dic, session_path)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"insight_testsuite/temp/src/sessionization.py","file_name":"sessionization.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"452072998","text":"import cv2\nimport numpy as np \nimport os\n\n#KNN algo\ndef distance(v1,v2):\n\treturn np.sqrt(((v1-v2)**2).sum())\ndef knn(train,test,k=5):\n\tdist=[]\n\tfor i in range(train.shape[0]):\n\t\tix=train[i,:-1]\n\t\tiy=train[i,-1]\n\n\t\td=distance(test,ix)\n\t\tdist.append([d,iy])\n\t#sort on the distance and get top k\n\tdk=sorted(dist,key=lambda x:x[0])[:k]\n\t#retrieve only the labels\n\tlabels=np.array(dk)[:,-1]\n\t#get frequencies of labels\n\toutput=np.unique(labels,return_counts=True)\n\t#find max frequency and label\n\tindex=np.argmax(output[1])\n\treturn output[0][index]\n\n\n#Initialize 
camera\ncap=cv2.VideoCapture(0)\n#face detection\nface_cascade=cv2.CascadeClassifier(\"haarCascade_frontalface_alt.xml\")\nskip=0\ndataset_path='./data/'\n\nface_data=[]\nlabels=[]\nclass_id=0 #labels for the given file\nnames={} #mapping between id-name\n\n#Data preparation\nfor fx in os.listdir(dataset_path):\n\tif fx.endswith('.npy'):\n\t\tdata_item=np.load(dataset_path+fx)\n\t\tface_data.append(data_item)\n\t\tnames[class_id]=fx[:-4]\n\t\t#create labels for the class\n\t\ttarget=class_id*np.ones((data_item.shape[0],))\n\t\tclass_id+=1\n\t\tlabels.append(target)\n\nface_dataset=np.concatenate(face_data,axis=0)\nface_labels=np.concatenate(labels,axis=0).reshape((-1,1))\t\ntrainset=np.concatenate((face_dataset,face_labels),axis=1)\n\n\n#testing \n\nwhile True:\n\tret,frame=cap.read()\n\tif ret==False:\n\t\tcontinue\n\tfaces=face_cascade.detectMultiScale(frame,1.3,5)\n\t\n\tfor face in faces:\n\t\tx,y,w,h=face\n\t\t#get the face ROI\n\t\toffset=10\n\t\tface_selection=frame[y-offset:y+offset+h,x-offset:x+w+offset]\n\t\tface_selection=cv2.resize(face_selection,(100,100))\n\n\t\t#predicted label(out)\n\t\tout=knn(trainset,face_selection.flatten())\n\n\t\t#display rectangle & name on the screen\n\n\t\tpred_name=names[int(out)] \n\t\tcv2.putText(frame,pred_name,(x,y-10),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2,cv2.LINE_AA)\n\t\tcv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)\n\n\tcv2.imshow(\"Faces\",frame)\n\t\n\tkey=cv2.waitKey(1) & 0xFF\n\tif key==ord('q'):\n\t\tbreak\n\ncap.release()\ncv2.destroyAllWindows()\t\t\n\n\n\n\n\n\n\n\t\n\n\t\t","sub_path":"processing_and_testing.py","file_name":"processing_and_testing.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"468451477","text":"# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\nfrom abc import abstractmethod\n\nfrom pants.backend.jvm.tasks.classpath_entry import ClasspathEntry\nfrom pants.base.build_environment import get_buildroot\nfrom pants.engine.fs import Digest, PathGlobs, PathGlobsAndRoot\nfrom pants.task.task import Task\nfrom pants.util.dirutil import fast_relpath\n\n\nclass ResourcesTask(Task):\n \"\"\"A base class for tasks that process or create resource files.\n\n This base assumes that resources targets or targets that generate resources are independent from\n each other and can be processed in isolation in any order.\n\n :API: public\n \"\"\"\n\n @classmethod\n def product_types(cls):\n return ['runtime_classpath']\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n register('--confs', advanced=True, type=list, default=['default'],\n help='Prepare resources for these Ivy confs.')\n\n @classmethod\n def prepare(cls, options, round_manager):\n round_manager.require_data('compile_classpath')\n\n @property\n def cache_target_dirs(self):\n return True\n\n def execute(self):\n # Tracked and returned for use in tests.\n # TODO: Rewrite those tests. 
execute() is not supposed to return anything.\n processed_targets = []\n\n compile_classpath = self.context.products.get_data('compile_classpath')\n runtime_classpath = self.context.products.get_data('runtime_classpath', compile_classpath.copy)\n\n all_relevant_resources_targets = self.find_all_relevant_resources_targets()\n if not all_relevant_resources_targets:\n return processed_targets\n\n with self.invalidated(targets=all_relevant_resources_targets,\n fingerprint_strategy=self.create_invalidation_strategy(),\n invalidate_dependents=False,\n topological_order=False) as invalidation:\n for vt in invalidation.invalid_vts:\n # Generate resources to the chroot.\n self.prepare_resources(vt.target, vt.results_dir)\n processed_targets.append(vt.target)\n for vt, digest in self._capture_resources(invalidation.all_vts):\n # Register the target's chroot in the products.\n for conf in self.get_options().confs:\n runtime_classpath.add_for_target(vt.target, [(conf, ClasspathEntry(vt.results_dir, digest))])\n\n return processed_targets\n\n def _capture_resources(self, vts):\n \"\"\"Given a list of VersionedTargets, capture DirectoryDigests for all of them.\n\n :returns: A list of tuples of VersionedTargets and digests for their content.\n \"\"\"\n # Capture Snapshots for each directory, using an optional adjacent digest. Create the digest\n # afterward if it does not exist.\n buildroot = get_buildroot()\n snapshots = self.context._scheduler.capture_snapshots(\n tuple(\n PathGlobsAndRoot(\n PathGlobs([os.path.join(fast_relpath(vt.results_dir, buildroot), '**')]),\n buildroot,\n Digest.load(vt.current_results_dir),\n ) for vt in vts\n ))\n result = []\n for vt, snapshot in zip(vts, snapshots):\n snapshot.directory_digest.dump(vt.current_results_dir)\n result.append((vt, snapshot.directory_digest))\n return result\n\n @abstractmethod\n def find_all_relevant_resources_targets(self):\n \"\"\"Returns an iterable over all the relevant resources targets in the context.\"\"\"\n\n def create_invalidation_strategy(self):\n \"\"\"Creates a custom fingerprint strategy for determining invalid resources targets.\n\n :returns: A custom fingerprint strategy to use for determining invalid targets, or `None` to\n use the standard target payload.\n :rtype: :class:`pants.base.fingerprint_strategy.FingerprintStrategy`\n \"\"\"\n return None\n\n @abstractmethod\n def prepare_resources(self, target, chroot):\n \"\"\"Prepares the resources associated with `target` in the given `chroot`.\n\n :param target: The target to prepare resource files for.\n :type target: :class:`pants.build_graph.target.Target`\n :param string chroot: An existing, clean chroot dir to generate `target`'s resources to.\n \"\"\"\n","sub_path":"src/python/pants/backend/jvm/tasks/resources_task.py","file_name":"resources_task.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"291253","text":"import json, boto3, urllib, base64, pprint\n\ns3Client = boto3.client('s3')\nssmClient = boto3.client('ssm')\nnubayRepo = ssmClient.get_parameter(Name='GithubRepository')['Parameter']['Value']\ngithubToken = ssmClient.get_parameter(Name='GithubToken', WithDecryption=True)['Parameter']['Value']\ngithubURL = 'https://api.github.com'\ngithubHeader = { 'Authorization': \"token {}\".format(githubToken) }\ngithubRawHeader = { 'Authorization': \"token {}\".format(githubToken), 'Accept': 'application/vnd.github.v3.raw' }\ngithubContentURL = 
\"{}/repos/{}/contents/{}\".format(githubURL, nubayRepo, 'AWS/cloudformation')\ngithubBranchesURL = \"{}/repos/{}/branches\".format(githubURL, nubayRepo)\ns3Bucket = 'nubay-config3'\n\ndef handlePushEvent(event, context):\n branchesResponse = githubRequest(githubBranchesURL, githubHeader, True)\n for branchDesc in branchesResponse:\n branch = branchDesc['name']\n if not (branch.startswith('stable') or branch.startswith('master')):\n continue\n \n cfFiles = githubRequest(githubContentURL, githubHeader, True)\n if not cfFiles:\n continue\n for f in cfFiles:\n if f['type'] != 'file' or not f['name'].lower().endswith('.json'):\n continue\n cfFileName = f['name']\n cfFileNameBranch = f['name'][:-4] + branch + f['name'][-5:]\n cfFileResponse = githubRequest(\"{}/{}?ref={}\".format(githubContentURL, f['name'], branch), githubRawHeader, False)\n if not cfFileResponse:\n continue\n cfFileBody = bytes.decode(cfFileResponse)\n \n s3FileName = 'cfTemplates/{}'.format(cfFileNameBranch)\n print('cfFile: ' + cfFileName + ', s3File: ' + s3FileName)\n s3FileBody = None\n try:\n s3File = s3Client.get_object(Bucket=s3Bucket, Key=s3FileName)\n s3FileBody = bytes.decode(s3File['Body'].read())\n except s3Client.exceptions.NoSuchKey as e:\n print('Key ' + s3FileName + ' does not exist in S3')\n s3FileBody = ''\n if cfFileBody == s3FileBody:\n print('Files match: {}'.format(cfFileNameBranch))\n else:\n print('Files are not equal; putting S3 file: ' + s3FileName)\n s3Client.put_object(Bucket=s3Bucket, Key=s3FileName, Body=cfFileBody.encode())\n return 'done'\n\ndef githubRequest(uri, header, jsonify):\n print('githubRequest: ' + uri)\n req = urllib.request.Request(uri, None, header)\n try:\n with urllib.request.urlopen(req) as response:\n resp = response.read()\n if jsonify:\n resp = json.loads(resp)\n return resp\n except urllib.error.HTTPError as e:\n print('HTTPError ({}): {}'.format(uri, e.code))\n return None\n except urllib.error.URLError as e:\n print('URLError ({}): {}'.format(uri, e.reason))\n return None\n","sub_path":"AWS/lambda/ProcessGithubPushEvents.py","file_name":"ProcessGithubPushEvents.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"528629415","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\"\"\"\n\nimport logging\nimport Artus.Utility.logger as logger\nlog = logging.getLogger(__name__)\n\nimport ROOT\n\nimport HiggsAnalysis.KITHiggsToTauTau.plotting.modules.analysis_modules.estimatebase as estimatebase\nimport HiggsAnalysis.KITHiggsToTauTau.tools as tools\nimport HiggsAnalysis.KITHiggsToTauTau.uncertainties.uncertainties as uncertainties\n\n\nclass EstimateQcd(estimatebase.EstimateBase):\n\tdef __init__(self):\n\t\tsuper(EstimateQcd, self).__init__()\n\n\tdef modify_argument_parser(self, parser, args):\n\t\tsuper(EstimateQcd, self).modify_argument_parser(parser, args)\n\t\t\n\t\tself.estimate_qcd_options = parser.add_argument_group(\"QCD estimation options\")\n\t\tself.estimate_qcd_options.add_argument(\"--qcd-data-shape-nicks\", nargs=\"+\", default=[\"qcd\"],\n\t\t\t\thelp=\"Nicks for histogram to plot. [Default: %(default)s]\")\n\t\tself.estimate_qcd_options.add_argument(\"--qcd-data-yield-nicks\", nargs=\"+\", default=[\"noplot_data_qcd_yield\"],\n\t\t\t\thelp=\"Nicks for histogram containing the yield in data with the final selection that is then scaled. 
[Default: %(default)s]\")\n\t\tself.estimate_qcd_options.add_argument(\"--qcd-data-control-nicks\", nargs=\"+\", default=[\"noplot_data_qcd_control\"],\n\t\t\t\thelp=\"Nicks for data histograms in the QCD control region. [Default: %(default)s]\")\n\t\tself.estimate_qcd_options.add_argument(\"--qcd-data-substract-nicks\", nargs=\"+\",\n\t\t\t\tdefault=[\"noplot_ztt_mc_qcd_control noplot_zll_qcd_control noplot_ttj_qcd_control noplot_vv_qcd_control noplot_wj_ss\"],\n\t\t\t\thelp=\"Nicks for control region histograms to subtract from data (whitespace separated). [Default: %(default)s]\")\n\t\tself.estimate_qcd_options.add_argument(\"--qcd-extrapolation-factors-ss-os\", nargs=\"+\", type=float, default=[1.06],\n\t\t\t\thelp=\"Extrapolation factors of OS/SS yields. [Default: %(default)s]\")\n\t\tself.estimate_qcd_options.add_argument(\"--qcd-subtract-shape\", action=\"store_true\", default=False,\n\t\t\t\thelp=\"Subtract the shape of control region histograms from data. [Default: %(default)s]\")\n\t\tself.estimate_qcd_options.add_argument(\"--qcd-scale-factor\", default=1.0, type=float,\n\t\t\t\thelp=\"Scale QCD by this factor. [Default: %(default)s]\")\n\n\tdef prepare_args(self, parser, plotData):\n\t\tsuper(EstimateQcd, self).prepare_args(parser, plotData)\n\t\t\n\t\tself._plotdict_keys = [\"qcd_data_shape_nicks\", \"qcd_data_yield_nicks\", \"qcd_data_control_nicks\", \"qcd_data_substract_nicks\", \"qcd_extrapolation_factors_ss_os\", \"qcd_subtract_shape\"]\n\t\tself.prepare_list_args(plotData, self._plotdict_keys)\n\t\t\n\t\tplotData.plotdict[\"qcd_data_substract_nicks\"] = [nicks.split() for nicks in plotData.plotdict[\"qcd_data_substract_nicks\"]]\n\t\t\n\t\t# make sure that all necessary histograms are available\n\t\tfor nicks in zip(*[plotData.plotdict[key] for key in self._plotdict_keys]):\n\t\t\tfor nick in nicks:\n\t\t\t\tif isinstance(nick, basestring):\n\t\t\t\t\tassert isinstance(plotData.plotdict[\"root_objects\"].get(nick), ROOT.TH1)\n\t\t\t\telif (not isinstance(nick, float) and not isinstance(nick, bool)):\n\t\t\t\t\tfor subnick in nick:\n\t\t\t\t\t\tassert isinstance(plotData.plotdict[\"root_objects\"].get(subnick), ROOT.TH1)\n\t\t\n\t\t#if any(plotData.plotdict[\"qcd_subtract_shape\"]):\n\t\t\t#log.warning(\"Shape subtraction for QCD estimation is currently not supported! 
The calculations are instead done on the yields.\")\n\t\n\tdef run(self, plotData=None):\n\t\tsuper(EstimateQcd, self).run(plotData)\n\t\t\n\t\tfor qcd_data_shape_nick, qcd_data_yield_nick, qcd_data_control_nick, qcd_data_substract_nicks, qcd_extrapolation_factor_ss_os, qcd_subtract_shape in zip(*[plotData.plotdict[key] for key in self._plotdict_keys]):\n\t\t\t\n\t\t\tyield_data_control = tools.PoissonYield(plotData.plotdict[\"root_objects\"][qcd_data_control_nick])()\n\t\t\tyield_qcd_control = yield_data_control\n\t\t\tfor nick in qcd_data_substract_nicks:\n\t\t\t\tyield_bkg_control = tools.PoissonYield(plotData.plotdict[\"root_objects\"][nick])()\n\t\t\t\tif nick in plotData.metadata:\n\t\t\t\t\tyield_bkg_control = uncertainties.ufloat(\n\t\t\t\t\t\t\tplotData.metadata[nick].get(\"yield\", yield_bkg_control.nominal_value),\n\t\t\t\t\t\t\tplotData.metadata[nick].get(\"yield_unc\", yield_bkg_control.std_dev)\n\t\t\t\t\t)\n\t\t\t\tyield_qcd_control -= yield_bkg_control\n\t\t\t\t\n\t\t\t\tif qcd_subtract_shape:\n\t\t\t\t\tplotData.plotdict[\"root_objects\"][qcd_data_control_nick].Add(plotData.plotdict[\"root_objects\"][nick], -1.0/plotData.plotdict[\"qcd_scale_factor\"])\n\t\t\t\n\t\t\tif qcd_subtract_shape:\n\t\t\t\tplotData.plotdict[\"root_objects\"][qcd_data_shape_nick] = plotData.plotdict[\"root_objects\"][qcd_data_control_nick]\n\t\t\t\n\t\t\tyield_qcd_control = max(0.0, yield_qcd_control)\n\t\t\t\n\t\t\tscale_factor = yield_qcd_control * qcd_extrapolation_factor_ss_os\n\t\t\tif yield_data_control != 0.0:\n\t\t\t\tscale_factor /= yield_data_control\n\t\t\t\n\t\t\tfinal_yield = tools.PoissonYield(plotData.plotdict[\"root_objects\"][qcd_data_yield_nick])() * scale_factor\n\t\t\tlog.debug(\"Relative statistical uncertainty of the yield for process QCD (nick \\\"{nick}\\\") is {unc}.\".format(nick=qcd_data_shape_nick, unc=final_yield.std_dev/final_yield.nominal_value if final_yield.nominal_value != 0.0 else 0.0))\n\t\t\t\n\t\t\tplotData.metadata[qcd_data_shape_nick] = {\n\t\t\t\t\"yield\" : final_yield.nominal_value,\n\t\t\t\t\"yield_unc\" : final_yield.std_dev,\n\t\t\t\t\"yield_unc_rel\" : abs(final_yield.std_dev/final_yield.nominal_value if final_yield.nominal_value != 0.0 else 0.0),\n\t\t\t}\n\t\t\t\n\t\t\tintegral_shape = tools.PoissonYield(plotData.plotdict[\"root_objects\"][qcd_data_shape_nick])()\n\t\t\tif integral_shape != 0.0:\n\t\t\t\tscale_factor = final_yield / integral_shape\n\t\t\t\tlog.debug(\"Scale factor for process QCD (nick \\\"{nick}\\\") is {scale_factor}.\".format(nick=qcd_data_shape_nick, scale_factor=scale_factor))\n\t\t\t\tplotData.plotdict[\"root_objects\"][qcd_data_shape_nick].Scale(scale_factor.nominal_value)\n","sub_path":"python/plotting/modules/analysis_modules/estimateqcd.py","file_name":"estimateqcd.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"36532102","text":"# -*- coding: utf-8 -*-\n\nfrom .exception import CleoException\n\n\nclass CommandNotFound(CleoException):\n\n def __init__(self, name, alternatives=None, code=1):\n if alternatives is None:\n alternatives = []\n\n self._name = name\n self._alternatives = alternatives\n\n super(CommandNotFound, self).__init__(self.message, code=1)\n\n @property\n def message(self):\n message = 'Command \"{}\" is not defined.'.format(self._name)\n\n if self.alternatives:\n if len(self._alternatives) == 1:\n message += '\\n\\nDid you mean this?\\n '\n else:\n message += '\\n\\nDid you mean one of these?\\n '\n\n 
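# each suggested alternative goes on its own indented line\n 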
message += '\\n '.join(self._alternatives)\n\n return message\n\n @property\n def alternatives(self):\n return self._alternatives\n\n\nclass NamespaceNotFound(CommandNotFound):\n\n @property\n def message(self):\n message = 'There are no commands defined in the \"{}\" namespace.'.format(self._name)\n\n if self.alternatives:\n if len(self._alternatives) == 1:\n message += '\\n\\nDid you mean this?\\n '\n else:\n message += '\\n\\nDid you mean one of these?\\n '\n\n message += '\\n '.join(self._alternatives)\n\n return message\n\n\nclass AmbiguousCommand(CommandNotFound):\n\n @property\n def message(self):\n message = 'Command \"{}\" is ambiguous ({}).'.format(\n self._name, self._get_abbreviation_suggestions()\n )\n\n return message\n\n def _get_abbreviation_suggestions(self):\n \"\"\"\n Returns abbreviated suggestions in string format.\n\n :rtype: str\n \"\"\"\n rest = ''\n if len(self._alternatives) > 2:\n rest = ' and {} more'.format(len(self._alternatives) - 2)\n\n return '{}, {}{}'.format(\n self._alternatives[0], self._alternatives[1], rest\n )\n\n\nclass AmbiguousNamespace(AmbiguousCommand):\n \n @property\n def message(self):\n message = 'The namespace \"{}\" is ambiguous ({}).'.format(\n self._name, self._get_abbreviation_suggestions()\n )\n\n return message\n","sub_path":"cleo/exceptions/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"25218784","text":"## QUESTÃO 4 ##\n# Escreva um programa para aprovar o empréstimo bancário para compra de uma casa. \n# O programa deve perguntar o valor da casa a comprar, o salário e a quantidade de anos \n# a pagar. O valor da prestação mensal não pode ser superior a 30% do salário. Calcule o \n# valor da prestação como sendo o valor da casa a comprar dividido pelo número de \n# meses a pagar.\n##\n\n\n##\n# A sua resposta da questão deve ser desenvolvida dentro da função main()!!! \n# Deve-se substituir o comado print existente pelo código da solução.\n# Para a correta execução do programa, a estrutura atual deve ser mantida,\n# substituindo apenas o comando print(questão...) existente.\n##\ndef main():\n\n # Programa que avalia a concessão de empréstimo para a compra de uma casa.\n\n valor_casa = float(input('Digite o valor da casa a comprar: '))\n salario = float(input('Digite seu salário: '))\n anos_a_pagar = float(input('Digite a quantidade de anos a pagar: '))\n prestacao = valor_casa / (anos_a_pagar * 12)\n if (prestacao > (salario * 0.30)):\n print('Empréstimo reprovado.')\n else:\n print('Empréstimo aprovado.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"questoes/questao_4.py","file_name":"questao_4.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"274533203","text":"import os\nimport uuid\nimport zipfile\n\nfrom pathlib import Path\n\nfrom django.core import exceptions\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . 
import validators\n\n\nclass CompressField(models.FileField):\n description = _('Compress path')\n\n default_validators = [\n validators.validate_file_extension,\n validators.validate_zip_compression\n ]\n\n def pre_save(self, model_instance, add):\n value = super().pre_save(model_instance, add)\n\n if value.name:\n outpath = Path(value.path).with_suffix('')\n\n if not outpath.is_dir() and zipfile.is_zipfile(value):\n with zipfile.ZipFile(value) as zip_file:\n zip_file.extractall(outpath.as_posix())\n\n return value\n\n\nclass SymlinkField(models.Field):\n description = _('Symlink path')\n default_error_messages = {\n 'invalid': _(\"'%(value)s' is not a valid path\"),\n }\n\n def __init__(self, verbose_name=None, name=None, **kwargs):\n kwargs.setdefault('max_length', 255)\n source = kwargs.pop('source', None)\n\n if source is None:\n raise exceptions.FieldError(\n \"{} requires a 'source' argument\".format(\n self.__class__.__name__\n )\n )\n\n self.source = source\n super().__init__(verbose_name, name, **kwargs)\n\n def contribute_to_class(self, cls, name):\n self.symlink_attname = \"_symlink_{}\".format(name)\n models.signals.post_init.connect(self._save_initial, sender=cls)\n super().contribute_to_class(cls, name)\n\n def _save_initial(self, sender, instance, **kwargs):\n setattr(\n instance,\n self.symlink_attname,\n Path(getattr(instance, self.attname))\n )\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs['source'] = self.source\n\n if kwargs.get('max_length') == 255:\n del kwargs['max_length']\n\n return name, path, args, kwargs\n\n def get_prep_value(self, value):\n value = super().get_prep_value(value)\n if value is None:\n return None\n return str(value)\n\n def get_source_path(self, instance):\n return Path(getattr(instance, self.source).path)\n\n def pre_save(self, model_instance, add):\n value = Path(getattr(model_instance, self.attname))\n previous = getattr(model_instance, self.symlink_attname)\n path = self.get_source_path(model_instance).with_suffix('')\n\n if value.is_absolute():\n # Absolute to relative path\n value = value.resolve().relative_to(path)\n\n # Search the index preview\n file_index_path = next((path / value).glob('**/index.html'), None)\n\n # Relative path to index\n if file_index_path is not None:\n value = file_index_path.relative_to(path).parent\n\n # Any changes?\n if previous.resolve() == (path / value).resolve():\n return previous\n\n # Create the symlink\n symlink = path.parent / uuid.uuid4().hex\n symlink.symlink_to(path.name / value, target_is_directory=True)\n\n if previous.is_symlink():\n previous.unlink()\n\n setattr(model_instance, self.attname, symlink.as_posix())\n self._save_initial(model_instance.__class__, model_instance)\n\n return symlink\n\n def get_internal_type(self):\n return 'FilePathField'\n\n def to_python(self, value):\n if value and (Path(value).is_absolute() or os.pardir in value):\n raise exceptions.ValidationError(\n self.error_messages['invalid'],\n code='invalid',\n params={'value': value})\n\n return value\n","sub_path":"arcgis_marketplace/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"403629244","text":"class Solution:\n \"\"\"\n @param triangle: a list of lists of integers\n @return: An integer, minimum path sum\n \"\"\"\n\n # Method3: Memoization, O(n^2)\n def minimumTotal(self, triangle):\n if not triangle or not triangle[0]:\n return 0\n\n return 
self.dfs(triangle, 0, 0, {})\n\n def dfs(self, triangle, x, y, mem):\n if len(triangle) == x:\n return 0\n\n if (x, y) in mem:\n return mem[(x, y)]\n\n left = self.dfs(triangle, x + 1, y, mem)\n right = self.dfs(triangle, x + 1, y + 1, mem)\n\n mem[(x, y)] = min(left, right) + triangle[x][y]\n\n return min(left, right) + triangle[x][y]\n\n # Method2: Divide & Conquer O(2^n) time\n # def minimumTotal(self, triangle):\n # if not triangle or not triangle[0]:\n # return 0\n\n # return self.dfs(triangle, 0, 0)\n\n # def dfs(self, triangle, x, y):\n # if len(triangle) == x:\n # return 0\n\n # left = self.dfs(triangle, x + 1, y)\n # right = self.dfs(triangle, x + 1, y + 1)\n\n # return min(left, right) + triangle[x][y]\n\n # Method1: Pure Traverse O(2^n) time\n # def minimumTotal(self, triangle):\n # if not triangle or not triangle[0]:\n # return 0\n\n # self.min_sum = sys.maxsize\n # self.dfs(triangle, 0, 0, 0)\n # return self.min_sum\n\n # def dfs(self, triangle, x, y, cur_sum):\n # if len(triangle) == x:\n # self.min_sum = min(self.min_sum, cur_sum)\n # return\n\n # self.dfs(triangle, x + 1, y, cur_sum + triangle[x][y])\n # self.dfs(triangle, x + 1, y + 1, cur_sum + triangle[x][y])\n","sub_path":"109_Triangle.py","file_name":"109_Triangle.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"652747327","text":"from vizydrop.sdk.application import Application\n\nfrom .authentication import TrelloOAuth, TrelloTokenAuth\nfrom .cards import TrelloCardSource\n\n\nclass Trello(Application):\n class Meta:\n version = \"1.0\"\n name = \"Trello\"\n website = \"http://www.trello.com/\"\n color = \"#0076C0\"\n description = \"Infinitely flexible. Incredibly easy to use. Great mobile apps. It's free. 
\" \\\n \"Trello keeps track of everything, from the big picture to the minute details.\"\n tags = ['kanban', 'project management', ]\n\n authentication = [TrelloOAuth, TrelloTokenAuth, ]\n\n sources = [TrelloCardSource, ]\n","sub_path":"trello/trelloapp.py","file_name":"trelloapp.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"526232406","text":"#!/usr/bin/env python\nimport sys\nfrom os.path import expanduser\nimport difflib\n#from difflib_data import *\n\nbase_dir = expanduser(\"~/ansible/CFG/HPN/REPOS/CHANGES/\")\n\n# Task uses the names of files to compare and the output file\nfile1 = base_dir + sys.argv[1] + sys.argv[2]\nfile2 = base_dir + sys.argv[1] + sys.argv[3] \nfileo = base_dir + sys.argv[1] + sys.argv[4]\nlineAdd = False\nlineDel = False\n\nwith open(file1,'r') as f:\n flines = f.readlines()\n \nwith open(file2,'r') as g:\n glines = g.readlines()\n\n# d = difflib.Differ()\n# diff = d.compare(flines, glines)\n# print(\"\\n\".join(diff))\n\ndiff = difflib.ndiff(flines,glines)\n\nchanges = [l for l in diff if l.startswith('+ ') or l.startswith('- ')]\n\nwith open(fileo,'w') as f:\n for c in changes:\n if c.startswith('- '):\n if lineDel == False:\n lineAdd = False \n f.write('!\\n')\n lineDel = True\n f.write(c.replace('- ', 'no '))\n if c.startswith('+ '):\n if lineAdd == False:\n lineDel = False\n f.write('!\\n')\n lineAdd = True\n f.write(c.replace('+ ', ''))\n f.write('!\\n')\n","sub_path":"Ansible/LAB/scripts/diff2.py","file_name":"diff2.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"520354048","text":"from Tkinter import *\n\nclass myTkinterApp():\n\n\tdef __init__(self):\n\t\tself.myApp = Tk()\n\t\tself.myApp.title(\"MAVEN MANAGEMENT\")\n\n\t\tself.mainApp()\n\n\t\tself.myApp.mainloop()\n\n\tdef mainApp(self):\n\t\tmiPrimeraEtiquitea = Label(self.myApp, text=\"Hola Mundo!\")\n\t\tmiPrimeraEtiquitea.pack()\n\nif __name__ == \"__main__\":\n\tmyTkinterApp = myTkinterApp()","sub_path":"ejemplos/Tkinter/myTkinterApp.py","file_name":"myTkinterApp.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"239993468","text":"# -*- coding: utf-8 -*-\n\"\"\" Implements a Habitica synchronisation task.\nThis is borrowed essentially wholesale from scriptabit by DeeDee (see README).\n\"\"\"\n# Ensure backwards compatibility with Python 2\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals)\nfrom builtins import *\nfrom datetime import datetime\nfrom tzlocal import get_localzone\nimport time\n\nfrom .dates import parse_date_utc\nfrom .task import CharacterAttribute, ChecklistItem, Difficulty, Task\n\nclass HabTask(object):\n def __init__(self, task_dict=None):\n \"\"\" Initialise the task.\n\n Args: task_dict (dict): the Todoist task dictionary, as released by task_all.\n \"\"\"\n super().__init__()\n\n if not task_dict:\n task_dict = {'text': 'Todoist todo'}\n\n if not isinstance(task_dict, dict):\n raise TypeError(type(task_dict))\n\n self.__task_dict = task_dict\n\n if 'priority' not in task_dict:\n task_dict['priority'] = Difficulty.default.value\n\n if 'attribute' not in task_dict:\n task_dict['attribute'] = CharacterAttribute.default.value\n\n # The Habitica API chokes if you attempt to update a task with a\n # checklist in the request data. 
To work around this we move the\n # checklist (if any) out of task_dict so it can be handled separately.\n # We also need separate API calls for deleted, added, and updated\n # checklist items\n self.new_checklist_items = []\n if 'checklist' in list(task_dict.keys()):\n self.existing_checklist_items = task_dict['checklist']\n del task_dict['checklist']\n else:\n self.existing_checklist_items = []\n\n @property\n def task_dict(self):\n \"\"\" Gets the internal task dictionary. \"\"\"\n return self.__task_dict\n\n @property\n #When will this recurring task next be due?\n def starting(self):\n if self.__task_dict['type'] == 'daily':\n start = self.__task_dict['startDate']\n else:\n start = ''\n return start\n \n @starting.setter\n def starting(self, starting):\n \"\"\" Start date \"\"\"\n self.__task_dict['startDate'] = starting\n \n @property\n #Is this a weekly daily or something that repeats every X days?\n def rep_pattern(self):\n if self.__task_dict['type'] == 'daily':\n return self.__task_dict['frequency']\n else:\n return ''\n \n @rep_pattern.setter\n def rep_pattern(self, rep):\n \"\"\" Repeat frequency \"\"\"\n self.__task_dict['frequency'] = rep\n \n @property\n #What days of the week does this daily repeat--or in how many days?\n def dailies_due(self):\n if self.__task_dict['type'] == 'daily':\n if self.__task_dict['frequency'] == 'weekly':\n days = []\n if self.__task_dict['repeat'][\"m\"] == True:\n days.append(\"monday\")\n if self.__task_dict['repeat'][\"t\"] == True:\n days.append(\"tuesday\")\n if self.__task_dict['repeat'][\"w\"] == True:\n days.append(\"wednesday\")\n if self.__task_dict['repeat'][\"th\"] == True:\n days.append(\"thursday\")\n if self.__task_dict['repeat'][\"f\"] == True:\n days.append(\"friday\")\n if self.__task_dict['repeat'][\"s\"] == True:\n days.append(\"saturday\")\n if self.__task_dict['repeat'][\"su\"] == True:\n days.append(\"sunday\")\n # join the active weekdays with ', ' so the result reads like 'ev monday, friday'\n due_dates = 'ev ' + ', '.join(days)\n return due_dates\n else:\n dayCycle = self.__task_dict['everyX']\n return dayCycle\n else:\n return ''\n \n @property\n #Is this task due today?\n def due_now(self):\n # TODO: stub, the actual due-date calculation is not implemented yet\n now = time.strftime('%Y-%m-%d %H:%M:%S')\n if self.__task_dict['type'] == 'daily':\n return ''\n else:\n return ''\n\n @property\n #is task complete? 
0 for no, 1 for yes\n def complete(self):\n return self.__task_dict['checked']\n \n @property\n def id(self):\n \"\"\" Task id \"\"\"\n return self.__task_dict['_id']\n\n @property\n def name(self):\n \"\"\" Task name \"\"\"\n return self.__task_dict['text']\n\n @name.setter\n def name(self, name):\n \"\"\" Task name \"\"\"\n self.__task_dict['text'] = name\n \n @property\n def alias(self):\n \"\"\" Task name \"\"\"\n return self.__task_dict['alias']\n\n @property\n def date(self):\n \"\"\" Task name \"\"\"\n if self.__task_dict['type'] == 'todo':\n try:\n return self.__task_dict['date']\n except:\n return ''\n else:\n return self.__task_dict['startDate']\n\n @property\n def dueToday(self):\n \"\"\"This is intended to tell us if a given daily is due today or not.\"\"\"\n from datetime import datetime\n from dateutil import parser\n\n now = datetime.today().date()\n\n if self.__task_dict['type'] == 'daily':\n datestr = self.__task_dict['startDate']\n startDate = parser.parse(datestr).date()\n type = self.__task_dict['frequency']\n \n if startDate >= now:\n return False\n elif type == 'weekly':\n \n weekDay = now.weekday()\n if weekDay == 0:\n return (self.__task_dict['repeat']['m'])\n elif weekDay == 1:\n return (self.__task_dict['repeat']['t'])\n elif weekDay == 2:\n return (self.__task_dict['repeat']['w'])\n elif weekDay == 3:\n return (self.__task_dict['repeat']['th'])\n elif weekDay == 4:\n return (self.__task_dict['repeat']['f'])\n elif weekDay == 5:\n return (self.__task_dict['repeat']['s'])\n elif weekDay == 6:\n return (self.__task_dict['repeat']['su'])\n else:\n return \"Error: what day is it\" \n elif type == 'daily':\n evXdays = self.__task_dict['everyX']\n if evXdays > 1:\n daysSinceStart = now - startDate\n return (daysSinceStart.days % evXdays == 0) \n else:\n return True \n else:\n return \"Error: check your daily?\"\n else:\n return 'TODO, NA'\n\n @property\n def category(self):\n \"\"\" Task type \"\"\"\n return self.__task_dict['type']\n\n @category.setter\n def category(self, name):\n \"\"\" Task name \"\"\"\n self.__task_dict['type'] = name \n\n @property\n def description(self):\n \"\"\" Task description \"\"\"\n return self.__task_dict['notes']\n\n @description.setter\n def description(self, description):\n \"\"\" Task description \"\"\"\n self.__task_dict['notes'] = description\n\n @property\n def completed(self):\n \"\"\" Task completed \"\"\"\n return self.__task_dict['completed']\n\n @completed.setter\n def completed(self, completed):\n \"\"\" Task completed \"\"\"\n self.__task_dict['completed'] = completed\n\n @property\n def difficulty(self):\n \"\"\" Task difficulty \"\"\"\n return Difficulty.from_value(self.__task_dict['priority'])\n\n @difficulty.setter\n def difficulty(self, difficulty):\n \"\"\" Task difficulty \"\"\"\n if not isinstance(difficulty, Difficulty):\n raise TypeError\n self.__task_dict['priority'] = difficulty.value\n\n @property\n def attribute(self):\n \"\"\" Task character attribute \"\"\"\n return CharacterAttribute.from_value(self.__task_dict['attribute'])\n\n @attribute.setter\n def attribute(self, attribute):\n \"\"\" Task character attribute \"\"\"\n if not isinstance(attribute, CharacterAttribute):\n raise TypeError\n self.__task_dict['attribute'] = attribute.value\n\n @property\n def due_date(self):\n \"\"\" The due date if there is one, or None. 
\"\"\"\n datestr = self.__task_dict.get('date', None)\n if datestr:\n return parse_date_utc(datestr, milliseconds=True)\n return None\n\n @due_date.setter\n def due_date(self, due_date):\n \"\"\" Sets or clears the due date. \"\"\"\n if due_date and not isinstance(due_date, datetime):\n raise TypeError\n if due_date:\n self.__task_dict['date'] = \\\n due_date.astimezone(get_localzone()).date()\n elif 'date' in self.__task_dict:\n del self.__task_dict['date']\n\n @property\n def last_modified(self):\n \"\"\" The last modified timestamp in UTC. \"\"\"\n timestamp = self.__task_dict['updatedAt']\n if timestamp:\n return parse_date_utc(timestamp)\n\n @property\n def checklist(self):\n \"\"\" The checklist.\n\n Returns:\n list: The checklist, or an empty list if there are no\n checklist items.\n \"\"\"\n checklist = []\n\n for i in self.new_checklist_items:\n checklist.append(ChecklistItem(\n name=i['text'],\n checked=i['completed']))\n\n for i in self.existing_checklist_items:\n checklist.append(ChecklistItem(\n name=i['text'],\n checked=i['completed']))\n\n return checklist\n\n @checklist.setter\n def checklist(self, checklist):\n \"\"\" Sets, or clears the checklist. \"\"\"\n for i in checklist:\n # create new item\n self.new_checklist_items.append({\n 'text': i.name,\n 'completed': i.checked})\n","sub_path":"habiticaTodo/hab_task.py","file_name":"hab_task.py","file_ext":"py","file_size_in_byte":9925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"473436536","text":"import random\nimport torch\nimport matplotlib.pyplot as plt\nfrom torchvision.utils import make_grid\n\n\n\"\"\"\nUtilities to help with visualizing images and other data\n\"\"\"\n\n\ndef tensor_to_image(tensor):\n \"\"\"\n Convert a torch tensor into a numpy ndarray for visualization.\n\n Inputs:\n - tensor: A torch tensor of shape (3, H, W) with elements in the range [0, 1]\n\n Returns:\n - ndarr: A uint8 numpy array of shape (H, W, 3)\n \"\"\"\n tensor = tensor.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0)\n ndarr = tensor.to('cpu', torch.uint8).numpy()\n return ndarr\n\n\ndef visualize_dataset(X_data, y_data, samples_per_class, class_list):\n \"\"\"\n Make a grid-shape image to plot\n\n Inputs:\n - X_data: set of [batch, 3, width, height] data\n - y_data: paired label of X_data in [batch] shape\n - samples_per_class: number of samples want to present\n - class_list: list of class names\n e.g.) 
['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\n    Outputs:\n    - A grid image that visualizes samples_per_class samples per class\n    \"\"\"\n    img_half_width = X_data.shape[2] // 2\n    samples = []\n    for y, cls in enumerate(class_list):\n        tx = -4\n        ty = (img_half_width * 2 + 2) * y + (img_half_width + 2)\n        plt.text(tx, ty, cls, ha='right')\n        idxs = (y_data == y).nonzero().view(-1)\n        for i in range(samples_per_class):\n            idx = idxs[random.randrange(idxs.shape[0])].item()\n            samples.append(X_data[idx])\n\n    img = make_grid(samples, nrow=samples_per_class)\n    return tensor_to_image(img)\n","sub_path":"A1/eecs598/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"73327583","text":"\"\"\" Client \"\"\"\nimport socket, pickle\n\ndef programa_cliente():\n    \"\"\" Function to start the client program \"\"\"\n\n    # both programs run on the same machine, so the host is the same\n    host = socket.gethostname()\n    porta = 5000 # port number of the server socket\n\n    socket_cliente = socket.socket() # get the client socket instance\n    socket_cliente.connect((host, porta)) # connect to the server via the (host, port) tuple\n\n    saldo_medio = float(input(\" Saldo médio do cliente: \")) # read the client's average balance over the last year\n    socket_cliente.send(pickle.dumps(saldo_medio))\n\n    socket_cliente.close() # close the connection\n\nif __name__ == '__main__':\n    programa_cliente()\n","sub_path":"Lista 1/Exercício 8/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"380804143","text":"from ..base import to_attr\nfrom ..motion import MotionWidget\nfrom .base_mpl import MplPlotter\n\nimport numpy as np\nfrom matplotlib.colors import Normalize\n\n\nclass MotionPlotter(MplPlotter):\n    def do_plot(self, data_plot, **backend_kwargs):\n        import matplotlib.pyplot as plt\n        from spikeinterface.sortingcomponents.motion_interpolation import correct_motion_on_peaks\n\n        dp = to_attr(data_plot)\n        backend_kwargs = self.update_backend_kwargs(**backend_kwargs)\n\n        assert backend_kwargs[\"axes\"] is None\n        assert backend_kwargs[\"ax\"] is None\n\n        self.make_mpl_figure(**backend_kwargs)\n        fig = self.figure\n        fig.clear()\n\n        is_rigid = dp.motion.shape[1] == 1\n\n        gs = fig.add_gridspec(2, 2, wspace=0.3, hspace=0.3)\n        ax0 = fig.add_subplot(gs[0, 0])\n        ax1 = fig.add_subplot(gs[0, 1])\n        ax2 = fig.add_subplot(gs[1, 0])\n        if not is_rigid:\n            ax3 = fig.add_subplot(gs[1, 1])\n        ax1.sharex(ax0)\n        ax1.sharey(ax0)\n\n        if dp.motion_lim is None:\n            motion_lim = np.max(np.abs(dp.motion)) * 1.05\n        else:\n            motion_lim = dp.motion_lim\n\n        if dp.times is None:\n            temporal_bins_plot = dp.temporal_bins\n            x = dp.peaks[\"sample_index\"] / dp.sampling_frequency\n        else:\n            # use real times and adjust temporal bins with t_start\n            temporal_bins_plot = dp.temporal_bins + dp.times[0]\n            x = dp.times[dp.peaks[\"sample_index\"]]\n\n        corrected_location = correct_motion_on_peaks(\n            dp.peaks,\n            dp.peak_locations,\n            dp.sampling_frequency,\n            dp.motion,\n            dp.temporal_bins,\n            dp.spatial_bins,\n            direction=\"y\",\n        )\n\n        y = dp.peak_locations[\"y\"]\n        y2 = corrected_location[\"y\"]\n        if dp.scatter_decimate is not None:\n            x = x[:: dp.scatter_decimate]\n            y = y[:: dp.scatter_decimate]\n            y2 = y2[:: dp.scatter_decimate]\n\n        if dp.color_amplitude:\n            amps = 
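dp.peaks[\"amplitude\"]\n            # [illustrative sketch] the coloring below maps amplitudes to RGBA through\n            # a matplotlib colormap; the same pattern in isolation (standalone sketch,\n            # not spikeinterface API):\n            #\n            #   import numpy as np\n            #   import matplotlib.pyplot as plt\n            #   from matplotlib.colors import Normalize\n            #   vals = np.array([0.1, 0.5, 2.0])\n            #   rgba = plt.get_cmap(\"inferno\")(Normalize(vmin=0, vmax=1, clip=True)(vals))\n            amps = 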
dp.peaks[\"amplitude\"]\n amps_abs = np.abs(amps)\n q_95 = np.quantile(amps_abs, 0.95)\n if dp.scatter_decimate is not None:\n amps = amps[:: dp.scatter_decimate]\n amps_abs = amps_abs[:: dp.scatter_decimate]\n cmap = plt.get_cmap(dp.amplitude_cmap)\n if dp.amplitude_clim is None:\n amps = amps_abs\n amps /= q_95\n c = cmap(amps)\n else:\n norm_function = Normalize(vmin=dp.amplitude_clim[0], vmax=dp.amplitude_clim[1], clip=True)\n c = cmap(norm_function(amps))\n color_kwargs = dict(\n color=None,\n c=c,\n alpha=dp.amplitude_alpha,\n )\n else:\n color_kwargs = dict(color=\"k\", c=None, alpha=dp.amplitude_alpha)\n\n ax0.scatter(x, y, s=1, **color_kwargs)\n if dp.depth_lim is not None:\n ax0.set_ylim(*dp.depth_lim)\n ax0.set_title(\"Peak depth\")\n ax0.set_xlabel(\"Times [s]\")\n ax0.set_ylabel(\"Depth [um]\")\n\n ax1.scatter(x, y2, s=1, **color_kwargs)\n ax1.set_xlabel(\"Times [s]\")\n ax1.set_ylabel(\"Depth [um]\")\n ax1.set_title(\"Corrected peak depth\")\n\n ax2.plot(temporal_bins_plot, dp.motion, alpha=0.2, color=\"black\")\n ax2.plot(temporal_bins_plot, np.mean(dp.motion, axis=1), color=\"C0\")\n ax2.set_ylim(-motion_lim, motion_lim)\n ax2.set_ylabel(\"Motion [um]\")\n ax2.set_title(\"Motion vectors\")\n axes = [ax0, ax1, ax2]\n\n if not is_rigid:\n im = ax3.imshow(\n dp.motion.T,\n aspect=\"auto\",\n origin=\"lower\",\n extent=(\n temporal_bins_plot[0],\n temporal_bins_plot[-1],\n dp.spatial_bins[0],\n dp.spatial_bins[-1],\n ),\n )\n im.set_clim(-motion_lim, motion_lim)\n cbar = fig.colorbar(im)\n cbar.ax.set_xlabel(\"motion [um]\")\n ax3.set_xlabel(\"Times [s]\")\n ax3.set_ylabel(\"Depth [um]\")\n ax3.set_title(\"Motion vectors\")\n axes.append(ax3)\n self.axes = np.array(axes)\n\n\nMotionPlotter.register(MotionWidget)\n","sub_path":"src/spikeinterface/widgets/matplotlib/motion.py","file_name":"motion.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"280144229","text":"#!usr/bin/python3.7\n#Author: DulLah ©2019\n#contact: fb.me/dulahz\n#github: github.com/dz-id\n\nimport os, sys, requests, mechanize\nfrom bs4 import BeautifulSoup as parser\n\nglobal W, G, R\nif sys.platform in ['linux','linux2']:\n\tW = '\\033[0m'\n\tG = '\\033[1;92m'\n\tR = '\\033[1;91m'\n\tY = '\\033[1;93m'\nelse:\n\tW = ''\n\tG = ''\n\tR = ''\n\tY = ''\n\nclass Checkpoint:\n\tdef __init__(self):\n\t\tself.Main()\n\t\n\tdef Continue(self):\n\t\ttry:\n\t\t\tself.br.open('https://mbasic.facebook.com/login/checkpoint/?ref=dbl')\n\t\t\tself.br._factory.is_html = True\n\t\t\tself.br.select_form(nr=0)\n\t\t\tcek = self.br.submit().read()\n\t\t\ttipe = parser(cek, 'html.parser')\n\t\t\tfor i in tipe.find_all('option'):\n\t\t\t\tprint(Y + ' - ' + i.text)\n\t\t\tprint(W + '-'*45)\n\t\texcept: pass\n\t\t\t\n\tdef Login(self,user):\n\t\ttry:\n\t\t\tprint(W + '[' + G + '*' + W + '] trying login ->',user)\n\t\t\tself.br = mechanize.Browser()\n\t\t\tself.br.set_handle_equiv(True)\n\t\t\tself.br.set_handle_gzip(True)\n\t\t\tself.br.set_handle_redirect(True)\n\t\t\tself.br.set_handle_referer(True)\n\t\t\tself.br.set_handle_robots(False)\n\t\t\tself.br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\n\t\t\tself.br.addheaders = [('User-Agent', open('UserAgent/ua.txt').read())]\n\t\t\tself.br.open('https://mbasic.facebook.com')\n\t\t\tself.br._factory.is_html = True\n\t\t\tself.br.select_form(nr=0)\n\t\t\tself.br.form['email'] = user.split('|')[0]\n\t\t\tself.br.form['pass'] = user.split('|')[1]\n\t\t\tsub = 
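None  # placeholder; sub is assigned for real just below\n\t\t\t# [illustrative sketch] the login flow here is plain mechanize form filling;\n\t\t\t# a minimal standalone version of the same pattern (URL and credentials are\n\t\t\t# placeholders):\n\t\t\t#\n\t\t\t#   import mechanize\n\t\t\t#   br = mechanize.Browser()\n\t\t\t#   br.set_handle_robots(False)\n\t\t\t#   br.open('https://example.com/login')\n\t\t\t#   br.select_form(nr=0)\n\t\t\t#   br.form['email'] = 'user@example.com'\n\t\t\t#   br.form['pass'] = 'secret'\n\t\t\t#   html = br.submit().read()\n\t\t\tsub = 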
self.br.submit().read()\n\t\t\tif 'checkpoint' in str(sub):\n\t\t\t\tself.Continue()\n\t\t\telif 'save-device' in str(sub) or 'logout.php' in str(sub):\n\t\t\t\tprint(G + ' - no checkpoint detected')\n\t\t\t\tprint(W + '-'*45)\n\t\t\telse:\n\t\t\t\tprint(R + ' - failed login')\n\t\t\t\tprint(W + '-'*45)\n\t\texcept KeyboardInterrupt:\n\t\t\tprint(W + '[' + R + '!' + W + '] ' + R + 'keyboard interrupt!')\n\t\t\tprint(W + '[' + R + '!' + W + '] ' + R + 'stopped!')\n\t\t\tsys.exit()\n\t\texcept requests.exceptions.ConnectionError:\n\t\t\tprint(W + '[' + R + '!' + W + '] ' + R + 'connection error!')\n\t\t\tprint(W + '[' + R + '!' + W + '] ' + R + 'stopped!')\n\t\t\tsys.exit()\n\t\t\n\tdef Main(self):\n\t\ttry:\n\t\t\tprint(W + '\\n[' + R + '!' + W + '] separator email|password')\n\t\t\tlist = input(W + '[' + G + '?' + W + '] account lists : '+G)\n\t\t\tprint(W + '-'*45)\n\t\t\tfor id in open(list).readlines():\n\t\t\t\tself.Login(id.strip())\n\t\t\tprint(W + '[' + G + '•' + W + '] done!')\n\t\texcept FileNotFoundError:\n\t\t\tprint(W + '[' + R + '!' + W + '] ' + R + 'file not found!')\n\t\t\tsys.exit()\n\nCheckpoint()","sub_path":"data/CheckpointDetector.py","file_name":"CheckpointDetector.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"285783041","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\" This is a practice program \"\"\"\nimport os\nimport sys\nimport csv\nimport math\nimport random\nimport json\nimport logging\nimport pickle\nimport pprint\nfrom datetime import datetime, date\nfrom collections import defaultdict\nfrom pandas.io.parsers import TextFileReader\nimport pytz\nimport copy\nimport numpy as np\nimport pandas as pd\nimport codecs\nimport io\nimport matplotlib.pyplot as plt\n\nsys.dont_write_bytecode = True  # don't make .pyc files\n\n\n# -[no-use>>]-------------------------------------------------------------\ndef read_binary_file():\n    file_name = 'data/spot_2021.csv'\n    with open(file_name, 'rb') as fin:\n        if fin.seekable():\n            fin.seek(796, os.SEEK_SET)\n            b = fin.read()\n            s = b.decode()\n        else:\n            print('# Not seekable')\n    print(s)\n\n\ndef read_sjis_file():\n    file_name = 'data/spot_2021.csv'\n    with open(file_name, encoding='shift_jis') as f:\n        line_strip = [s.strip() for s in f.readlines()]\n    print(line_strip[0])\n    print(line_strip[1])\n    print(line_strip[2])\n\n\ndef convert_sjis_to_utf8():\n    src_file_path = \"sjis_file.csv\"\n    src_codec = codecs.lookup(\"shift_jis\")  # source character encoding\n    dest_file_path = \"utf8_file.csv\"\n    dest_codec = codecs.lookup(\"utf_8\")  # destination character encoding\n    # open the source and destination file objects\n    with open(src_file_path, \"rb\") as src, open(dest_file_path, \"wb\") as dest:\n        # build the transcoding stream\n        stream = codecs.StreamRecoder(\n            src,\n            dest_codec.encode, src_codec.decode,\n            src_codec.streamreader, dest_codec.streamwriter,\n        )\n        reader = io.BufferedReader(stream)\n        # write out\n        while True:\n            data = reader.read1()\n            if not data:\n                break\n            dest.write(data)\n            dest.flush()\n\n# -[<<no-use]-------------------------------------------------------------\n\n\nclass EPowerArea:\n    def __init__(self) -> None:\n        pass\n\n    @classmethod\n    def get_area_names(cls):\n        return [\"hokkaido\", \"tohoku\", \"tokyo\", \"chubu\", \"hokuriku\", \"kansai\", \"chugoku\", \"shikoku\", \"kyushu\"]\n\n    @classmethod\n    def get_area_column(cls, init_col=6):\n        '''\n        name and column number of area prices in JEPX_spot.csv\n        '''\n        area = cls.get_area_names()\n        return dict(zip(area, [init_col + v[0] for v in enumerate(area)]))\n\n    @classmethod\n    def get_system_area_column(cls, system_col=5, init_col=6):\n        '''\n        name and column number of system 
price and area prices in JEPX_spot.csv\n '''\n syst_cols = {\"system\": system_col}\n area_cols = cls.get_area_column(init_col)\n return dict(area_cols, **syst_cols)\n\n\nclass EPowerTime:\n def __init__(self) -> None:\n pass\n\n @classmethod\n def get_time_slot_zone(cls):\n return {\n 1: '00:00−00:30', 2: '00:30−01:00',\n 3: '01:00−01:30', 4: '01:30−02:00',\n 5: '02:00−02:30', 6: '02:30−03:00',\n 7: '03:00−03:30', 8: '03:30−04:00',\n 9: '04:00−04:30', 10: '04:30−05:00',\n 11: '05:00−05:30', 12: '05:30−06:00',\n 13: '06:00−06:30', 14: '06:30−07:00',\n 15: '07:00−07:30', 16: '07:30−08:00',\n 17: '08:00−08:30', 18: '08:30−09:00',\n 19: '09:00−09:30', 20: '09:30−10:00',\n 21: '10:00−10:30', 22: '10:30−11:00',\n 23: '11:00−11:30', 24: '11:30−12:00',\n 25: '12:00−12:30', 26: '12:30−13:00',\n 27: '13:00−13:30', 28: '13:30−14:00',\n 29: '14:00−14:30', 30: '14:30−15:00',\n 31: '15:00−15:30', 32: '15:30−16:00',\n 33: '16:00−16:30', 34: '16:30−17:00',\n 35: '17:00−17:30', 36: '17:30−18:00',\n 37: '18:00−18:30', 38: '18:30−19:00',\n 39: '19:00−19:30', 40: '19:30−20:00',\n 41: '20:00−20:30', 42: '20:30−21:00',\n 43: '21:00−21:30', 44: '21:30−22:00',\n 45: '22:00−22:30', 46: '22:30−23:00',\n 47: '23:00−23:30', 48: '23:30−24:00',\n }\n\n @classmethod\n def to_date_YMD(cls, str_date, fmt='%Y/%m/%d'):\n '''\n change string date into data type with yyyy/MM/dd\n '''\n tdatetime = datetime.strptime(str_date, fmt)\n return date(tdatetime.year, tdatetime.month, tdatetime.day)\n\n\nclass PandaUtils():\n def __init__(self, df: TextFileReader) -> None:\n self.df = df\n\n def extract_column_pd(self, ncol=0):\n '''\n extract one column from pandas data\n '''\n str2d = self.df.iloc[:, [ncol]].values\n return [x for row in str2d for x in row]\n\n\nclass JEPXSpot():\n def __init__(self) -> None:\n pass\n\n\ndef get_price_slot_line(irow, target_slot, dict_slot, list_price, last_slot=48):\n '''\n get price within 48 slots\n '''\n assert irow < len(list_price), '# Error: irow < len(list_price) is required'\n for jc in dict_slot:\n if target_slot == jc:\n return list_price[irow]\n else:\n pass\n\n\ndef get_dict_date_prices(slots, dates, dict_slot, prices, last_slot=48):\n '''\n arrage row-oredered prices in 48 slots into column-ordered prices, then make dictionary of key:date, value:prices\n '''\n assert len(slots) == len(dates), '# Error: len(slots) == len(dates) is required'\n list_price = list()\n dict_date_prices = dict()\n for ir, slot in enumerate(slots):\n date = dates[ir]\n if slot == 1:\n list_price.clear()\n price = get_price_slot_line(ir, slot, dict_slot, prices)\n list_price.append(price)\n if slot == last_slot:\n dict_date_prices[date] = copy.deepcopy(list_price)\n return dict_date_prices\n\n\ndef write_area_spot_price_by_row_date_col_slot(area_name, writer, slots, dates, dict_slot, prices_area):\n '''\n write [date, prices in 48 slots] with header and footer into writer-stream\n '''\n header = [area_name, *[str(x) for x in dict_slot]]\n writer.writerow(header)\n dict_date_prices = get_dict_date_prices(slots, dates, dict_slot, prices_area[area_name])\n for k, v in dict_date_prices.items():\n # oneline = str(k) + ',' + ','.join([str(x) for x in v])\n writer.writerow([k, *v])\n footer = ['' for x in header]\n writer.writerow(footer)\n\n\ndef calc_return_list(price_list):\n '''\n calculate daily return by (price.current - price.prev) / price.prev\n '''\n daily_return = list()\n price_prev = price_list[0]\n for i, p in enumerate(price_list):\n if i > 0:\n price_curr = p\n return_1day = (price_curr - price_prev) / 
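price_prev\n            # [illustrative sketch] this computes r_t = (p_t - p_prev) / p_prev; a\n            # vectorized numpy equivalent (an assumption -- not used by this module):\n            #\n            #   import numpy as np\n            #   p = np.array([100.0, 110.0, 99.0])\n            #   np.diff(p) / p[:-1]  # -> array([ 0.1, -0.1])\n            return_1day = (price_curr - price_prev) / 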
price_prev\n daily_return.append(return_1day)\n price_prev = p\n return daily_return\n\n\ndef calc_mid_value_list(bins, nround=2):\n '''\n calculate mid value by (value.current - value.prev) / 2.0\n '''\n class_values = list()\n bin_prev = bins[0]\n for i, b in enumerate(bins):\n if i > 0:\n bin_curr = b\n class_value = (bin_curr + bin_prev) / 2.0\n class_values.append(round(class_value, nround))\n bin_prev = b\n return class_values\n\n\ndef seasonal_func(t, mu, s0, s1, s2, s3, s4):\n '''\n seasonal function: f(t)=s0+mu.t+s1.sin(2PI.t)+s2.cos(2PI.t)+s3.sin(4PI.t)+s4.sin(4PI.t)\n '''\n const_term = s0 + mu*t\n x2pi = 2.0*math.pi\n x4pi = 4.0*math.pi\n frqcy_term = s1 * math.sin(x2pi*t) + s2 * math.cos(x2pi*t) + s3 * math.sin(x4pi*t) + s4 * math.sin(x4pi*t)\n return const_term + frqcy_term\n\n\ndef main():\n file_name = 'data/spot_merged.csv'\n df = pd.read_csv(file_name, encoding='shift_jis')\n header = df.columns.values.tolist()\n # print(df)\n # print(header)\n # print(df.describe()) # stats data\n # sys.exit()\n\n syst_area_cols = EPowerArea.get_system_area_column()\n # for k, v in syst_area_cols.items():\n # print(k, v)\n # sys.exit()\n\n dict_slot = EPowerTime.get_time_slot_zone()\n # for k, v in dict_slot.items():\n # print(k, v)\n # print(df.iloc[:, [0, 1, syst_area_cols[\"hokkaido\"], syst_area_cols[\"tokyo\"], syst_area_cols[\"system\"]]])\n # sys.exit()\n\n pdu = PandaUtils(df)\n\n # extract 1-column data from pd\n dates_str1d = pdu.extract_column_pd(0)\n slots_str1d = pdu.extract_column_pd(1)\n prices_area_str1d = {k: pdu.extract_column_pd(v) for k, v in syst_area_cols.items()}\n\n # transform data type for extracted 1-column data\n dates = [EPowerTime.to_date_YMD(x) for x in dates_str1d]\n slots = [int(x) for x in slots_str1d]\n prices_area = {k: [float(x) for x in v] for k, v in prices_area_str1d.items()}\n # print(prices_area['system'])\n # print(prices_area['hokkaido'])\n # sys.exit()\n\n # [output]: prices by row.date, col.slot\n # with open('data/spot_merged_adj.csv', 'w') as fout:\n # writer = csv.writer(fout)\n # for area in syst_area_cols:\n # write_area_spot_price_by_row_date_col_slot(area, writer, slots, dates, dict_slot, prices_area)\n # sys.exit()\n\n area_name = 'hokkaido'\n dict_date_prices = get_dict_date_prices(slots, dates, dict_slot, prices_area[area_name])\n # for k, v in dict_date_prices.items():\n # print(k, v[0], v[1], v[2], v[3], v[4], v[5])\n nb_slot = 23\n price_all_dates = [v[nb_slot] for v in dict_date_prices.values()]\n daily_return = calc_return_list(price_all_dates)\n limit_value = 0.98\n daily_return_cut = [x for x in daily_return if -limit_value < x and x < limit_value]\n print('# mean val: ', np.mean(daily_return_cut))\n print('# variance: ', np.var(daily_return_cut))\n print('# std dev : ', np.std(daily_return_cut))\n for k, v in dict_date_prices.items():\n print(k, v[nb_slot])\n sys.exit()\n\n # [plot]\n # fig, ax = plt.subplots()\n # n, bins, patches = ax.hist(daily_return_cut, bins=100, range=(-1, 1))\n # class_values = calc_mid_value_list(bins)\n\n # ax.set_title('Distribution of daily return:' + area_name + ', #=' + str(nb_slot + 1))\n # ax.set_xlabel('daily return')\n # ax.set_ylabel('Count')\n # class_sample = class_values[::5]\n # label = [str(x) for x in class_sample]\n # ax.set_xticks(class_sample)\n # ax.set_xticklabels(label, rotation=90)\n # ax.grid()\n # # print(sum(n), len(daily_return))\n # # print(class_values)\n # plt.show()\n\n\ndef main_seasonal():\n mu = 1.99\n s0 = 0.14\n s1 = 0.02\n s2 = 0.06\n s3 = 0.12\n s4 = 0.08\n t0 = 
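0.0\n    # [illustrative sketch] seasonal_func above evaluates\n    # f(t) = s0 + mu*t + s1*sin(2*pi*t) + s2*cos(2*pi*t) + s3*sin(4*pi*t) + s4*sin(4*pi*t)\n    # (both 4*pi terms use sin as written); at t=0 the sin terms vanish, so\n    # seasonal_func(0.0, 1.99, 0.14, 0.02, 0.06, 0.12, 0.08) -> 0.14 + 0.06 == 0.2\n    t0 = 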
0.0\n tn = 5.0\n # dt = 0.0027\n dt = 0.1\n n = int((tn - t0) / dt)\n ts = [t0 + i*dt for i in range(n)]\n fs = [seasonal_func(t, mu, s0, s1, s2, s3, s4) for t in ts]\n # [plot]\n # plt.plot(ts, fs, marker=\"o\", color = \"red\", linestyle = \"--\")\n plt.plot(ts, fs, marker=\".\", color = \"blue\", linestyle = \"dotted\")\n plt.show()\n\n\nif __name__ == '__main__':\n print(os.getcwd())\n flag = 1\n if flag == 1:\n main()\n elif flag == 2:\n main_seasonal()\n else:\n print(\"# END\")\n","sub_path":"jepx.py","file_name":"jepx.py","file_ext":"py","file_size_in_byte":11093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"47119372","text":"import time\n\nfrom dagster_graphql.test.utils import define_context_for_file, execute_dagster_graphql\n\nfrom dagster import (\n Materialization,\n Output,\n RepositoryDefinition,\n execute_pipeline,\n pipeline,\n seven,\n solid,\n)\nfrom dagster.core.instance import DagsterInstance, InstanceType\nfrom dagster.core.storage.event_log import InMemoryEventLogStorage\nfrom dagster.core.storage.noop_compute_log_manager import NoOpComputeLogManager\nfrom dagster.core.storage.root import LocalArtifactStorage\nfrom dagster.core.storage.runs import InMemoryRunStorage\n\nGET_ASSET_KEY_QUERY = '''\n{\n assetsOrError {\n __typename\n ...on AssetConnection {\n nodes {\n key\n }\n }\n }\n}\n'''\n\nGET_ASSET_MATERIALIZATION = '''\n query AssetQuery($assetKey: String!) {\n assetOrError(assetKey: $assetKey) {\n ... on Asset {\n assetMaterializations(limit: 1) {\n materializationEvent {\n materialization {\n label\n }\n }\n }\n }\n }\n }\n'''\n\nGET_ASSET_RUNS = '''\n query AssetRunsQuery($assetKey: String!) {\n assetOrError(assetKey: $assetKey) {\n ... on Asset {\n runs {\n runId\n }\n }\n }\n }\n'''\n\n\ndef get_instance(temp_dir):\n return DagsterInstance(\n instance_type=InstanceType.EPHEMERAL,\n local_artifact_storage=LocalArtifactStorage(temp_dir),\n run_storage=InMemoryRunStorage(),\n event_storage=InMemoryEventLogStorage(),\n compute_log_manager=NoOpComputeLogManager(),\n )\n\n\ndef asset_repo():\n @solid\n def solid_a(_):\n yield Materialization(asset_key='a', label='a')\n yield Output(1)\n\n @solid\n def solid_b(_, num):\n yield Materialization(asset_key='b', label='b')\n time.sleep(0.1)\n yield Materialization(asset_key='c', label='c')\n yield Output(num)\n\n @pipeline\n def single_asset_pipeline():\n solid_a()\n\n @pipeline\n def multi_asset_pipeline():\n solid_b(solid_a())\n\n return RepositoryDefinition(\n 'asset_repo', pipeline_defs=[single_asset_pipeline, multi_asset_pipeline]\n )\n\n\ndef test_get_all_asset_keys(snapshot):\n with seven.TemporaryDirectory() as temp_dir:\n instance = get_instance(temp_dir)\n repo = asset_repo()\n execute_pipeline(repo.get_pipeline('multi_asset_pipeline'), instance=instance)\n context = define_context_for_file(__file__, 'asset_repo', instance)\n result = execute_dagster_graphql(context, GET_ASSET_KEY_QUERY)\n assert result.data\n snapshot.assert_match(result.data)\n\n\ndef test_get_asset_key_materialization(snapshot):\n with seven.TemporaryDirectory() as temp_dir:\n instance = get_instance(temp_dir)\n repo = asset_repo()\n execute_pipeline(repo.get_pipeline('single_asset_pipeline'), instance=instance)\n context = define_context_for_file(__file__, 'asset_repo', instance)\n result = execute_dagster_graphql(\n context, GET_ASSET_MATERIALIZATION, variables={'assetKey': 'a'}\n )\n assert result.data\n snapshot.assert_match(result.data)\n\n\ndef test_get_asset_runs():\n with 
seven.TemporaryDirectory() as temp_dir:\n instance = get_instance(temp_dir)\n repo = asset_repo()\n single_run_id = execute_pipeline(\n repo.get_pipeline('single_asset_pipeline'), instance=instance\n ).run_id\n multi_run_id = execute_pipeline(\n repo.get_pipeline('multi_asset_pipeline'), instance=instance\n ).run_id\n context = define_context_for_file(__file__, 'asset_repo', instance)\n result = execute_dagster_graphql(context, GET_ASSET_RUNS, variables={'assetKey': 'a'})\n assert result.data\n fetched_runs = [run['runId'] for run in result.data['assetOrError']['runs']]\n assert len(fetched_runs) == 2\n assert multi_run_id in fetched_runs\n assert single_run_id in fetched_runs\n","sub_path":"python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_assets.py","file_name":"test_assets.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"588581058","text":"from abc import ABC, abstractmethod\nimport re\nimport vim\nimport sys\n\nclass filter_predicate_base(ABC):\n\n def __init__(self, lines):\n self.processed_lines = self.__process_lines(lines)\n self.__get_set_matches(self.processed_lines)\n\n def __process_lines(self, lines):\n lines = self.__run_pre_processors(lines)\n lines = self.__check_for_excluded(lines)\n lines = self.__run_formatters(lines)\n lines = self.__run_post_processors(lines)\n return lines\n\n def __check_for_excluded(self, lines):\n result = []\n for line in lines:\n for excluded_line in self.excluded_lines:\n if excluded_line in line:\n lines.remove(line)\n return lines\n\n def __run_formatters(self, lines):\n result = []\n for formatter in self.line_formatters:\n single_formatter = []\n for line in lines:\n tmp_line = formatter(line)\n if tmp_line:\n if isinstance(tmp_line, list):\n single_formatter.extend(tmp_line)\n else:\n single_formatter.append(tmp_line)\n lines = single_formatter\n return lines\n\n def __get_set_matches(self, lines):\n for matcher in self.line_matchers:\n if matcher['type'].lower() == 'array':\n self.__iterate_lines_for_array_match(lines, matcher)\n\n def __iterate_lines_for_array_match(self, lines, matcher):\n matches_list = []\n regex = re.compile(matcher['regex'])\n for line in lines:\n match = re.search(matcher['regex'], line)\n if match:\n matches_list.append(match.group(1))\n vim.command(\"let g:\" + matcher['vg_name'] + \" = %s\" % matches_list)\n\n def __run_pre_processors(self, lines):\n for processor in self.pre_processors:\n lines = processor(lines)\n return lines\n\n def __run_post_processors(self, lines):\n for processor in self.post_processors:\n lines = processor(lines)\n return lines\n\n # the following properties/methods are intended to be overwritten\n\n @property\n def excluded_lines(self):\n return []\n\n @property\n def line_formatters(self):\n return []\n\n @property\n def line_matchers(self):\n return []\n\n @property\n def pre_processors(self):\n return []\n\n @property\n def post_processors(self):\n return []\n","sub_path":"autoload/vgdb/filter_predicate_base.py","file_name":"filter_predicate_base.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"120029347","text":"import types\nfrom syscall import SystemCall\n\nclass Task(object):\n \"\"\"\n A task is a wrapper of a coroutine.\n \"\"\"\n taskid = 0\n\n def __init__(self, target):\n Task.taskid += 1;\n self.tid = Task.taskid\n self.target = target\n self.sendval = None\n 
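# [illustrative sketch] run() below trampolines sub-generators by hand; the\n        # stack initialized just below holds suspended caller generators. A toy of\n        # the same pattern (hypothetical, not part of this module):\n        #\n        #   def child():\n        #       yield 'from child'\n        #   def parent():\n        #       result = yield child()  # run() pushes parent, resumes child\n        #       yield result\n        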
self.stack = []\n\n    def run(self):\n        \"\"\"\n        run() executes the task to the next yield, then control is returned\n        to the Scheduler.\n        \"\"\"\n        while True:\n            try:\n                result = self.target.send(self.sendval)\n                if isinstance(result, SystemCall):\n                    return result\n                if isinstance(result, types.GeneratorType):\n                    self.stack.append(self.target)\n                    self.sendval = None\n                    self.target = result\n                else:\n                    if not self.stack:\n                        return\n                    self.sendval = result\n                    self.target = self.stack.pop()\n            except StopIteration:\n                if not self.stack:\n                    raise\n                self.sendval = None\n                self.target = self.stack.pop()\n\n","sub_path":"multitask_pyos/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"219383498","text":"from __future__ import print_function, division, absolute_import\n\nimport os\nimport re\nimport array\nimport shlex\nimport subprocess\nimport collections\n\nimport hou\nimport soho\n\nimport PBRTapi as api\nfrom PBRTnodes import BaseNode, MaterialNode, PBRTParam, ParamSet\nfrom PBRTshading import wrangle_shading_network\nfrom PBRTstate import scene_state, temporary_file, HVER_17_5, HVER_18\n\n\ndef create_suffix(\n    oppath=\"\", shape_num=None, override_num=None, prim_num=None, instance_info=None\n):\n\n    # Material / Texture Namespacing\n    # mat_node_path:soppath=shape_num+override_num[prim_num]:instance_src:instance_num\n\n    if instance_info is not None:\n        instance = \":{}:{}\".format(instance_info.source, instance_info.number)\n    else:\n        instance = \"\"\n\n    shape = \"={}\".format(shape_num) if shape_num is not None else \"\"\n    override = \"+{}\".format(override_num) if override_num is not None else \"\"\n    prim = \"[{}]\".format(prim_num) if prim_num is not None else \"\"\n\n    return \"{oppath}{shape}{override}{prim}{instance}\".format(\n        oppath=oppath, shape=shape, override=override, prim=prim, instance=instance\n    )\n\n\ndef primitive_alpha_texs(properties):\n    paramset = ParamSet()\n    if not properties:\n        return paramset\n    if \"alpha\" not in properties:\n        return paramset\n    tex = properties[\"alpha\"].Value[0]\n    if not tex:\n        return paramset\n    paramset.add(PBRTParam(\"texture\", \"alpha\", tex))\n    return paramset\n\n\n# NOTE: HOUDINI COMPATIBILITY\n# To match Houdini's parametric uvs, we need to do\n# col,row ; col+1,row ; col,row+1 ; col+1,row+1\n# However this causes backfaces which breaks Mediums\n# so we'll need to api.ReverseOrientation\ndef patch_vtx_gen(gdp):\n    for prim in gdp.iterPrims():\n        for col in range(prim.numCols() - 1):\n            for row in range(prim.numRows() - 1):\n                yield prim.vertex(col, row)\n                yield prim.vertex(col + 1, row)\n                yield prim.vertex(col, row + 1)\n                yield prim.vertex(col + 1, row + 1)\n\n\ndef mesh_vtx_gen(gdp):\n    return (vtx for prim in gdp.iterPrims() for vtx in prim.vertices())\n\n\ndef vtx_attrib_gen(vertices, attrib):\n    \"\"\"Per prim, per vertex fetching vertex/point values\n\n    Args:\n        gdp (hou.Geometry): Input geometry\n        attrib (hou.Attrib): Attribute to evaluate\n\n    Yields:\n        Values of attrib for each vertex\n    \"\"\"\n    # NOTE: Having one loop with a conditional inside is a significant cost.\n    # We'll pull the conditional out of the loop so it's computed once\n    # at the expense of some code duplication.\n    if attrib is None:\n        for vtx in vertices:\n            yield vtx.point().number()\n    elif attrib.type() == hou.attribType.Vertex:\n        for vtx in vertices:\n            yield vtx.attribValue(attrib)\n    elif attrib.type() == hou.attribType.Point:\n        for vtx in vertices:\n            yield 
vtx.point().attribValue(attrib)\n\n\ndef linear_vtx_gen(gdp, vtx_per_face_hint=None):\n \"\"\"Generate the linearvertex for input geometry\n\n A linear vertex is a unique value for every vertex in the mesh\n where as a vertex number is the vertex offset on a prim\n\n We need a linear vertex for generating indices when we have uniqe points\n http://www.sidefx.com/docs/houdini/vex/functions/vertexindex.html\n\n Args:\n gdp (hou.Geometry): Input geometry\n\n Yields:\n Linear vertex number for every vertex\n \"\"\"\n # NOTE: The following can be skipped reduced down to a simple range since we\n # know that the meshes will always have {vtx_per_face_hint} verts\n if vtx_per_face_hint is None:\n vertices = mesh_vtx_gen(gdp)\n return vtx_attrib_gen(vertices, None)\n\n return range(len(gdp.iterPrims()) * vtx_per_face_hint)\n\n\ndef prim_transform(prim):\n \"\"\"Return a tuple representing the Matrix4 of the transform intrinsic\"\"\"\n # VDBs have a transform intrinsic that is a hou.Matrix4\n # rot_mat = hou.Matrix3(prim.intrinsicValue(\"transform\"))\n # so we'll rely on the prim's transform() method\n rot_mat = prim.transform()\n vtx = prim.vertex(0)\n pt = vtx.point()\n pos = pt.position()\n xlate = hou.hmath.buildTranslate(pos)\n return (hou.Matrix4(rot_mat) * xlate).asTuple()\n\n\n# TODO: Write a find_attrib_value(name, type, size)\n# this way we can scope the exact attribute we want\n# instead of getting a string when we want a float.\n# Update zmin_attrib = gdp.findPrimAttrib(\"zmin\")\n# as an example\n\n# NOTE: HOUDINI COMPATIBILITY\n# We can match Houdini's Sphere's with a 1,1,-1 Scale.\ndef sphere_wrangler(gdp, paramset=None, properties=None):\n \"\"\"Outputs a \"sphere\" Shapes for the input geometry\n\n Args:\n gdp (hou.Geometry): Input geo\n paramset (ParamSet): Any base params to add to the shape. (Optional)\n properties (dict): Dictionary of SohoParms (Optional)\n Returns: None\n \"\"\"\n\n zmin_attrib = gdp.findPrimAttrib(\"zmin\")\n zmax_attrib = gdp.findPrimAttrib(\"zmax\")\n phimax_attrib = gdp.findPrimAttrib(\"phimax\")\n\n match_uvs = True\n if (\n properties\n and \"pbrt_matchhoudiniuv\" in properties\n and not properties[\"pbrt_matchhoudiniuv\"].Value[0]\n ):\n match_uvs = False\n\n with api.AttributeBlock():\n # Because we are inverting z-axis per sphere, we need to reverse orientation\n\n if match_uvs:\n api.ReverseOrientation()\n for prim in gdp.prims():\n shape_paramset = ParamSet(paramset)\n\n if zmin_attrib is not None:\n shape_paramset.add(\n PBRTParam(\"float\", \"zmin\", prim.attribValue(zmin_attrib))\n )\n if zmax_attrib is not None:\n shape_paramset.add(\n PBRTParam(\"float\", \"zmax\", prim.attribValue(zmax_attrib))\n )\n if phimax_attrib is not None:\n shape_paramset.add(\n PBRTParam(\"float\", \"phimax\", prim.attribValue(phimax_attrib))\n )\n\n with api.AttributeBlock():\n xform = prim_transform(prim)\n api.ConcatTransform(xform)\n if match_uvs:\n # Scale required to match Houdini's uvs\n api.Scale(1, 1, -1)\n api.Shape(\"sphere\", shape_paramset)\n return\n\n\n# NOTE: HOUDINI COMPATIBILITY\n# The parameteric uvs do not match between the two. The u coordinate is\n# flipped. This is not resolvable within the export.\ndef disk_wrangler(gdp, paramset=None, properties=None):\n \"\"\"Outputs \"disk\" Shapes for the input geometry\n\n Args:\n gdp (hou.Geometry): Input geo\n paramset (ParamSet): Any base params to add to the shape. 
(Optional)\n properties (dict): Dictionary of SohoParms (Optional)\n Returns: None\n \"\"\"\n\n # NOTE we could hou.math.clamp our radius and phimax here, but instead will let the\n # user pass them as is and let pbrt-v4 deal with it. The reasoning for this is that\n # this is slightly more advanced and we would expect the user to know what they are\n # doing.\n innerradius_attrib = gdp.findPrimAttrib(\"innerradius\")\n phimax_attrib = gdp.findPrimAttrib(\"phimax\")\n\n for prim in gdp.prims():\n shape_paramset = ParamSet(paramset)\n\n if innerradius_attrib is not None:\n shape_paramset.add(\n PBRTParam(\"float\", \"innerradius\", prim.attribValue(innerradius_attrib))\n )\n if phimax_attrib is not None:\n shape_paramset.add(\n PBRTParam(\"float\", \"phimax\", prim.attribValue(phimax_attrib))\n )\n\n with api.AttributeBlock():\n xform = prim_transform(prim)\n api.ConcatTransform(xform)\n api.Shape(\"disk\", shape_paramset)\n return\n\n\ndef ply_displacement_wrangler(prim, properties):\n\n paramset = ParamSet()\n\n disp_tex = prim.attribValue(\"displacement\")\n if not disp_tex:\n return paramset\n if not hou.node(disp_tex):\n return paramset\n\n instance_info = properties.get(\".instance_info\")\n\n suffix = create_suffix(\n properties[\"object:soppath\"].Value[0],\n properties[\".shape_count\"],\n properties[\".override_count\"],\n prim.number(),\n instance_info,\n )\n\n # TODO: We might need to cache parms and nodes if there are a lot of plys\n wrangle_shading_network(\n disp_tex, use_named=False, exported_nodes=set(), name_suffix=suffix\n )\n texture_name = \"%s%s\" % (disp_tex, suffix)\n paramset.add(PBRTParam(\"texture\", \"displacement\", texture_name))\n edgelen_attrib = prim.geometry().findPrimAttrib(\"displacement_edgelength\")\n if edgelen_attrib is not None:\n edgelen_val = prim.attribValue(edgelen_attrib)\n paramset.add(PBRTParam(\"float\", \"displacement.edgelength\", edgelen_val))\n\n return paramset\n\n\ndef packeddisk_wrangler(gdp, paramset=None, properties=None):\n \"\"\"Outputs \"ply\" Shapes for the input geometry\n\n Args:\n gdp (hou.Geometry): Input geo\n paramset (ParamSet): Any base params to add to the shape. (Optional)\n properties (dict): Dictionary of SohoParms (Optional)\n Returns: None\n \"\"\"\n\n disp_tex_attrib = gdp.findPrimAttrib(\"displacement\")\n\n for prim in gdp.prims():\n shape_paramset = ParamSet(paramset)\n filename = prim.intrinsicValue(\"filename\")\n if not filename:\n continue\n if os.path.splitext(filename)[1].lower() != \".ply\":\n continue\n shape_paramset.replace(PBRTParam(\"string\", \"filename\", filename))\n with api.AttributeBlock():\n xform = prim_transform(prim)\n api.ConcatTransform(xform)\n if disp_tex_attrib is not None:\n disp_paramset = ply_displacement_wrangler(prim, properties)\n shape_paramset |= disp_paramset\n api.Shape(\"plymesh\", shape_paramset)\n return\n\n\ndef tube_wrangler(gdp, paramset=None, properties=None):\n \"\"\"Handles \"cylinder\" Shapes for the input geometry\n\n Args:\n gdp (hou.Geometry): Input geo\n paramset (ParamSet): Any base params to add to the shape. 
(Optional)\n properties (dict): Dictionary of SohoParms (Optional)\n Returns: None\n \"\"\"\n\n for prim in gdp.prims():\n\n shape_paramset = ParamSet(paramset)\n\n phimax_attrib = gdp.findPrimAttrib(\"phimax\")\n if phimax_attrib is not None:\n shape_paramset.add(\n PBRTParam(\"float\", \"phimax\", prim.attribValue(phimax_attrib))\n )\n\n with api.AttributeBlock():\n\n side_paramset = ParamSet(shape_paramset)\n\n xform = prim_transform(prim)\n taper = prim.intrinsicValue(\"tubetaper\")\n\n if taper != 1:\n api.Comment(\n \"Skipping cylinder, prim #{}\"\n \"taper values other than 1 not supported\".format(prim.number())\n )\n continue\n\n closed = prim.intrinsicValue(\"closed\")\n\n api.ConcatTransform(xform)\n api.Rotate(-90, 1, 0, 0)\n shape = \"cylinder\"\n side_paramset.add(PBRTParam(\"float\", \"zmin\", -0.5))\n side_paramset.add(PBRTParam(\"float\", \"zmax\", 0.5))\n\n with api.AttributeBlock():\n # NOTE: We are disabling this so that phimax will line up\n # between the disks and the cylinder. This means Houdini's UV's\n # will not match, but that is preferred over non-aligned\n # disks and cylinders\n # api.ReverseOrientation()\n # api.Scale(1, -1, 1)\n api.Shape(shape, side_paramset)\n\n if closed:\n disk_paramset = ParamSet(shape_paramset)\n disk_paramset.add(PBRTParam(\"float\", \"height\", 0.5))\n api.Shape(\"disk\", disk_paramset)\n disk_paramset.replace(PBRTParam(\"float\", \"height\", -0.5))\n with api.AttributeBlock():\n api.ReverseOrientation()\n api.Shape(\"disk\", disk_paramset)\n return\n\n\ndef splice_into_every_n_end(iter_a, iter_b, n):\n for i, v in enumerate(iter_a):\n yield v\n if i % n == n - 1:\n yield next(iter_b)\n\n\ndef splice_into_every_n_start(iter_a, iter_b, n):\n for i, v in enumerate(iter_a):\n if i % n == 0:\n yield next(iter_b)\n yield v\n\n\nclass OutputGeo(object):\n \"\"\"Prepares a Hou.Geometry for a trianglemesh or patchgrid\n\n The following attributes are checked for -\n P (point), built-in attribute\n N (vertex/point), float[3]\n uv (vertex/point), float[3]\n S (vertex/point), float[3]\n faceIndices (prim), integer, used for ptex\n\n Args:\n mesh_gdp (hou.Geometry): Input geo\n computeN (bool): Whether to auto-compute normals if they don't exist\n Defaults to True\n is_patchmesh (bool): Is this a patch mesh\n Defaults to False\n \"\"\"\n\n def __init__(self, mesh_gdp, computeN=True, is_patchmesh=False):\n self.computeN = computeN\n self.is_patchmesh = is_patchmesh\n self._init_mesh(mesh_gdp)\n\n def _init_mesh(self, mesh_gdp):\n self._gdp = mesh_gdp\n\n self.num_points = len(self.gdp.iterPoints())\n self.num_prims = len(self.gdp.iterPrims())\n\n N_attrib = self.gdp.findVertexAttrib(\"N\")\n if N_attrib is None:\n N_attrib = self.gdp.findPointAttrib(\"N\")\n\n # If there are no vertex or point normals and we need to compute\n # them with a SopVerb\n if N_attrib is None and self.computeN:\n normal_verb = hou.sopNodeTypeCategory().nodeVerb(\"normal\")\n # type 0 is point normals\n normal_verb.setParms({\"type\": 0})\n normal_verb.execute(self.gdp, [self.gdp])\n N_attrib = self.gdp.findPointAttrib(\"N\")\n\n uv_attrib = self.gdp.findVertexAttrib(\"uv\")\n if uv_attrib is None:\n uv_attrib = self.gdp.findPointAttrib(\"uv\")\n\n if self.is_patchmesh:\n S_attrib = None\n faceIndices_attrib = None\n else:\n S_attrib = self.gdp.findVertexAttrib(\"S\")\n if S_attrib is None:\n S_attrib = self.gdp.findPointAttrib(\"S\")\n\n faceIndices_attrib = self.gdp.findPrimAttrib(\"faceIndices\")\n\n # We need to unique the points if any of the handles\n # to vtx 
attributes exists.\n to_promote = []\n for attrib in (N_attrib, uv_attrib, S_attrib):\n if attrib is None:\n continue\n if attrib.type() == hou.attribType.Vertex:\n to_promote.append(attrib.name())\n\n self.unique_points = True if to_promote else False\n\n if self.unique_points:\n if hou.applicationVersion() >= HVER_18:\n unique_verb = hou.sopNodeTypeCategory().nodeVerb(\"splitpoints\")\n else:\n unique_verb = hou.sopNodeTypeCategory().nodeVerb(\"facet\")\n unique_verb.setParms({\"unique\": True})\n unique_verb.execute(self.gdp, [self.gdp])\n\n promote_verb = hou.sopNodeTypeCategory().nodeVerb(\"attribpromote\")\n # inclass 3 = vertex, method 8 = first match\n promote_str = \" \".join(to_promote)\n promote_verb.setParms({\"inclass\": 3, \"method\": 8, \"inname\": promote_str})\n promote_verb.execute(self.gdp, [self.gdp])\n\n # If we sort the points by their vtx number we can just get a simple\n # range, the C++ Sort is much faster than looking up the actual point\n # numbers from the verts. The previous implementation of this was doing\n # the sort indirectly by iterator per vert per prim.\n if not self.is_patchmesh:\n sort_verb = hou.sopNodeTypeCategory().nodeVerb(\"sort\")\n sort_verb.setParms({\"ptsort\": 1})\n sort_verb.execute(self.gdp, [self.gdp])\n\n @property\n def gdp(self):\n return self._gdp\n\n @property\n def indices(self):\n if self.unique_points and not self.is_patchmesh:\n # This is an optimization where we know the indices are 0,1,2,3,4,5....\n return linear_vtx_gen(self.gdp, 3)\n\n if self.is_patchmesh:\n vertices = patch_vtx_gen(self.gdp)\n else:\n vertices = mesh_vtx_gen(self.gdp)\n\n return vtx_attrib_gen(vertices, None)\n\n @property\n def has_S(self):\n return True if self.gdp.findPointAttrib(\"S\") is not None else False\n\n @property\n def has_uv(self):\n return True if self.gdp.findPointAttrib(\"uv\") is not None else False\n\n @property\n def has_N(self):\n return True if self.gdp.findPointAttrib(\"N\") is not None else False\n\n @property\n def has_faceIndices(self):\n return True if self.gdp.findPrimAttrib(\"faceIndices\") is not None else False\n\n def mesh_params(self):\n\n mesh_paramset = ParamSet()\n mesh_paramset.add(PBRTParam(\"integer\", \"indices\", self.indices))\n\n # NOTE: We are using arrays here for very fast access since we can\n # fetch all the values at once compactly, while faster, this\n # will take more RAM than a generator approach. 
If this becomes\n # and issue we can change it.\n\n P = array.array(\"f\")\n P.fromstring(self.gdp.pointFloatAttribValuesAsString(\"P\"))\n mesh_paramset.add(PBRTParam(\"point\", \"P\", P))\n\n if self.has_N:\n N = array.array(\"f\")\n N.fromstring(self.gdp.pointFloatAttribValuesAsString(\"N\"))\n mesh_paramset.add(PBRTParam(\"normal\", \"N\", N))\n\n if self.has_S:\n S = array.array(\"f\")\n S.fromstring(self.gdp.pointFloatAttribValuesAsString(\"S\"))\n mesh_paramset.add(PBRTParam(\"vector\", \"S\", S))\n\n if self.has_faceIndices:\n faceIndices = array.array(\"i\")\n faceIndices.fromstring(self.gdp.primIntAttribValuesAsString(\"faceIndices\"))\n mesh_paramset.add(PBRTParam(\"integer\", \"faceIndices\", faceIndices))\n\n if self.has_uv:\n uv = array.array(\"f\")\n uv.fromstring(self.gdp.pointFloatAttribValuesAsString(\"uv\"))\n # Houdini's uvs are stored as 3 floats, but pbrt only needs two\n # We'll use some array slicing of continous memory to avoid\n # costly iteration\n # The follow is the equivalent of\n # uv_xy = (x for i, x in enumerate(uv) if i % 3 != 2)\n # but avoids having to do a mod for N times.\n uv_x = uv[::3]\n uv_y = uv[1::3]\n uv_xy = array.array(\"f\", uv_x + uv_y)\n uv_xy[::2] = uv_x\n uv_xy[1::2] = uv_y\n mesh_paramset.add(PBRTParam(\"point2\", \"uv\", uv_xy))\n\n return mesh_paramset\n\n def save_ply(self, path):\n\n soho.makeFilePathDirsIfEnabled(path)\n with open(path, \"wb\") as f_handle:\n\n num_elements = 3\n\n header = [\n \"ply\",\n \"format binary_little_endian 1.0\",\n \"element vertex %i\" % self.num_points,\n \"property float x\",\n \"property float y\",\n \"property float z\",\n ]\n if self.has_N:\n header.extend(\n [\"property float nx\", \"property float ny\", \"property float nz\"]\n )\n num_elements += 3\n if self.has_uv:\n header.extend([\"property float u\", \"property float v\"])\n num_elements += 2\n header.extend(\n [\n \"element face %i\" % self.num_prims,\n \"property list int int vertex_indices\",\n ]\n )\n if self.has_faceIndices:\n header.append(\"property int face_indices\")\n header.extend([\"end_header\", \"\"])\n\n f_handle.write(\"\\r\\n\".join(header))\n\n data_pool = array.array(\"f\")\n data_pool.fromstring(\"\\x00\" * 4 * num_elements * self.num_points)\n\n tmp = array.array(\"f\")\n tmp.fromstring(self.gdp.pointFloatAttribValuesAsString(\"P\"))\n offset = 0\n data_pool[offset::num_elements] = tmp[0::3]\n offset += 1\n data_pool[offset::num_elements] = tmp[1::3]\n offset += 1\n data_pool[offset::num_elements] = tmp[2::3]\n offset += 1\n del tmp\n\n if self.has_N:\n tmp = array.array(\"f\")\n tmp.fromstring(self.gdp.pointFloatAttribValuesAsString(\"N\"))\n data_pool[offset::num_elements] = tmp[0::3]\n offset += 1\n data_pool[offset::num_elements] = tmp[1::3]\n offset += 1\n data_pool[offset::num_elements] = tmp[2::3]\n offset += 1\n del tmp\n\n if self.has_uv:\n tmp = array.array(\"f\")\n tmp.fromstring(self.gdp.pointFloatAttribValuesAsString(\"uv\"))\n data_pool[offset::num_elements] = tmp[0::3]\n offset += 1\n data_pool[offset::num_elements] = tmp[1::3]\n offset += 1\n del tmp\n\n data_pool.tofile(f_handle)\n\n if self.has_faceIndices:\n tmp = array.array(\"i\")\n tmp.fromstring(self.gdp.primIntAttribValuesAsString(\"faceIndices\"))\n data_pool = array.array(\n \"i\",\n splice_into_every_n_start(\n splice_into_every_n_end(self.indices, iter(tmp), 3),\n iter(lambda: 3, 0),\n 4,\n ),\n )\n del tmp\n else:\n data_pool = array.array(\n \"i\", splice_into_every_n_start(self.indices, iter(lambda: 3, 0), 3)\n )\n 
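# [illustrative sketch] the splice helpers interleave a per-face vertex\n            # count (always 3 for triangles) ahead of each index triple, e.g.:\n            #\n            #   list(splice_into_every_n_start(iter([0, 1, 2, 3, 4, 5]),\n            #                                  iter(lambda: 3, 0), 3))\n            #   # -> [3, 0, 1, 2, 3, 3, 4, 5]\n            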
data_pool.tofile(f_handle)\n\n\n# NOTE: HOUDINI COMPATIBILITY\n# The parametric uvs for trianglemeshs do NOT match Houdini's. This is acceptable\n# since the common use case is to supply uvs. I believe it would be possible to\n# match the parametric uvs with pbrt however that means we'd lose the ability\n# to dump the various data arrays directly and slow things down.\ndef mesh_wrangler(gdp, paramset=None, properties=None):\n \"\"\"Outputs meshes (trianglemesh or loopsubdiv) depending on properties\n\n If the pbrt_rendersubd property is set and true, a loopsubdiv shape will\n be generated, otherwise a trianglemesh\n\n Args:\n gdp (hou.Geometry): Input geo\n paramset (ParamSet): Any base params to add to the shape. (Optional)\n properties (dict): Dictionary of SohoParms (Optional)\n Returns: None\n \"\"\"\n\n if properties is None:\n properties = {}\n\n mesh_paramset = ParamSet(paramset)\n\n # Triangle Meshes in PBRT uses \"vertices\" to denote positions.\n # These are similar to Houdini \"points\". Since the PBRT verts\n # are shared between primitives if hard edges or \"vertex normals\"\n # (Houdini-ese) are required then need to unique the points so\n # so each point can have its own normal.\n # To support this, if any of the triangle mesh params (N, uv, S)\n # are vertex attributes, then we'll uniquify the points.\n\n # We can only deal with triangles, where Houdini is a bit more\n # general, so we'll need to tesselate\n\n # If subdivs are turned on, instead of running the\n # trianglemesh wrangler, use the loop subdiv one instead\n\n gdp = scene_state.tesselate_geo(gdp)\n\n # Exit out if there are no prims\n if not any(gdp.iterPrims()):\n api.Comment(\"No primitives found\")\n return None\n\n shape = \"trianglemesh\"\n if \"pbrt_rendersubd\" in properties:\n if properties[\"pbrt_rendersubd\"].Value[0]:\n shape = \"loopsubdiv\"\n\n if shape == \"loopsubdiv\":\n wrangler_paramset = loopsubdiv_params(gdp)\n if \"levels\" in properties:\n wrangler_paramset.replace(properties[\"levels\"].to_pbrt())\n\n mesh_paramset.update(wrangler_paramset)\n api.Shape(shape, mesh_paramset)\n return None\n\n computeN = True\n if \"pbrt_computeN\" in properties:\n computeN = properties[\"pbrt_computeN\"].Value[0]\n\n output_geo = OutputGeo(gdp, computeN)\n\n # There are various conditions which must be met in order to save a ply file\n geofile_mode = properties[\"pbrt_allow_geofiles\"].Value[0]\n # Is the mode even enabled?\n save_ply = geofile_mode > 0\n # If the mode is 1 (threshold mode), are under the poly count we can't save a ply\n save_ply &= (\n False\n if (\n geofile_mode == 1\n and output_geo.num_prims <= properties[\"pbrt_geofile_threshold\"].Value[0]\n )\n else True\n )\n # If the geometry uses an S attribute which ply does not support we can't use it\n save_ply &= False if output_geo.has_S else True\n\n if save_ply:\n shape = \"plymesh\"\n save_locations = scene_state.get_geo_path_and_part(\n properties[\"pbrt_geo_location\"].Value[0],\n properties[\"object:soppath\"].Value[0],\n \"ply\",\n properties[\".time_dependent\"],\n )\n output_geo.save_ply(save_locations.save_path)\n wrangler_paramset = ParamSet()\n wrangler_paramset.add(PBRTParam(\"string\", \"filename\", save_locations.pbrt_path))\n else:\n wrangler_paramset = output_geo.mesh_params()\n\n mesh_paramset.update(wrangler_paramset)\n api.Shape(shape, mesh_paramset)\n\n return None\n\n\ndef loopsubdiv_params(mesh_gdp):\n \"\"\"Generates a ParamSet for a loopsubdiv\n\n The following attributes are checked for -\n P (point), built-in 
attribute\n\n Args:\n mesh_gdp (hou.Geometry): Input geo\n Returns: ParamSet of the attributes on the geometry\n \"\"\"\n\n mesh_paramset = ParamSet()\n\n P = array.array(\"f\")\n P.fromstring(mesh_gdp.pointFloatAttribValuesAsString(\"P\"))\n\n vertices = mesh_vtx_gen(mesh_gdp)\n indices = vtx_attrib_gen(vertices, None)\n\n mesh_paramset.add(PBRTParam(\"integer\", \"indices\", indices))\n mesh_paramset.add(PBRTParam(\"point\", \"P\", P))\n\n return mesh_paramset\n\n\n# NOTE: HOUDINI COMPATIBILITY\n# see comment at patch_vtx_gen()\ndef patch_wrangler(gdp, paramset=None, properties=None):\n if properties is None:\n properties = {}\n\n blast_verb = hou.sopNodeTypeCategory().nodeVerb(\"blast\")\n blast_verb.setParms({\"group\": '@intrinsic:connectivity!=\"quads\"', \"grouptype\": 4})\n blast_verb.execute(gdp, [gdp])\n\n # Exit out if there are no prims\n if not any(gdp.iterPrims()):\n api.Comment(\"No primitives found\")\n return None\n\n computeN = True\n if \"pbrt_computeN\" in properties:\n computeN = properties[\"pbrt_computeN\"].Value[0]\n\n emission_attrib = gdp.findPrimAttrib(\"emissionfilename\")\n if emission_attrib is None:\n if \"pbrt_emissionfilename\" in properties:\n emission_file = properties[\"pbrt_emissionfilename\"].Value[0]\n else:\n emission_file = \"\"\n patch_gdps = {emission_file: gdp}\n else:\n patch_gdps = partition_by_attrib(gdp, emission_attrib)\n\n with api.AttributeBlock():\n api.ReverseOrientation()\n\n for emission_file, emission_gdp in patch_gdps.iteritems():\n prim_paramset = ParamSet(paramset)\n if emission_file:\n prim_paramset.add(\n PBRTParam(\"string\", \"emissionfilename\", emission_file)\n )\n output_geo = OutputGeo(emission_gdp, computeN, is_patchmesh=True)\n wrangler_paramset = output_geo.mesh_params()\n prim_paramset.update(wrangler_paramset)\n\n api.Shape(\"bilinearmesh\", prim_paramset)\n\n return None\n\n\nclass VDBGrid(object):\n def __init__(self, prim):\n self.density = prim\n self.temperature = None\n\n def does_res_match(self):\n return True\n\n\ndef build_vdb_grid_list(sop_path, gdp):\n prims = gdp.prims()\n\n name_attrib = gdp.findPrimAttrib(\"name\")\n medium_grids_attrib = gdp.findPrimAttrib(\"medium_grids\")\n\n name_map = collections.defaultdict(set)\n medium_grids_map = collections.defaultdict(set)\n for prim in prims:\n name_map[prim.attribValue(name_attrib)].add(prim)\n medium_name = \"\"\n if medium_grids_attrib is not None:\n medium_name = prim.attribValue(medium_grids_attrib)\n medium_grids_map[medium_name].add(prim)\n\n # The only senarios which are valid are-\n\n # Scenario 1\n # We do not have any secondary fields so we don't have to worry about linking\n # density fields to temperature\n name_counts = collections.Counter(prim.attribValue(name_attrib) for prim in prims)\n if not name_counts[\"temperature\"]:\n return [VDBGrid(prim) for prim in prims]\n\n # Scenario 2\n # If a single temperature field and a density field exist but not a medium_grids\n # attribute we will infer that they are meant to be linked.\n if (\n name_counts[\"density\"] == 1\n and name_counts[\"temperature\"] == 1\n and medium_grids_attrib is None\n ):\n density_prim = gdp.globPrims(\"@name=density\")[0]\n temperature_prim = gdp.globPrims(\"@name=temperature\")[0]\n vdb = VDBGrid(density_prim)\n vdb.temperature = temperature_prim\n return [vdb]\n\n # Scenario 3\n # We have a medium_grids attribute and for each unqiue value we have only\n # one density, and 0 or 1 of our secondary field\n # But first we'll exit out if some of these conditions are not met.\n 
if (\n name_counts[\"density\"] > 1\n and name_counts[\"temperature\"] > 1\n and medium_grids_attrib is None\n ):\n soho.warning(\n \"{}: has multiple density and temperature VDBs and no way to link\"\n \" them, please delete the temperature fields or use a \"\n '\"medium_grids\" attribute'.format(sop_path)\n )\n return []\n\n grids = []\n for medium, medium_prims in medium_grids_map.iteritems():\n medium_counts = collections.Counter(\n prim.attribValue(name_attrib) for prim in medium_prims\n )\n # Issue warnings for poorly defined medium_grids.\n # The only cases that are valid are, 1 density to 0 or 1 temperature grids.\n # Or multiple density grids and 0 temperature grids\n if medium_counts[\"temperature\"] > 1:\n soho.warning(\n \"{}: has multiple temperature VDBs in a {}\".format(sop_path, medium)\n )\n continue\n if medium_counts[\"density\"] > 1 and medium_counts[\"temperature\"]:\n soho.warning(\n \"{}: the medium_grid {} has a mismatch of density and \"\n \"temperature VDBs\".format(sop_path, medium)\n )\n continue\n if not medium_counts[\"density\"]:\n soho.warning(\n \"{}: the medium_grid {} has no density VDB\".format(sop_path, medium)\n )\n continue\n\n if medium_counts[\"density\"] == 1 and medium_counts[\"temperature\"] in (0, 1):\n density_prim = name_map[\"density\"] & medium_prims\n if len(density_prim) != 1:\n soho.warning(\"{}: Invalid density and medium_grid\".format(sop_path))\n continue\n density_grid = VDBGrid(density_prim.pop())\n temperature_prim = name_map[\"temperature\"] & medium_prims\n if temperature_prim:\n density_grid.temperature = temperature_prim.pop()\n grids.append(density_grid)\n else:\n soho.warning(\"{}: density grid failure\".format(sop_path))\n\n return grids\n\n\ndef vdb_wrangler(gdp, paramset=None, properties=None):\n\n medium_paramset = ParamSet(paramset)\n\n # Perform a series of checks to see if we have a valid VDB\n if properties is None:\n properties = {}\n sop_path = None\n else:\n sop_path = properties[\"object:soppath\"].Value[0]\n\n if not properties[\"pbrt_allow_geofiles\"].Value[0]:\n return None\n\n if not scene_state.nanovdb_converter:\n return None\n\n if \"pbrt_ignorevolumes\" in properties and properties[\"pbrt_ignorevolumes\"].Value[0]:\n api.Comment(\"Ignoring volumes because pbrt_ignorevolumes is enabled\")\n return None\n\n name_attrib = gdp.findPrimAttrib(\"name\")\n if name_attrib is None:\n soho.warning(\"Skipping {}, VDB prims do not have name attrib\".format(sop_path))\n return None\n\n non_vdb_prims = gdp.globPrims(\"@name!=density,temperature\")\n gdp.deletePrims(non_vdb_prims)\n\n if not gdp.prims():\n soho.warning(\n \"Skipping {}, No VDBs prims named 'density' or 'temperature' found.\".format(\n sop_path\n )\n )\n return None\n\n medium_grids = build_vdb_grid_list(sop_path, gdp)\n if not medium_grids:\n return None\n\n for medium_grid in medium_grids:\n\n # Cull prims we are not interested in\n grid_prim_numbers = set([medium_grid.density.number()])\n if medium_grid.temperature is not None:\n grid_prim_numbers.add(medium_grid.temperature.number())\n\n medium_gdp = hou.Geometry(gdp)\n cull_prims = [\n prim\n for prim in medium_gdp.iterPrims()\n if prim.number() not in grid_prim_numbers\n ]\n medium_gdp.deletePrims(cull_prims)\n\n bbox = medium_gdp.boundingBox()\n\n # NOTE: We may want to reevaluate this with user testing and possibly use\n # tempdirs for the .vdb files instead?\n # See PBRTstate.get_geo_path_and_part for more details but the gist of this\n # we have 3 paths, the vdb path Houdini saves to. 
The nvdb which gain from\n # converting from the vdb path. Then ultimately the path of the nvdb file\n # in the pbrt scene file which might be a different relative path to the\n # one we exported.\n save_locations = scene_state.get_geo_path_and_part(\n properties[\"pbrt_geo_location\"].Value[0],\n sop_path,\n \"nvdb\",\n properties[\".time_dependent\"],\n )\n\n nvdb_path = save_locations.save_path\n\n pbrt_geo_dir = os.path.dirname(save_locations.pbrt_path)\n pbrt_geo_dir = \".\" if not pbrt_geo_dir else pbrt_geo_dir\n\n nvdb_basename = os.path.basename(nvdb_path)\n # Can't use os.path.join due to Houdini's / use on Windows\n pbrt_nvdb_path = \"{}/{}\".format(pbrt_geo_dir, nvdb_basename)\n\n if (\n \"{vdb}\" not in scene_state.nanovdb_converter\n or \"{nanovdb}\" not in scene_state.nanovdb_converter\n ):\n soho.error(\"'OpenVDB->NanoVDB Tool' needs {vdb} and {nanovdb} tokens\")\n medium_gdp.clear()\n return None\n\n soho.makeFilePathDirsIfEnabled(nvdb_path)\n\n with temporary_file(suffix=\".vdb\") as vdb_path:\n convert_str = scene_state.nanovdb_converter.format(\n vdb=vdb_path, nanovdb=nvdb_path\n )\n\n # We could pass the full string, but I prefer to use a list\n convert_args = shlex.split(convert_str)\n\n medium_gdp.saveToFile(vdb_path)\n try:\n proc = subprocess.Popen(\n convert_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n except OSError:\n soho.error(\n \"Failed to run {}\\n\"\n \"Convert String: {}\\n\"\n \"You can disable VDB conversion by setting the\"\n \"'OpenVDB->NanoVDB Tool' to be empty on the PBRT ROP\".format(\n convert_args[0], convert_str\n )\n )\n medium_gdp.clear()\n return None\n\n stdout, stderr = proc.communicate()\n\n if proc.returncode:\n soho.error(\n \"Failed to run {}\\n\"\n \"Convert String: {}\\n\"\n \"Convert Error: {}\\n\"\n \"You can disable VDB conversion by setting the\"\n \"'OpenVDB->NanoVDB Tool' to be empty on the PBRT ROP\".format(\n convert_args[0], convert_str, stderr\n )\n )\n medium_gdp.clear()\n return None\n\n vdb_paramset = ParamSet()\n\n # NOTE Full Point instancing might be an issue when using VDBs.\n # conversion might take forever, might need to cache based on\n # parameters and reuse?\n vdb_paramset |= medium_paramset\n if \"pbrt_interior\" in properties:\n interior = BaseNode.from_node(properties[\"pbrt_interior\"].Value[0])\n if interior is not None:\n if (\n interior.directive_type != \"nanovdb\"\n and interior.directive != \"medium\"\n ):\n soho.warning(\n \"{} is not a valid medium for {}\".format(\n interior.node.path(), sop_path\n )\n )\n else:\n vdb_paramset.update(interior.paramset)\n # These are special overrides that come from full point instancing.\n # It allows \"per point\" medium values to be \"stamped\" out to volume prims.\n interior_paramset = properties.get(\".interior_overrides\")\n if interior_paramset is not None:\n vdb_paramset.update(interior_paramset)\n\n exterior = None\n if \"pbrt_exterior\" in properties:\n exterior = properties[\"pbrt_exterior\"].Value[0]\n exterior = \"\" if exterior is None else exterior\n\n extra_attribs = [\n (\"float\", \"LeScale\"),\n (\"float\", \"temperaturecutoff\"),\n (\"float\", \"temperaturescale\"),\n (\"rgb\", \"sigma_a\"),\n (\"rgb\", \"sigma_s\"),\n ]\n medium_prim_overrides = medium_prim_paramset(\n medium_grid.density, extra_attribs=extra_attribs\n )\n vdb_paramset.update(medium_prim_overrides)\n\n # By default we'll set a sigma_a and sigma_s to be more Houdini-like\n # however the object's pbrt_interior, or prim's pbrt_interior\n # or prim attribs will override 
these.\n\n if (\n PBRTParam(\"rgb\", \"sigma_a\") not in vdb_paramset\n and PBRTParam(\"rgb\", \"sigma_s\") not in vdb_paramset\n ):\n vdb_paramset.add(PBRTParam(\"spectrum\", \"sigma_a\", [400.0, 0.0, 800.0, 0.0]))\n vdb_paramset.add(PBRTParam(\"spectrum\", \"sigma_s\", [400.0, 1.0, 800.0, 1.0]))\n\n medium_suffix = \"\"\n instance_info = properties.get(\".instance_info\")\n if instance_info is not None:\n medium_suffix = \":%s:%i\" % (instance_info.source, instance_info.number)\n\n medium_name = \"{}-{}{}\".format(sop_path, save_locations.part, medium_suffix)\n\n vdb_paramset.replace(PBRTParam(\"string\", \"filename\", pbrt_nvdb_path))\n with api.AttributeBlock():\n api.MakeNamedMedium(medium_name, \"nanovdb\", vdb_paramset)\n api.Material(\"interface\")\n api.MediumInterface(medium_name, exterior)\n vals = [x for pair in zip(bbox.minvec(), bbox.maxvec()) for x in pair]\n bounds_to_api_box(vals)\n\n medium_gdp.clear()\n\n return None\n\n\nclass FloatVolume(object):\n def __init__(self, den):\n self.den = den\n\n def prims(self):\n return (self.den,)\n\n @property\n def voxeldata(self):\n voxs = array.array(\"f\")\n voxs.fromstring(self.den.allVoxelsAsString())\n return voxs\n\n @property\n def ptype(self):\n return \"float\"\n\n\nclass RGBVolume(object):\n def __init__(self, r, g, b):\n self.r = r\n self.g = g\n self.b = b\n\n def prims(self):\n return (self.r, self.g, self.b)\n\n @property\n def voxeldata(self):\n tmp_voxs = array.array(\"f\")\n tmp_voxs.fromstring(self.r.allVoxelsAsString())\n voxs = array.array(\"f\", tmp_voxs * 3)\n\n # Set the r values\n voxs[0::3] = tmp_voxs\n del tmp_voxs[:]\n\n # Set the g values\n tmp_voxs.fromstring(self.g.allVoxelsAsString())\n voxs[1::3] = tmp_voxs\n del tmp_voxs[:]\n\n # And last, the b values\n tmp_voxs.fromstring(self.b.allVoxelsAsString())\n voxs[2::3] = tmp_voxs\n del tmp_voxs[:]\n\n return voxs\n\n @property\n def ptype(self):\n return \"rgb\"\n\n\nclass DensityGrid(object):\n def __init__(self, density):\n self.density = density\n self.temperature = None\n self.lescale = None\n\n @property\n def gridtype(self):\n return \"uniformgrid\"\n\n @property\n def resolution(self):\n return self.refprim.resolution()\n\n @property\n def refprim(self):\n return self.density.prims()[0]\n\n def does_res_match(self):\n resolutions = {res.resolution() for res in self.density.prims()}\n if self.lescale is not None:\n resolutions.add(self.lescale.resolution())\n if self.temperature is not None:\n resolutions.add(self.temperature.resolution())\n return len(resolutions) == 1\n\n def paramset(self):\n grid_paramset = ParamSet()\n grid_paramset.add(PBRTParam(\"float\", \"density\", self.density.voxeldata))\n if self.lescale is not None:\n grid_paramset.add(\n PBRTParam(\"float\", \"Lescale\", FloatVolume(self.lescale).voxeldata)\n )\n if self.temperature is not None:\n grid_paramset.add(\n PBRTParam(\n \"float\", \"temperature\", FloatVolume(self.temperature).voxeldata\n )\n )\n\n return grid_paramset\n\n @property\n def primnums(self):\n nums = [str(i.number()) for i in self.density.prims()]\n if self.lescale is not None:\n nums.append(str(self.lescale.number()))\n if self.temperature is not None:\n nums.append(str(self.temperature.number()))\n return \",\".join(nums)\n\n\nclass SigmaGrid(object):\n def __init__(self, sigma_a=None, sigma_s=None, Le=None):\n self.sigma_a = sigma_a\n self.sigma_s = sigma_s\n self.Le = Le\n\n @property\n def gridtype(self):\n return \"rgbgrid\"\n\n @property\n def resolution(self):\n return self.refprim.resolution()\n\n 
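# (Editor's note, added for clarity.) refprim below assumes at least one of\n    # sigma_a / sigma_s is set; that holds because build_uniform_grid_list only\n    # constructs a SigmaGrid when is_one_sigma is true.\n    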
@property\n    def refprim(self):\n        if self.sigma_a is not None:\n            return self.sigma_a.prims()[0]\n        return self.sigma_s.prims()[0]\n\n    def does_res_match(self):\n        resolutions = set()\n        if self.sigma_a is not None:\n            for prim in self.sigma_a.prims():\n                resolutions.add(prim.resolution())\n        if self.sigma_s is not None:\n            for prim in self.sigma_s.prims():\n                resolutions.add(prim.resolution())\n        if self.Le is not None:\n            for prim in self.Le.prims():\n                resolutions.add(prim.resolution())\n        return len(resolutions) == 1\n\n    def paramset(self):\n        grid_paramset = ParamSet()\n        if self.sigma_a is not None:\n            grid_paramset.add(PBRTParam(\"rgb\", \"sigma_a\", self.sigma_a.voxeldata))\n        if self.sigma_s is not None:\n            grid_paramset.add(PBRTParam(\"rgb\", \"sigma_s\", self.sigma_s.voxeldata))\n        if self.Le is not None:\n            grid_paramset.add(PBRTParam(\"rgb\", \"Le\", self.Le.voxeldata))\n\n        return grid_paramset\n\n    @property\n    def primnums(self):\n        nums = []\n        if self.sigma_a is not None:\n            nums.extend([str(i.number()) for i in self.sigma_a.prims()])\n        if self.sigma_s is not None:\n            nums.extend([str(i.number()) for i in self.sigma_s.prims()])\n        if self.Le is not None:\n            nums.extend([str(i.number()) for i in self.Le.prims()])\n        return \",\".join(nums)\n\n\ndef build_uniform_grid_list(sop_path, gdp):\n    prims = gdp.prims()\n\n    name_attrib = gdp.findPrimAttrib(\"name\")\n    medium_grids_attrib = gdp.findPrimAttrib(\"medium_grids\")\n\n    # The only scenarios which are valid are:\n    # The first few scenarios are a convenience and don't enforce a medium_grids\n    # attribute on the user\n\n    #######################################################\n    # Scenario 1\n    # No name attribute, we'll assume everything is density\n    #\n    # prims | name value\n    # ------------------\n    # 0 | None\n    # 1 | None\n\n    if name_attrib is None:\n        if \"SOHO_PBRT_DEV\" in os.environ:  # noqa # pragma: no coverage\n            api.Comment(\"Volume Scenario 1\")\n        return [DensityGrid(FloatVolume(prim)) for prim in prims]\n\n    name_map = collections.defaultdict(set)\n    medium_grids_map = collections.defaultdict(set)\n    res_map = collections.defaultdict(set)\n\n    rgb_density_renamer = {\n        \"sigma_a.x\": \"sigma_a.r\",\n        \"sigma_a.y\": \"sigma_a.g\",\n        \"sigma_a.z\": \"sigma_a.b\",\n        \"sigma_s.x\": \"sigma_s.r\",\n        \"sigma_s.y\": \"sigma_s.g\",\n        \"sigma_s.z\": \"sigma_s.b\",\n        \"Le.x\": \"Le.r\",\n        \"Le.y\": \"Le.g\",\n        \"Le.z\": \"Le.b\",\n    }\n\n    name_counts = collections.defaultdict(int)\n    for prim in prims:\n        res = tuple(prim.resolution())\n        res_map[res].add(prim)\n        name = prim.attribValue(name_attrib)\n        # Instead of dealing with two different variations of vectors\n        # we'll rename to rgb\n        name = rgb_density_renamer.get(name, name)\n        name_counts[name] += 1\n        name_map[name].add(prim)\n        medium_name = \"\"\n        if medium_grids_attrib is not None:\n            medium_name = prim.attribValue(medium_grids_attrib)\n        medium_grids_map[medium_name].add(prim)\n\n    #######################################################\n    # Scenario 2\n    # We just have density grids and no rgbs, sigmas or Lescale\n    #\n    # prims | name value\n    # ------------------\n    # 0 | density\n    # 1 | density\n    # 2 | density\n\n    if len(prims) == name_counts[\"density\"]:\n        if \"SOHO_PBRT_DEV\" in os.environ:  # noqa # pragma: no coverage\n            api.Comment(\"Volume Scenario 2\")\n        return [DensityGrid(FloatVolume(prim)) for prim in prims]\n\n    sigma_a_strs = (\"sigma_a.r\", \"sigma_a.g\", \"sigma_a.b\")\n    is_one_sigma_a = all(name_counts[c] == 1 for c in sigma_a_strs)\n    is_no_sigma_a = all(name_counts[c] == 0 for c in sigma_a_strs)\n\n    
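# (Editor's note, added for clarity.) An RGB field only counts when the full\n    # r/g/b triple is present: a partial triple (e.g. sigma_a.r without .g/.b)\n    # fails both the is_one_* and is_no_* tests, so it can never be promoted to\n    # an RGBVolume by any of the scenarios below.\n\n    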
sigma_s_strs = (\"sigma_s.r\", \"sigma_s.g\", \"sigma_s.b\")\n is_one_sigma_s = all(name_counts[c] == 1 for c in sigma_s_strs)\n is_no_sigma_s = all(name_counts[c] == 0 for c in sigma_s_strs)\n\n Le_strs = (\"Le.r\", \"Le.g\", \"Le.b\")\n is_one_Le = all(name_counts[c] == 1 for c in Le_strs) and is_one_sigma_a\n is_no_Le = all(name_counts[c] == 0 for c in Le_strs)\n\n is_no_sigma = is_no_sigma_s and is_no_sigma_a\n is_one_sigma = is_one_sigma_s or is_one_sigma_a\n\n #######################################################\n # Scenario 3\n # We have just one density grid or one sigma grid\n # and an optional Lescale\n #\n # prims | name value\n # ------------------\n # 0 | density\n # 1 | Lescale\n\n if medium_grids_attrib is None and (\n (\n name_counts[\"density\"] <= 1\n and name_counts[\"Lescale\"] <= 1\n and name_counts[\"temperature\"] <= 1\n )\n and ((is_one_sigma or is_no_sigma) and (is_one_Le or is_no_Le))\n ):\n if \"SOHO_PBRT_DEV\" in os.environ: # noqa # pragma: no coverage\n api.Comment(\"Volume Scenario 3\")\n grid_list = []\n if is_one_sigma:\n sigma_a = sigma_s = Le = None\n if is_one_sigma_a:\n sigma_a = RGBVolume(\n name_map[\"sigma_a.r\"].pop(),\n name_map[\"sigma_a.g\"].pop(),\n name_map[\"sigma_a.b\"].pop(),\n )\n\n if is_one_sigma_s:\n sigma_s = RGBVolume(\n name_map[\"sigma_s.r\"].pop(),\n name_map[\"sigma_s.g\"].pop(),\n name_map[\"sigma_s.b\"].pop(),\n )\n\n if is_one_Le:\n Le = RGBVolume(\n name_map[\"Le.r\"].pop(),\n name_map[\"Le.g\"].pop(),\n name_map[\"Le.b\"].pop(),\n )\n grid_list.append(SigmaGrid(sigma_a, sigma_s, Le))\n\n if name_counts[\"density\"] == 1:\n grid = DensityGrid(FloatVolume(name_map[\"density\"].pop()))\n if name_counts[\"Lescale\"] == 1:\n grid.lescale = name_map[\"Lescale\"].pop()\n if name_counts[\"temperature\"] == 1:\n grid.temperature = name_map[\"temperature\"].pop()\n grid_list.append(grid)\n\n return grid_list\n\n #######################################################\n # Scenario 4\n # We have no Lescale, temperature, but >=0 density and 0 or 1 density.rgb and\n # 0 or 1 sigma_[as]/Le\n #\n # prims | name value\n # ------------------\n # 0 | density\n # 1 | density\n # 2,3,4 | sigma_a.[rgb]\n # 5,6,7 | sigma_s.[rgb]\n # 8,9,10| Le.[rgb]\n\n if (\n (\n name_counts[\"density\"] >= 0\n and not name_counts[\"Lescale\"]\n and not name_counts[\"temperature\"]\n )\n and ((is_one_sigma or is_no_sigma) and (is_one_Le or is_no_Le))\n and medium_grids_attrib is None\n ):\n if \"SOHO_PBRT_DEV\" in os.environ: # noqa # pragma: no coverage\n api.Comment(\"Volume Scenario 4\")\n\n grid_list = [DensityGrid(FloatVolume(x)) for x in name_map[\"density\"]]\n if is_one_sigma:\n sigma_a = sigma_s = Le = None\n if is_one_sigma_a:\n sigma_a = RGBVolume(\n name_map[\"sigma_a.r\"].pop(),\n name_map[\"sigma_a.g\"].pop(),\n name_map[\"sigma_a.b\"].pop(),\n )\n\n if is_one_sigma_s:\n sigma_s = RGBVolume(\n name_map[\"sigma_s.r\"].pop(),\n name_map[\"sigma_s.g\"].pop(),\n name_map[\"sigma_s.b\"].pop(),\n )\n\n if is_one_Le:\n Le = RGBVolume(\n name_map[\"Le.r\"].pop(),\n name_map[\"Le.g\"].pop(),\n name_map[\"Le.b\"].pop(),\n )\n\n grid_list.append(SigmaGrid(sigma_a, sigma_s, Le))\n return grid_list\n\n #######################################################\n # Scenario 5\n # From this point on we won't be able to derive pairings without using the\n # medium_grids attribute. 
Exit out if we don't fit basic requirements\n\n if medium_grids_attrib is None:\n if \"SOHO_PBRT_DEV\" in os.environ: # noqa # pragma: no coverage\n api.Comment(\"Volume Scenarios Failed\")\n soho.warning(\n \"{}: has density/sigma_[as]/Lescale/temperature/Le and no way to link\"\n \" them, please use a medium_grids attribute\".format(sop_path)\n )\n return []\n\n if \"SOHO_PBRT_DEV\" in os.environ: # noqa # pragma: no coverage\n api.Comment(\"Volume Scenario 5\")\n grids = []\n for medium, medium_prims in medium_grids_map.iteritems():\n\n medium_counts = collections.defaultdict(int)\n for prim in medium_prims:\n name = prim.attribValue(name_attrib)\n name = rgb_density_renamer.get(name, name)\n medium_counts[name] += 1\n\n is_one_sigma_a = all(medium_counts[c] == 1 for c in sigma_a_strs)\n is_no_sigma_a = all(medium_counts[c] == 0 for c in sigma_a_strs)\n\n is_one_sigma_s = all(medium_counts[c] == 1 for c in sigma_s_strs)\n is_no_sigma_s = all(medium_counts[c] == 0 for c in sigma_s_strs)\n\n is_one_Le = all(medium_counts[c] == 1 for c in Le_strs) and is_one_sigma_a\n is_no_Le = all(medium_counts[c] == 0 for c in Le_strs)\n\n is_no_sigma = is_no_sigma_s and is_no_sigma_a\n is_one_sigma = is_one_sigma_s or is_one_sigma_a\n\n if (\n ## density and Lescale\n medium_counts[\"density\"] == 1\n and medium_counts[\"Lescale\"] <= 1\n and medium_counts[\"temperature\"] <= 1\n and is_no_sigma\n ):\n density_prim = name_map[\"density\"] & medium_prims\n if len(density_prim) != 1:\n soho.warning(\"{}: Invalid density and medium_grid\".format(sop_path))\n density_grid = DensityGrid(FloatVolume(density_prim.pop()))\n lescale_prim = name_map[\"Lescale\"] & medium_prims\n if lescale_prim:\n density_grid.lescale = lescale_prim.pop()\n temperature_prim = name_map[\"temperature\"] & medium_prims\n if temperature_prim:\n density_grid.temperature = temperature_prim.pop()\n grids.append(density_grid)\n elif is_one_sigma:\n sigma_a = sigma_s = Le = None\n if is_one_sigma_a:\n sigma_a = RGBVolume(\n (name_map[\"sigma_a.r\"] & medium_prims).pop(),\n (name_map[\"sigma_a.g\"] & medium_prims).pop(),\n (name_map[\"sigma_a.b\"] & medium_prims).pop(),\n )\n\n if is_one_sigma_s:\n sigma_s = RGBVolume(\n (name_map[\"sigma_s.r\"] & medium_prims).pop(),\n (name_map[\"sigma_s.g\"] & medium_prims).pop(),\n (name_map[\"sigma_s.b\"] & medium_prims).pop(),\n )\n\n if is_one_Le:\n Le = RGBVolume(\n (name_map[\"Le.r\"] & medium_prims).pop(),\n (name_map[\"Le.g\"] & medium_prims).pop(),\n (name_map[\"Le.b\"] & medium_prims).pop(),\n )\n\n grids.append(SigmaGrid(sigma_a, sigma_s, Le))\n else:\n soho.warning(\n \"{}: Can not map density grids for {}\".format(sop_path, medium)\n )\n\n return grids\n\n\ndef volume_wrangler(gdp, paramset=None, properties=None):\n\n # Houdini only supports one type of Volume primitive to be in a geometry network.\n # So if both a heightfield and a fog volume exists, the heightfield takes priority\n # and the fog volumes are ignored. 
We'll follow that logic here, but instead of\n    # rendering the heightfield or volume we'll exit out since pbrt-v4 does not support\n    # heightfields.\n\n    if properties is None:\n        properties = {}\n        sop_path = None\n    else:\n        sop_path = properties[\"object:soppath\"].Value[0]\n\n    if \"pbrt_ignorevolumes\" in properties and properties[\"pbrt_ignorevolumes\"].Value[0]:\n        api.Comment(\"Ignoring volumes because pbrt_ignorevolumes is enabled\")\n        return None\n\n    prims = gdp.prims()\n    if any(prim.isHeightField() for prim in prims):\n        api.Comment(\"Heightfields are not supported\")\n        return None\n\n    # Filter out any SDFs as those are not supported either\n    prims = [prim for prim in prims if not prim.isSDF()]\n\n    # Houdini Mantra's workflow for rendering volumes is a little difficult to match\n    # to pbrt. Any fog volume primitives are rendered, with the density field by\n    # default being used as the acceleration structure. (This can be overridden.)\n    # This means you can have multiple density volumes and all are rendered. As those\n    # are being rendered, any other primitives with names that match parameters are\n    # bound. Which means you can have multiple volume primitives with names like\n    # temperature or Cd.[xyz]. Because of this it is hard to match up the sets of\n    # volume fields that are associated with each other, which we need to do when\n    # declaring mediums. Potential options include:\n    # * Volume Merge all same-named fields into one.\n    # * Only render the first discovered density and corresponding fields pbrt\n    #   understands.\n    # * Render all density volumes; the first found Cd gets mapped to the first\n    #   found density.\n    # * Match any volume prims that have the same x,y,z sample grid dimensions\n    #   as density.\n    # Further complicating things, pbrt can render either density or density.[rgb].\n    #\n    # The approach we will take is to require a medium_grids attribute to group\n    # prims together that form a medium. We'll derive some of the base mappings\n    # automatically if the medium_grids attribute does not exist.\n\n    grids = build_uniform_grid_list(sop_path, gdp)\n    smoke_prim_wrangler(grids, paramset, properties)\n\n    return None\n\n\ndef bounds_to_api_box(b):\n    \"\"\"Output a trianglemesh Shape of a box based on the input bounds\"\"\"\n\n    paramset = ParamSet()\n    paramset.add(\n        PBRTParam(\n            \"point\",\n            \"P\",\n            [\n                # fmt: off\n                b[1], b[2], b[5],\n                b[0], b[2], b[5],\n                b[1], b[3], b[5],\n                b[0], b[3], b[5],\n                b[0], b[2], b[4],\n                b[1], b[2], b[4],\n                b[0], b[3], b[4],\n                b[1], b[3], b[4],\n                # fmt: on\n            ],\n        )\n    )\n    paramset.add(\n        PBRTParam(\n            \"integer\",\n            \"indices\",\n            [\n                # fmt: off\n                0, 3, 1,\n                0, 2, 3,\n                4, 7, 5,\n                4, 6, 7,\n                6, 2, 7,\n                6, 3, 2,\n                5, 1, 4,\n                5, 0, 1,\n                5, 2, 0,\n                5, 7, 2,\n                1, 6, 4,\n                1, 3, 6,\n                # fmt: on\n            ],\n        )\n    )\n    api.Shape(\"trianglemesh\", paramset)\n\n\n# NOTE: In pbrt the medium interface and shading parameters\n# are strongly coupled, unlike in Houdini/Mantra where\n# the volume shaders define the volume properties and\n# the volume primitives only define grids.\n#\n\n\ndef medium_prim_paramset(prim, paramset=None, extra_attribs=()):\n    \"\"\"Build a ParamSet of medium values based off of hou.Prim attribs\"\"\"\n    medium_paramset = ParamSet(paramset)\n\n    # NOTE:\n    # Testing for prim attribs on each prim is a bit redundant but\n    # in general it's not an issue as you won't have huge numbers of\n    # volumes. If this does become an issue, attribs can be stored in\n    # a dict and searched from there. 
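# (Editor's sketch of that caching idea; untested, not used by this code.)\n    #   attrib_names = {a.name() for a in prim.geometry().primAttribs()}\n    #   has_g = \"g\" in attrib_names\n    # 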
(This includes evaluating the\n # pbrt_interior node.\n\n # Initialize with the interior shader on the prim, if it exists.\n try:\n interior = prim.stringAttribValue(\"pbrt_interior\")\n interior = BaseNode.from_node(interior)\n except hou.OperationFailed:\n interior = None\n\n if interior and interior.directive == \"medium\":\n medium_paramset |= interior.paramset\n\n try:\n g_value = prim.floatAttribValue(\"g\")\n medium_paramset.replace(PBRTParam(\"float\", \"g\", g_value))\n except hou.OperationFailed:\n pass\n\n try:\n scale_value = prim.floatAttribValue(\"scale\")\n medium_paramset.replace(PBRTParam(\"float\", \"scale\", scale_value))\n except hou.OperationFailed:\n pass\n\n if not extra_attribs:\n return medium_paramset\n\n # This won't support every possible param types, just the\n # ones that are common (known)\n for attrib_type, name in extra_attribs:\n try:\n if attrib_type == \"float\":\n val = prim.floatAttribValue(name)\n elif attrib_type == \"integer\":\n val = prim.intAttribValue(name)\n elif attrib_type == \"rgb\":\n val = prim.floatListAttribValue(name)\n if len(val) != 3:\n continue\n elif attrib_type == \"string\":\n val = prim.stringAttribValue(name)\n else:\n continue\n except hou.OperationFailed:\n continue\n medium_paramset.replace(PBRTParam(attrib_type, name, val))\n\n return medium_paramset\n\n\ndef smoke_prim_wrangler(grids, paramset=None, properties=None):\n \"\"\"Outputs a \"uniformgrid\" Medium and bounding Shape for the input geometry\n\n The following attributes are checked for via medium_prim_paramset() -\n (See pbrt_medium node for what each parm does)\n pbrt_interior (prim), string\n g (prim), float\n scale (prim), float\n\n Args:\n prims (list of hou.Prims): Input prims\n paramset (ParamSet): Any base params to add to the shape. 
(Optional)\n properties (dict): Dictionary of SohoParms (Optional)\n Returns: None\n \"\"\"\n\n if properties is None:\n properties = {}\n sop_path = None\n else:\n sop_path = properties[\"object:soppath\"].Value[0]\n\n medium_suffix = \"\"\n instance_info = properties.get(\".instance_info\")\n if instance_info is not None:\n medium_suffix = \":%s:%i\" % (instance_info.source, instance_info.number)\n\n exterior = None\n if \"pbrt_exterior\" in properties:\n exterior = properties[\"pbrt_exterior\"].Value[0]\n exterior = \"\" if exterior is None else exterior\n\n for grid in grids:\n\n if not grid.does_res_match():\n soho.warning(\n \"{}: Skipping volumes that do not have matching resolutions\".format(\n sop_path\n )\n )\n continue\n\n medium_paramset = ParamSet()\n if \"pbrt_interior\" in properties:\n interior = BaseNode.from_node(properties[\"pbrt_interior\"].Value[0])\n if (\n interior is not None\n and interior.directive == \"medium\"\n and interior.directive_type == grid.gridtype\n ):\n medium_paramset |= interior.paramset\n # These are special overrides that come from full point instancing.\n # It allows \"per point\" medium values to be \"stamped\" out to volume prims.\n interior_paramset = properties.get(\".interior_overrides\")\n if interior_paramset is not None:\n medium_paramset.update(interior_paramset)\n\n smoke_paramset = grid.paramset()\n\n medium_name = \"%s[%s]%s\" % (sop_path, grid.primnums, medium_suffix)\n\n smoke_paramset.add(PBRTParam(\"integer\", \"nx\", grid.resolution[0]))\n smoke_paramset.add(PBRTParam(\"integer\", \"ny\", grid.resolution[1]))\n smoke_paramset.add(PBRTParam(\"integer\", \"nz\", grid.resolution[2]))\n smoke_paramset.add(PBRTParam(\"point\", \"p0\", [-1, -1, -1]))\n smoke_paramset.add(PBRTParam(\"point\", \"p1\", [1, 1, 1]))\n\n if grid.gridtype == \"uniformgrid\":\n extra_attribs = [(\"rgb\", \"Le\"), (\"rgb\", \"sigma_a\"), (\"rgb\", \"sigma_s\")]\n else:\n extra_attribs = [(\"float\", \"LeScale\")]\n medium_prim_overrides = medium_prim_paramset(\n grid.refprim, medium_paramset, extra_attribs=extra_attribs\n )\n smoke_paramset.update(medium_prim_overrides)\n smoke_paramset |= paramset\n\n # By default we'll set a sigma_a and sigma_s to be more Houdini-like\n # however the object's pbrt_interior, or prim's pbrt_interior\n # or prim attribs will override these.\n sigma_params = ParamSet(\n [PBRTParam(\"rgb\", \"sigma_a\"), PBRTParam(\"rgb\", \"sigma_s\")]\n )\n\n if not (sigma_params & smoke_paramset) and grid.gridtype == \"uniformgrid\":\n smoke_paramset.add(\n PBRTParam(\"spectrum\", \"sigma_a\", [400.0, 0.0, 800.0, 0.0])\n )\n smoke_paramset.add(\n PBRTParam(\"spectrum\", \"sigma_s\", [400.0, 1.0, 800.0, 1.0])\n )\n\n with api.AttributeBlock():\n xform = prim_transform(grid.refprim)\n api.ConcatTransform(xform)\n api.MakeNamedMedium(medium_name, grid.gridtype, smoke_paramset)\n api.Material(\"interface\")\n api.MediumInterface(medium_name, exterior)\n # Pad this slightly?\n bounds_to_api_box([-1, 1, -1, 1, -1, 1])\n\n return\n\n\ndef _convert_nurbs_to_bezier(gdp):\n \"\"\"Convert any NURBS Curves to Beziers\n\n Due to how knots are interrupted between Houdini and PBRT we won't be able to\n map NURBS to B-Splines. To work around this we just convert to Bezier degree 4\n curves, which is what PBRT is doing internally as well. 
\"yolo\"\n\n Args:\n gdp (hou.Geometry): Input geo\n Returns: None (Replaces input gdp)\n \"\"\"\n\n # The Convert SOP is only available as a Verb in H17.5 and greater\n if hou.applicationVersion() < HVER_17_5:\n return\n\n convert_verb = hou.sopNodeTypeCategory().nodeVerb(\"convert\")\n # fromtype: \"nurbCurve\", totype: \"bezCurve\"\n convert_verb.setParms({\"fromtype\": 9, \"totype\": 2})\n convert_verb.execute(gdp, [gdp])\n return\n\n\n# NOTE: HOUDINI COMPATIBILITY\n# The parametric uvs on curves do not match Houdini, v is flipped.\ndef curve_wrangler(gdp, paramset=None, properties=None):\n \"\"\"Outputs a \"curve\" Shape for input geometry\n\n The following attributes are checked for -\n\n P (point), built-in attribute\n width (vertex/point/prim), float\n N (vertex/point), float[3]\n curvetype (prim), string (overrides the property pbrt_curvetype)\n\n Args:\n gdp (hou.Geometry): Input geo\n paramset (ParamSet): Any base params to add to the shape. (Optional)\n properties (dict): Dictionary of SohoParms (Optional)\n Returns: None\n \"\"\"\n\n if properties is None:\n properties = {}\n sop_path = None\n else:\n sop_path = properties[\"object:soppath\"].Value[0]\n\n shape_paramset = ParamSet(paramset)\n\n curve_type = None\n if \"pbrt_curvetype\" in properties:\n curve_type = properties[\"pbrt_curvetype\"].Value[0]\n shape_paramset.add(PBRTParam(\"string\", \"type\", curve_type))\n if \"splitdepth\" in properties:\n shape_paramset.add(properties[\"splitdepth\"].to_pbrt())\n\n _convert_nurbs_to_bezier(gdp)\n\n has_vtx_width = False if gdp.findVertexAttrib(\"width\") is None else True\n has_pt_width = False if gdp.findPointAttrib(\"width\") is None else True\n has_prim_width = False if gdp.findPrimAttrib(\"width\") is None else True\n has_prim_width01 = False\n if (\n gdp.findPrimAttrib(\"width0\") is not None\n and gdp.findPrimAttrib(\"width1\") is not None\n ):\n has_prim_width01 = True\n\n has_curvetype = False if gdp.findPrimAttrib(\"curvetype\") is None else True\n\n has_vtx_N = False if gdp.findVertexAttrib(\"N\") is None else True\n has_pt_N = False if gdp.findPointAttrib(\"N\") is None else True\n\n for prim in gdp.prims():\n\n curve_paramset = ParamSet()\n prim_curve_type = curve_type\n\n # Closed curve surfaces are not supported\n if prim.intrinsicValue(\"closed\"):\n continue\n\n order = prim.intrinsicValue(\"order\")\n degree = order - 1\n # PBRT only supports degree 2 or 3 curves\n # NOTE: We could possibly convert the curves to a format that\n # pbrt supports but for now we'll expect the user to have\n # a curve basis which is supported\n # https://www.codeproject.com/Articles/996281/NURBS-crve-made-easy\n if degree not in (2, 3):\n continue\n curve_paramset.add(PBRTParam(\"integer\", \"degree\", degree))\n\n if prim.intrinsicValue(\"typename\") == \"BezierCurve\":\n basis = \"bezier\"\n else:\n # In Houdini 17.5 and greater we convert everything to bezier,\n # for Houdini 17 this isn't possible so we instead we skip them\n # basis = \"bspline\"\n continue\n curve_paramset.add(PBRTParam(\"string\", \"basis\", [basis]))\n\n P = [pt.attribValue(\"P\") for pt in prim.points()]\n curve_paramset.add(PBRTParam(\"point\", \"P\", P))\n\n if has_curvetype:\n prim_val = prim.attribValue(\"curvetype\")\n prim_curve_type = prim_val if prim_val else curve_type\n\n if prim_curve_type is not None:\n curve_paramset.add(PBRTParam(\"string\", \"type\", [prim_curve_type]))\n\n if prim_curve_type == \"ribbon\":\n\n if has_vtx_N or has_pt_N:\n N = (prim.attribValueAt(\"N\", u) for u in 
prim.intrinsicValue(\"knots\"))\n else:\n # If ribbon, normals must exist\n # NOTE: pbrt-v4 requires normals when rendering ribbon curves. If the\n # user didn't supply them we'll set them here\n soho.warning(\n \"{} has ribbon curves without normals, \"\n \"defaulting to [0,0,1]\".format(sop_path)\n )\n N = [(0, 0, 1)] * len(prim.intrinsicValue(\"knots\"))\n\n if N is not None:\n curve_paramset.add(PBRTParam(\"normal\", \"N\", N))\n\n if has_vtx_width:\n curve_paramset.add(\n PBRTParam(\"float\", \"width0\", prim.vertex(0).attribValue(\"width\"))\n )\n curve_paramset.add(\n PBRTParam(\"float\", \"width1\", prim.vertex(-1).attribValue(\"width\"))\n )\n elif has_pt_width:\n curve_paramset.add(\n PBRTParam(\n \"float\", \"width0\", prim.vertex(0).point().attribValue(\"width\")\n )\n )\n curve_paramset.add(\n PBRTParam(\n \"float\", \"width1\", prim.vertex(-1).point().attribValue(\"width\")\n )\n )\n elif has_prim_width01:\n curve_paramset.add(PBRTParam(\"float\", \"width0\", prim.attribValue(\"width0\")))\n curve_paramset.add(PBRTParam(\"float\", \"width1\", prim.attribValue(\"width1\")))\n elif has_prim_width:\n curve_paramset.add(PBRTParam(\"float\", \"width\", prim.attribValue(\"width\")))\n else:\n # Houdini's default matches a width of 0.05\n curve_paramset.add(PBRTParam(\"float\", \"width\", 0.05))\n\n curve_paramset |= shape_paramset\n api.Shape(\"curve\", curve_paramset)\n return\n\n\ndef tesselated_wrangler(gdp, paramset=None, properties=None):\n \"\"\"Wrangler for any geo that needs to be tesselated\"\"\"\n prim_name = gdp.iterPrims()[0].intrinsicValue(\"typename\")\n api.Comment(\n \"%s prims is are not directly supported, they will be tesselated\" % prim_name\n )\n mesh_wrangler(gdp, paramset, properties)\n return\n\n\ndef not_supported(gdp, paramset=None, properties=None):\n \"\"\"Wrangler for unsupported prim types\"\"\"\n num_prims = len(gdp.iterPrims())\n prim_name = gdp.iterPrims()[0].intrinsicValue(\"typename\")\n api.Comment(\"Ignoring %i prims, %s is not supported\" % (num_prims, prim_name))\n return\n\n\nshape_wranglers = {\n \"Sphere\": sphere_wrangler,\n \"Circle\": disk_wrangler,\n \"Tube\": tube_wrangler,\n \"Poly\": mesh_wrangler,\n \"Mesh\": patch_wrangler,\n \"PolySoup\": mesh_wrangler,\n \"NURBMesh\": tesselated_wrangler,\n \"BezierCurve\": curve_wrangler,\n \"NURBCurve\": curve_wrangler,\n \"Volume\": volume_wrangler,\n \"VDB\": vdb_wrangler,\n \"PackedDisk\": packeddisk_wrangler,\n \"TriFan\": tesselated_wrangler,\n \"TriStrip\": tesselated_wrangler,\n \"TriBezier\": tesselated_wrangler,\n \"BezierMesh\": tesselated_wrangler,\n \"PasteSurf\": tesselated_wrangler,\n \"MetaBall\": tesselated_wrangler,\n \"MetaSQuad\": tesselated_wrangler,\n \"Tetrahedron\": tesselated_wrangler,\n}\n\n\ndef partition_by_intrinsic(input_gdp, intrinsic):\n \"\"\"Partition the input geo based on a prim intrinsic\n\n Args:\n input_gdp (hou.Geometry): Incoming geometry, not modified\n intrinsic (str): Intrinsic name\n Returns:\n Dictionary of hou.Geometry with keys of the intrinsic value.\n \"\"\"\n\n prim_values = set()\n for prim in input_gdp.iterPrims():\n prim_values.add(prim.intrinsicValue(intrinsic))\n\n split_gdps = {}\n\n blast_verb = hou.sopNodeTypeCategory().nodeVerbs()[\"blast\"]\n\n needs_escape_pat = re.compile('([][\"*?])')\n\n for prim_value in prim_values:\n escaped_value = needs_escape_pat.sub(r\"\\\\\\1\", prim_value)\n blast_verb.setParms(\n {\n \"negate\": 1,\n \"grouptype\": 4,\n \"group\": '@intrinsic:{}=\"{}\"'.format(intrinsic, escaped_value),\n }\n )\n\n gdp = 
hou.Geometry()\n blast_verb.execute(gdp, [input_gdp])\n split_gdps[prim_value] = gdp\n\n return split_gdps\n\n\ndef partition_by_attrib(input_gdp, attrib):\n \"\"\"Partition the input geo based on a attribute\n\n Args:\n input_gdp (hou.Geometry): Incoming geometry, not modified\n attrib (str, hou.Attrib): Attribute to partition by\n Returns:\n Dictionary of hou.Geometry with keys of the attrib value.\n \"\"\"\n\n attrib_name = attrib\n\n if isinstance(attrib, hou.Attrib):\n attrib_name = attrib.name()\n else:\n attrib = input_gdp.findPrimAttrib(attrib)\n\n if attrib.size() > 1:\n raise ValueError(\"Primitive attribute must be size 1\")\n\n sort_verb = hou.sopNodeTypeCategory().nodeVerbs()[\"sort\"]\n sort_verb.setParms({\"primsort\": 11, \"primattrib\": attrib_name})\n sort_verb.execute(input_gdp, [input_gdp])\n\n if attrib.dataType() == hou.attribData.String:\n prim_values = input_gdp.primStringAttribValues(attrib_name)\n elif attrib.dataType() == hou.attribData.Int:\n prim_values = input_gdp.primIntAttribValues(attrib_name)\n elif attrib.dataType() == hou.attribData.Float:\n prim_values = input_gdp.primFloatAttribValues(attrib_name)\n else:\n raise ValueError(\"Invalid attribute type\")\n\n split_gdps = {}\n\n def _put_in_cache(v, cache):\n if v in cache:\n return False\n cache.add(v)\n return True\n\n cache = set()\n run_lengths = [(v, i) for i, v in enumerate(prim_values) if _put_in_cache(v, cache)]\n run_lengths.append((prim_values[-1], len(prim_values)))\n\n blast_verb = hou.sopNodeTypeCategory().nodeVerbs()[\"blast\"]\n\n for i, encoded_v in enumerate(run_lengths[:-1]):\n prim_value, start = encoded_v\n end = run_lengths[i + 1][1] - 1\n blast_verb.setParms(\n {\"negate\": 1, \"grouptype\": 4, \"group\": \"{}-{}\".format(start, end)}\n )\n\n gdp = hou.Geometry()\n blast_verb.execute(gdp, [input_gdp])\n split_gdps[prim_value] = gdp\n\n return split_gdps\n\n\ndef output_geo(soppath, now, properties=None):\n \"\"\"Output the geometry by calling the appropriate wrangler\n\n Geometry is partitioned into subparts based on the shop_materialpath\n and material_override prim attributes.\n\n Args:\n soppath (str): oppath to SOP\n properties (dict, None): Dictionary of SohoParms\n (Optional, defaults to None)\n Returns:\n None\n \"\"\"\n\n # split by material\n # split by geo type\n # if mesh type, split by material override\n # else deal with overrides per prim\n #\n # NOTE: We won't be splitting based on medium interior/exterior\n # those will be left as a object level assignment only.\n # Note, that in the case of Houdini Volumes they will look\n # for the appropriate medium parameters as prim vars\n\n if properties is None:\n properties = {}\n\n ignore_materials = False\n if \"pbrt_ignorematerials\" in properties:\n ignore_materials = properties[\"pbrt_ignorematerials\"].Value[0]\n\n # Houdini / Mantra allows for shop_materialpaths on both prims and details\n # at the same time. However prims full stomp over detail. If you have a prim\n # with an empty material assignment, it will NOT fall back to the detail\n # assignment. (It will fall back to the object since that is further up the\n # stack). This means if the shop_materialpath exists on the prim, the\n # detail is ignored entirely.\n\n # PBRT allows setting Material parameters on the Shapes in order to\n # override a material's settings. 
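# (Editor's illustration: hand-written pbrt, not produced by this module.)\n    #   Material \"diffuse\" \"rgb reflectance\" [ 0.8 0.8 0.8 ]\n    #   Shape \"sphere\" \"float radius\" [ 2 ] \"rgb reflectance\" [ 1 0 0 ]\n    # Here the Shape's reflectance parameter wins over the Material's.\n    # 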
(Shapes get checked first)\n # This paramset will be for holding those overrides and passing\n # them down to the actual shape api calls.\n\n # We need the soppath to come along and since we are creating new\n # hou.Geometry() we'll lose the original sop connection so we need\n # to stash it here.\n\n node = hou.node(soppath)\n if node is None or node.type().category() != hou.sopNodeTypeCategory():\n return\n\n properties[\".time_dependent\"] = node.isTimeDependent()\n\n input_gdp = node.geometry()\n if input_gdp is None:\n return\n gdp = hou.Geometry()\n gdp.merge(input_gdp.freeze())\n\n default_material = \"\"\n default_override = \"\"\n if not ignore_materials:\n try:\n default_material = gdp.stringAttribValue(\"shop_materialpath\")\n except hou.OperationFailed:\n pass\n if default_material not in scene_state.shading_nodes:\n default_material = \"\"\n\n try:\n default_override = gdp.stringAttribValue(\"material_override\")\n except hou.OperationFailed:\n default_override = \"\"\n\n # These handles are only valid until until we clear the geo\n prim_material_h = gdp.findPrimAttrib(\"shop_materialpath\")\n prim_override_h = gdp.findPrimAttrib(\"material_override\")\n\n has_prim_overrides = bool(\n not ignore_materials\n and prim_override_h is not None\n and prim_material_h is not None\n )\n\n if prim_material_h is not None and not ignore_materials:\n material_gdps = partition_by_attrib(gdp, prim_material_h)\n gdp.clear()\n del gdp\n else:\n material_gdps = {default_material: gdp}\n\n # The gdp these point to may have been cleared\n del prim_override_h\n del prim_material_h\n\n instance_info = properties.get(\".instance_info\")\n\n for material, material_gdp in material_gdps.iteritems():\n\n if material not in scene_state.shading_nodes:\n if material in scene_state.invalid_shading_nodes:\n api.Comment(\"Did not apply %s as it was not a PBRT material\" % material)\n material_node = None\n else:\n api.AttributeBegin()\n api.NamedMaterial(material)\n material_node = MaterialNode(material)\n\n shape_gdps = partition_by_intrinsic(material_gdp, \"typename\")\n material_gdp.clear()\n del material_gdp\n\n shape_count = 0\n for shape, shape_gdp in shape_gdps.iteritems():\n\n # Aggregate overrides, instead of per prim\n if has_prim_overrides:\n override_attrib_h = shape_gdp.findPrimAttrib(\"material_override\")\n override_gdps = partition_by_attrib(shape_gdp, override_attrib_h)\n shape_gdp.clear()\n del shape_gdp\n del override_attrib_h\n else:\n override_gdps = {default_override: shape_gdp}\n\n override_count = 0\n for override_str, override_gdp in override_gdps.iteritems():\n\n node_cache = {}\n param_cache = {}\n\n base_paramset = ParamSet()\n base_paramset |= primitive_alpha_texs(properties)\n\n if override_str:\n suffix = create_suffix(\n soppath,\n shape_count,\n override_count,\n instance_info=instance_info,\n )\n api.AttributeBegin()\n override_count += 1\n overrides = eval(override_str, {}, {})\n\n wrangle_shading_network(\n material,\n use_named=False,\n exported_nodes=set(),\n name_suffix=suffix,\n overrides=overrides,\n node_cache=node_cache,\n param_cache=param_cache,\n )\n\n # At this point the gdps are partitioned\n # * First by material\n # * Then by primitive type\n # * Lastly, for each different material override there is additional\n # partitioning\n #\n # At this point we will NOT have varying types or materials within the\n # shape_wrangler.\n\n properties[\".shape_count\"] = shape_count\n properties[\".override_count\"] = override_count\n\n shape_wrangler = 
shape_wranglers.get(shape, not_supported)\n                if shape_wrangler:\n                    shape_wrangler(override_gdp, base_paramset, properties)\n                override_gdp.clear()\n                if override_str:\n                    api.AttributeEnd()\n\n            shape_count += 1\n\n        if material_node is not None:\n            api.AttributeEnd()\n    return\n","sub_path":"soho/python2.7/PBRTgeo.py","file_name":"PBRTgeo.py","file_ext":"py","file_size_in_byte":80529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}\n+{"seq_id":"322109046","text":"from concurrent import futures\nimport grpc\nimport ping_pb2_grpc\nimport ping_pb2\nimport time\nimport threading\n\nclass Listener(ping_pb2_grpc.PingpongServiceServicer):\n\tdef __init__(self, *args, **kwargs):\n\t\tself.counter = 0\n\t\tself.lastPrintTime = time.time()\n\t# Request coming in\n\tdef ping(self, request, context):\n\t\tself.counter += 1\n\t\tif(self.counter > 100000):\n\t\t\tprint(f\"100000 Calls in {time.time()-self.lastPrintTime} Seconds\")\n\t\t\tself.lastPrintTime = time.time()\n\t\t\tself.counter = 0\n\t\treturn ping_pb2.Pong(count=request.count + 1)\n\ndef server():\n\t# A single worker thread also keeps the request counter free of data races\n\tserver = grpc.server(futures.ThreadPoolExecutor(max_workers=1))\n\tping_pb2_grpc.add_PingpongServiceServicer_to_server(Listener(), server)\n\tserver.add_insecure_port(\"[::]:9999\")\n\tserver.start()\n\ttry:\n\t\twhile True:\n\t\t\tprint(f\"Server On...Threads {threading.active_count()}\")\n\t\t\ttime.sleep(10)\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\nKeyboardInterrupt. Closing\")\n\t\tserver.stop(0)\n\nif __name__ == \"__main__\":\n\tserver()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}\n+{"seq_id":"345590462","text":"from Quadrilateral import Quadrilateral\nfrom Vector3d import Vector3d\nimport numpy as np\nfrom Triangle import Triangle\n\n\nclass Rectangle(Quadrilateral):\n    \"\"\"Rectangle.\"\"\"\n    def __init__(self, vertex0, vertex1, vertex2):\n        \"\"\"Constructor.\"\"\"\n        vec01 = vertex1 - vertex0\n        vec12 = vertex2 - vertex1\n        vec20 = vertex0 - vertex2\n\n        if Triangle.isconsistent(vec01, vec12, vec20) is False:\n            # Given points do not form a triangle.\n            raise ValueError\n\n        inp0112 = vec01 ** vec12\n        inp1220 = vec12 ** vec20\n        inp2001 = vec20 ** vec01\n\n        prod = inp0112 * inp1220 * inp2001\n\n        if np.abs(prod) > 0.0:\n            # Triangle is not a right one.\n            raise ValueError\n        else:\n            eps = np.finfo(float).eps\n\n            if np.abs(inp0112) < eps:\n                vertex3 = vertex0 + vec12\n\n            elif np.abs(inp1220) < eps:\n                vertex3 = vertex1 + vec20\n\n            elif np.abs(inp2001) < eps:\n                vertex3 = vertex2 + vec01\n\n            else:\n                # Triangle is not a right one.\n                raise ValueError\n\n        super(Rectangle, self).__init__(vertex0, vertex1, vertex2, vertex3)\n\n####\nif __name__ == \"__main__\":\n    vec0 = Vector3d()\n    vec1 = Vector3d(1.0, 0.0, 0.0)\n    vec2 = Vector3d(1.0, 1.0, 0.0)\n    vec3 = Vector3d(0.0, 1.0, 0.0)\n\n    vec = Vector3d(0.0, 1.0, 0.0)\n\n    rect = Rectangle(vec0, vec1, vec2)\n    rect.print_self()\n","sub_path":"Rectangle.py","file_name":"Rectangle.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}\n+{"seq_id":"223148235","text":"import os, setuptools\n\ndef long_description():\n    with open('README.md') as f:\n        return f.read()\n\n# Copyright 2013, 2014, 2015, 2016, 2017, 2020 Andrzej Cichocki\n\n# This file is part of pyven.\n#\n# pyven is free software: you can redistribute it and/or modify\n# it 
under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# pyven is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with pyven. If not, see .\n\nimport os, setuptools, subprocess\n\nclass SourceInfo:\n\n class PYXPath:\n\n dotpyx = '.pyx'\n\n def __init__(self, package, name, path):\n self.package = package\n self.name = name\n self.path = path\n\n def make_ext(self):\n g = {}\n with open(self.path + 'bld') as f: # Assume project root.\n exec(f.read(), g)\n return g['make_ext'](self.package + '.' + self.name[:-len(self.dotpyx)], self.path)\n\n def __init__(self, rootdir):\n self.packages = setuptools.find_packages(rootdir)\n def g():\n for package in self.packages:\n dirpath = package.replace('.', os.sep)\n for name in os.listdir(os.path.join(rootdir, dirpath)):\n if name.endswith(self.PYXPath.dotpyx):\n yield self.PYXPath(package, name, os.path.join(dirpath, name))\n pyxpaths = list(g())\n if pyxpaths and os.path.isdir(os.path.join(rootdir, '.git')): # We could be an unpacked sdist.\n check_ignore = subprocess.Popen(['git', 'check-ignore'] + [p.path for p in pyxpaths], cwd = rootdir, stdout = subprocess.PIPE)\n ignoredpaths = set(check_ignore.communicate()[0].decode().splitlines())\n assert check_ignore.wait() in [0, 1]\n self.pyxpaths = [path for path in pyxpaths if path.path not in ignoredpaths]\n else:\n self.pyxpaths = pyxpaths\n\nsourceinfo = SourceInfo('.')\n\ndef ext_modules():\n paths = [path.make_ext() for path in sourceinfo.pyxpaths]\n if paths:\n # XXX: Can cythonize be deferred?\n from Cython.Build import cythonize\n return dict(ext_modules = cythonize(paths))\n return {}\n\nsetuptools.setup(\n name = 'outjack',\n version = '7',\n description = 'JACK integration for Python',\n long_description = long_description(),\n long_description_content_type = 'text/markdown',\n url = 'https://github.com/combatopera/outjack',\n author = 'Andrzej Cichocki',\n packages = sourceinfo.packages,\n py_modules = [],\n install_requires = ['nativecommon', 'numpy'],\n package_data = {'': ['*.pxd', '*.pyx', '*.pyxbld', '*.arid', '*.aridt']},\n scripts = [],\n entry_points = {'console_scripts': []},\n **ext_modules())\n","sub_path":"pypi_install_script/outjack-7.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"295901260","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 23 10:07:14 2018\n\n@author: Shi Weiqiang Econ4A Group_19\n\"\"\"\n\nimport numpy\nfrom PIL import Image\nimport time\n\nstart = time.clock()\n# Load img and set W, H\nhead = Image.open(\"Head.jpg\")\nW, H = head.size\nprint(W,H)\n# Img to numpy array\napic_origin = numpy.array(head)\n# prepare the affine Img array\nprint(apic_origin.shape)\n\"\"\"\nSeparate the Head Img to nice part\nUpperLeft UpperMid UpperRight\nLeft Head Right\nDownLeft DownMid DownRight\n\n\"\"\"\n\n\"\"\"\ncreate a function to affine the img:\n if the img enlarge the centre obj and keep the same size\n 1.Load the img form array\n 2.set the radio of the new img affined:\n 1.the 4 new point of img\n\"\"\"\n# 
==========================================\n# set the img and UpperLeft DownRight point\n# also the point for new img\ndef enlarge_centre_img(origin_img,origin_UL,origin_DR,affine_UL,affine_DR):\n print(\"===start function [enlarge]====\")\n W = origin_img.shape[1]\n H = origin_img.shape[0]\n print(\"the shape of img W:\",W,\"H:\",H)\n # prepare the img to save\n affine_img = numpy.zeros((origin_img.shape[0], origin_img.shape[1], origin_img.shape[2]), dtype=numpy.uint8)\n\n\n\n # --------------------- Part I ---------------------\n \"\"\"\n resize the ROW to and enlarge the center row\n \"\"\"\n # First part of img\n # copy the Part 1 Img to apic_Part1\n apic_Part1_row = numpy.zeros((origin_UL[0], W, 3), dtype=numpy.uint8)\n for i in range(origin_UL[0]):\n for j in range(W):\n apic_Part1_row[i][j] = origin_img[i][j]\n '''\n Big Img => Small Img\n (Col,Row) to the matrix\n Affine Original\n (0,0) => (0,0)\n (X1,Y1) => (X1',Y1')\n (W,0) => (W,0)\n (X2,Y2) => (X2',Y2')\n (W,affine_UL) => (W,origin_UL[0])\n (X3,Y3) => (X3',Y3')\n Target Input\n '''\n\n # Three Point of Original Pic\n a_1_row = numpy.array([\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [W, 0, 0, 0, 1, 0],\n [0, 0, W, 0, 0, 1],\n [W, origin_UL[0], 0, 0, 1, 0],\n [0, 0, W, origin_UL[0], 0, 1]\n ])\n # Three Point of affine Pic\n b_1_row = numpy.array([0, 0, W, 0, W, affine_UL[0]])\n c_1_row = numpy.linalg.solve(a_1_row, b_1_row)\n print(c_1_row)\n for i in range(origin_UL[0]): # control row\n for j in range(W): # control col\n # find the new (x,y) for pixels\n src = numpy.array([\n [j, i, 0, 0, 1, 0],\n [0, 0, j, i, 0, 1]\n ])\n # the Local of the pixel in the img\n ans = numpy.round(numpy.matmul(src, c_1_row), 0)\n if ans[1] >= origin_UL[0] or ans[0] >= W:\n continue # over range\n if ans[1] < 0 or ans[0] < 0:\n continue # over range\n # transfor the Img from Part1 to apic_affine\n affine_img[int(ans[1]), int(ans[0])] = apic_Part1_row[i, j]\n print(\"Finish resize row up\")\n\n\n # Second part of Pic\n # copy the Part 2 Img to apic_Part2\n # origin_DR[0]-origin_UL[0] is the diff of H\n apic_Part2 = numpy.zeros((origin_DR[0]-origin_UL[0], W, 3), dtype=numpy.uint8)\n for i in range(origin_DR[0]-origin_UL[0]):\n for j in range(W):\n apic_Part2[i][j] = origin_img[i + origin_UL[0] ][j]\n # prepare the affine img for Part2\n apic_affine_Part2 = numpy.zeros((affine_DR[0]-affine_UL[0], W, 3), dtype=numpy.uint8)\n '''\n Small Img => Big Img\n (Col,Row) to the matrix\n Affine Original\n (0, 0) => (0,0)\n (X1,Y1) => (X1',Y1')\n (0, int(affine_DR[0]-affine_UL[0])) => (0, origin_DR[0]-origin_UL[0])\n (X2,Y2) => (X2',Y2')\n (W, int(affine_DR[0]-affine_UL[0])) => (W, origin_DR[0]-origin_UL[0])\n (X3,Y3) => (X3',Y3')\n Target Input\n '''\n\n # because of Small Img to Big Img,a_2 Use Affine's point\n a_2_row = numpy.array([\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, int(affine_DR[0]-affine_UL[0]), 0, 0, 1, 0],\n [0, 0, 0, int(affine_DR[0]-affine_UL[0]), 0, 1],\n [W, int(affine_DR[0]-affine_UL[0]), 0, 0, 1, 0],\n [0, 0, W, int(affine_DR[0]-affine_UL[0]), 0, 1]\n ])\n\n # because of Small Img to Big Img,b_2 Use Original's point\n b_2_row = numpy.array([0, 0, 0, int(origin_DR[0]-origin_UL[0]), W, int(origin_DR[0]-origin_UL[0])])\n c_2_row = numpy.linalg.solve(a_2_row, b_2_row)\n print(c_2_row)\n\n for i in range(int(affine_DR[0]-affine_UL[0])): # control row\n for j in range(W): # control col\n # find the new (x,y) for pixels\n src = numpy.array([\n [j, i, 0, 0, 1, 0],\n [0, 0, j, i, 0, 1]\n ])\n ans = numpy.round(numpy.matmul(src, 
c_2_row), 0)\n # the Local of the pixel in the img\n if ans[1] >= int(origin_DR[0]-origin_UL[0]) or ans[0] >= W:\n continue # over range\n if ans[1] < 0 or ans[0] < 0:\n continue # over range\n # transfor the Img from Part2 to apic_affine_Part2\n apic_affine_Part2[i, j] = apic_Part2[int(ans[1]), int(ans[0])]\n\n # copy the apic_affine_Part2 img to apic_affine\n for i in range(int(affine_DR[0]-affine_UL[0])):\n for j in range(W):\n affine_img[i + affine_UL[0] + 1][j] = apic_affine_Part2[i][j]\n\n print(\"Finish resize row mid\")\n\n # Third part of Pic\n # copy the Part 3 Img to apic_Part3\n apic_Part3 = numpy.zeros((int(H-origin_DR[0]), W, 3), dtype=numpy.uint8)\n for i in range(int(H-origin_DR[0])):\n for j in range(W):\n apic_Part3[i][j] = origin_img[i + origin_DR[0]][j]\n '''\n Big Img => Small Img\n (Col,Row)\n Affine Original\n (0,0) => (0,0)\n (X1,Y1) => (X1',Y1')\n (W,0) => (W,0)\n (X2,Y2) => (X2',Y2')\n (W,int(affine_DR[0])) => (W,int(H-origin_DR[0]))\n (X3,Y3) => (X3',Y3')\n Target Input\n '''\n\n # Three Point of Original Pic\n a_3 = numpy.array([\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [W, 0, 0, 0, 1, 0],\n [0, 0, W, 0, 0, 1],\n [W, H-origin_DR[0], 0, 0, 1, 0],\n [0, 0, W, H-origin_DR[0], 0, 1]\n ])\n # Three Point of affine Pic\n b_3 = numpy.array([0, 0, W, 0, W, int(H-affine_DR[0])])\n c_3 = numpy.linalg.solve(a_3, b_3)\n print(c_3)\n\n for i in range(H-origin_DR[0]): # control row\n for j in range(W): # control col\n # find the new (x,y) for pixels\n src = numpy.array([\n [j, i, 0, 0, 1, 0],\n [0, 0, j, i, 0, 1]\n ])\n # the Local of the pixel in the img\n ans = numpy.round(numpy.matmul(src, c_3), 0)\n if ans[1] >= int(H-affine_DR[0]) or ans[0] >= W:\n continue # over range\n if ans[1] < 0 or ans[0] < 0:\n continue # over range\n # transfor the Img from Part3 to apic_affine\n # and offset down int(1199*H/1348) pixels\n affine_img[int(ans[1]) + affine_DR[0], int(ans[0])] = apic_Part3[i, j]\n\n print(\"Finish resize down\")\n \n return affine_img\n # finish function\n# ==========================================\n\n# affine the head\n# call the function by created\n# enlarge_centre_img(origin_img,origin_UL,origin_DR,affine_UL,affine_DR)\napic_affine_row = enlarge_centre_img(apic_origin,(650,830),(1750,1580),(200,200),(2461,2236))\n\n\n\nfinish_part_row = (time.clock() - start)\nprint(\"Time used for Row:\",finish_part_row)\nsolve_Img = Image.fromarray(apic_affine_row, mode=\"RGB\")\nsolve_Img.save(\"Output_Quiz2_row.jpg\")\n\n\n\n# start part 2 resize the col side\napic_affine_row = Image.open(\"Output_Quiz2_row.jpg\")\n# transpose the array to enlarge the Col side\napic_affine_row = numpy.array(apic_affine_row)\napic_affine_col = apic_affine_row.transpose((1,0,2))\nprint(\"transpose:\",apic_affine_col.shape)\n \n# and the point should be transpose from Output_Quiz2_row\napic_affine_col = enlarge_centre_img(apic_affine_col,(830,200),(1580,2461),(200,200),(2236,2461))\nfinish_part_col = (time.clock() - start)\nprint(\"Time used for Row:\",finish_part_col)\n\nsolve_Img = Image.fromarray(apic_affine_col, mode=\"RGB\")\nsolve_Img.save(\"Output_Quiz2_col.jpg\")\n\n\n\n# then transpose to return the position\napic_affine_col = Image.open(\"Output_Quiz2_col.jpg\")\napic_affine_col = numpy.array(apic_affine_col)\napic_affine_col = apic_affine_col.transpose((1,0,2))\nsolve_Img = Image.fromarray(apic_affine_col, mode=\"RGB\")\nsolve_Img.save(\"Output_Quiz2_Head.jpg\")\n\n\n# affine the nose\n\napic_affine_head = Image.open(\"Output_Quiz2_Head.jpg\")\napic_affine_head = 
numpy.array(apic_affine_head)\nhead_P2 = numpy.zeros((2261,2036,3), dtype=numpy.uint8)\nfor i in range(2261):\n for j in range(2036):\n head_P2[i][j] = apic_affine_head[i+200][j+200]\n# call the function by created\n# enlarge_centre_img(origin_img,origin_UL,origin_DR,affine_UL,affine_DR)\napic_affine_row = enlarge_centre_img(head_P2,(1130,818),(1650,1218),(565,509),(1695,1527))\napic_affine_col = apic_affine_row.transpose((1,0,2))\napic_affine_col = enlarge_centre_img(apic_affine_col,(818,1130),(1218,1650),(509,565),(1527,1695))\napic_affine_col = apic_affine_col.transpose((1,0,2))\n\nfor i in range(2261):\n for j in range(2036):\n apic_affine_head[i+200][j+200] = apic_affine_col[i][j]\n\nfinish_part_all = (time.clock() - start)\nprint(\"Time used for Row:\",finish_part_all)\nsolve_Img = Image.fromarray(apic_affine_head, mode=\"RGB\")\nsolve_Img.save(\"Output_Quiz2_finish.jpg\")\n\n","sub_path":"第19組_quiz2/Quiz_2.py","file_name":"Quiz_2.py","file_ext":"py","file_size_in_byte":9548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"360310708","text":"# name: ui.py\n# auth: christian bitter\n# desc: building simple ui with pygame\n# TODO:\n# we need to ensure that rendering is blitting to the callers x, y coordinates\n# Controls render into their surface in local coordinates. For that, they have a local rendering surface.\n# Control captions/ text can be aligned horizontally and vertically.\n# UI elements are re-rendered upon invalidation. In that case the elements paint method is called, causing the\n# UI elements surface buffer to be refreshed, so that it can be blitted with valid content subsequently.\n# todo: ui element text box\n# UI element Button allows to have an image set.\n# We make no distinction between clicked or not for now (which would justify an Image Button).\n# todo: on mouse over event\n# todo: on focus event\n# todo: ui element label\n# todo: support for rendering form with background and alpha blending of controls\n# todo: having to forward the pygame event loop events to the controls is cumbersome, there\n# should be something cleaner\n# todo: there should be something like overlays - where we have one form and can create smaller one blending it\n# over the existing one.\n# todo: add unit testing\n# todo: controls should have a global coordinate pair as well, this would simplify the hit-testing\n# todo: screen transitions either in the screen or in the UI\n# todo: initialization needs to be fixed, because for some ui elements we need to set fields that are used in the super\n# initialization routine\n# TODO: rework\n# TODO: allow specification of UI in a json file or something similar\n\nimport os\nimport pygame\nimport re\nfrom enum import Enum, IntFlag\n\nC_BLACK = (0, 0, 0, 255)\nC_RED = (255, 0, 0, 255)\nC_GREEN = (0, 255, 0, 255)\nC_BLUE = (0, 0, 255, 255)\nC_MENUGRAY = (192, 192, 192, 255)\nC_FORMBLUE = (32, 32, 128, 255)\nC_ELEMENT_BORDER_DARKGRAY = (64, 64, 64, 255)\nC_WHITE = (255, 255, 255, 255)\nC_BTN_FACE = (168, 168, 168, 255)\nC_BTN_BORDER = (128, 128, 128, 255)\nI_MARGIN = 3\n\n\ndef xy_inside(x: int, y: int, x0: int, y0: int, w: int, h: int) -> bool:\n return x0 <= x <= x0 + w and y0 <= y <= y0 + h\n\n\nclass UIEvent:\n SCREEN_TRANSITION: int = 99999\n\n @staticmethod\n def transition_screen(event_from: str, event_to: str):\n return pygame.event.Event(\n pygame.USEREVENT,\n {\n \"mode\": UIEvent.SCREEN_TRANSITION,\n \"source\": event_from,\n \"target\": event_to,\n },\n )\n\n\nclass FillStyle(Enum):\n 
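# (Editor's note, added for clarity.) These values map onto the branches of\n    # Renderable._paint below: Empty draws nothing, Colour fills the client\n    # rect with background_colour, and Image stretches background_image over it.\n    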
Empty = 1\n    Colour = 2\n    Image = 3\n\n\nclass VerticalAlignment(Enum):\n    Center = 0\n    Top = 1\n    Bottom = 2\n\n    def __str__(self):\n        return self.name\n\n\nclass HorizontalAlignment(Enum):\n    Center = 0\n    Left = 1\n    Right = 2\n\n    def __str__(self):\n        return self.name\n\n\nclass FontStyle(IntFlag):\n    \"\"\"\n    Style of a font\n    \"\"\"\n\n    Normal = 0\n    Bold = 1\n    Italic = 2\n    Underline = 8\n\n    def __str__(self):\n        return self.name\n\n\nclass UIElement(object):\n    \"\"\"\n    This is the base type of all UI elements. Critically, it has a name/id and some\n    spatial location and extent.\n    \"\"\"\n\n    def __init__(\n        self, name, x: int = None, y: int = None, w: int = 0, h: int = 0, **kwargs\n    ):\n        \"\"\"Constructor for UIElement\"\"\"\n        super().__init__()\n        self._name = name\n        self._x0 = x\n        self._y0 = y\n        self._w = w\n        self._h = h\n        self._x1 = None\n        self._y1 = None\n        if self._x0 is not None and self._w is not None:\n            self._x1 = x + w\n        if self._y0 is not None and self._h is not None:\n            self._y1 = y + h\n        self._client_rect = (0, 0, self._w, self._h)\n        self._invalidated = kwargs.get(\"invalidated\", False)\n        self._has_focus = kwargs.get(\"has_focus\", False)\n\n    @property\n    def name(self):\n        return self._name\n\n    @property\n    def x(self):\n        return self._x0\n\n    @x.setter\n    def x(self, x: int):\n        if x is None:\n            raise ValueError(\"x cannot be None\")\n        if x < 0:\n            raise ValueError(\"x < 0\")\n\n        if self._x0 != x:\n            self._x0 = x\n            self._x1 = self._x0 + self._w\n            self.invalidate()\n\n    @property\n    def y(self):\n        return self._y0\n\n    @y.setter\n    def y(self, y: int):\n        if y is None:\n            raise ValueError(\"y cannot be None\")\n        if y < 0:\n            raise ValueError(\"y < 0\")\n\n        if self._y0 != y:\n            self._y0 = y\n            self._y1 = self._y0 + self._h\n            self.invalidate()\n\n    @property\n    def width(self):\n        return self._w\n\n    @width.setter\n    def width(self, w: int):\n        if w is None:\n            raise ValueError(\"w cannot be None\")\n        if w < 0:\n            raise ValueError(\"w < 0\")\n\n        if self._w != w:\n            self._w = w\n            self._x1 = self._x0 + w\n            self.invalidate()\n\n    @property\n    def height(self):\n        return self._h\n\n    @height.setter\n    def height(self, h: int = None):\n        if h is None:\n            raise ValueError(\"h cannot be None\")\n        if h < 0:\n            raise ValueError(\"h < 0\")\n\n        if self._h != h:\n            self._h = h\n            self._y1 = self._y0 + h\n            self.invalidate()\n\n    def invalidate(self, **kwargs):\n        self._client_rect = (0, 0, self._w, self._h)\n        self._invalidated = True\n\n    def get_bounds(self):\n        return self._x0, self._y0, self._x1, self._y1\n\n    @property\n    def has_focus(self):\n        return self._has_focus\n\n    @has_focus.setter\n    def has_focus(self, f):\n        self._has_focus = f\n\n    def __repr__(self):\n        return \"UIElement: {} ({})\".format(self._name, type(self))\n\n    def initialize(self) -> None:\n        pass\n\n    def finalize(self) -> None:\n        pass\n\n    def update(self, t) -> None:\n        pass\n\n    def process_event(self, e) -> None:\n        pass\n\n\nclass Renderable(UIElement):\n    \"\"\"\n    The basic class for all ui renderable elements\n    \"\"\"\n\n    def __init__(\n        self, name, x: int = None, y: int = None, w: int = None, h: int = None, **kwargs\n    ):\n        \"\"\"Constructor for Renderable\"\"\"\n        super(Renderable, self).__init__(name, x=x, y=y, w=w, h=h, **kwargs)\n\n        self._fill_style = kwargs.get(\"fill_style\", FillStyle.Empty)\n        self._background_colour = kwargs.get(\"background_colour\", (128, 128, 128, 255))\n        self._background_image = kwargs.get(\"background_image\", None)\n        self._colour = kwargs.get(\"colour\", (0, 0, 0, 255))\n        self._show_border = kwargs.get(\"show_border\", True)\n        self._show_caption = 
kwargs.get(\"show_caption\", False)\n self._font_size = kwargs.get(\n \"font_size\", 20\n ) # width and height is determined by font\n self._font_style = kwargs.get(\"font_style\", FontStyle.Normal)\n self._font = pygame.font.Font(\n kwargs.get(\"font\", pygame.font.get_default_font()), self._font_size\n )\n self._font_colour = kwargs.get(\"font_colour\", C_BLACK)\n self._text_caption = None\n self._text_bounds = None\n self._caption = None\n self._surface = None\n\n self._caption = kwargs.get(\"caption\", \"\")\n self.__update_caption()\n\n self._is_visible = kwargs.get(\"visible\", True)\n self._z_order = kwargs.get(\"z\", 0)\n\n self._caption_halign = kwargs.get(\"caption_halign\", HorizontalAlignment.Left)\n self._caption_valign = kwargs.get(\"caption_valign\", VerticalAlignment.Center)\n\n self._apply_font_style()\n\n if self.width is not None and self.height is not None:\n if self.width == 0 or self.height == 0:\n print(\"{} has set width and/or height to 0 px\".format(self._name))\n self._surface = pygame.Surface(\n (self.width, self.height), flags=pygame.SRCALPHA\n )\n else:\n print(\n \"{}' surface was not initialized, because either width ({}) or height ({}) was not provided {}.\".format(\n self._name, self.width, self.height, (w, h)\n )\n )\n\n self.invalidate()\n\n def __update_caption(self):\n self._text_caption = self._font.render(self._caption, 1, self._font_colour)\n self._text_bounds = self._text_caption.get_rect()\n\n # we add a safety buffer around the text bounds to allow for the real bounds\n _w, _h = (\n self._text_bounds[2] + 2 * I_MARGIN,\n self._text_bounds[3] + 2 * I_MARGIN,\n )\n if self.width is None or self.width < _w:\n self.width = _w\n if self.height is None or self.height < _h:\n self.height = _h\n\n def _apply_font_style(self):\n if self._font_style & FontStyle.Bold:\n self._font.set_bold(True)\n if self._font_style & FontStyle.Italic:\n self._font.set_italic(True)\n if self._font_style & FontStyle.Underline:\n self._font.set_underline(True)\n\n @property\n def z_order(self):\n return self._z_order\n\n @property\n def is_visible(self):\n return self._is_visible\n\n @property\n def background_colour(self):\n return self._background_colour\n\n @property\n def colour(self):\n return self._colour\n\n @property\n def show_border(self):\n return self._show_border\n\n def invalidate(self, **kwargs):\n UIElement.invalidate(self, **kwargs)\n # if we need to create a new surface we do otherwise we just clear\n if \"clear_only\" in kwargs:\n self._surface.fill(C_BLACK)\n else:\n if self._w is not None and self._h is not None:\n # TODO: we need to see if we need per pixel alpha\n self._surface = pygame.Surface(\n (self._w, self._h), flags=pygame.SRCALPHA\n )\n self._client_rect = (0, 0, self._w, self._h)\n\n @property\n def caption(self):\n return self._caption\n\n @caption.setter\n def caption(self, c):\n if c is None:\n return\n self._caption = c\n self.__update_caption()\n\n @property\n def show_caption(self):\n return self._show_caption\n\n def _paint(self):\n if self._invalidated:\n if self._surface is None:\n raise ValueError(\n \"Cannot paint into None surface: {}\".format(self._name)\n )\n\n if self._fill_style == FillStyle.Colour:\n pygame.draw.rect(\n self._surface, self._background_colour, self._client_rect, 0\n )\n elif self._fill_style == FillStyle.Image:\n if not self._background_image:\n raise ValueError(\"background fill image but image not provided\")\n\n scaled_bgimg = pygame.transform.scale(\n self._background_image, (self._w, self._h)\n )\n 
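# blit the scaled copy so the image exactly covers the element's client rect\n                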
self._surface.blit(scaled_bgimg, dest=self._client_rect)\n else:\n pass\n\n if self._show_border:\n pygame.draw.rect(self._surface, self._colour, self._client_rect, 1)\n\n if self._show_caption and self._caption != \"\":\n # We place text in accordance with the chosen alignment.\n # This means vertically and horizontally, inside the parent's bounding box. The parent's\n # bounding box is at least so wide, so as to be able to capture the text.\n # It does not need to be recomputed every time - this can be placed after the initialization\n dest_pos = [0, 0]\n c_halign = self._caption_halign\n c_valign = self._caption_valign\n\n if c_halign == HorizontalAlignment.Left:\n dest_pos[0] = I_MARGIN\n elif c_halign == HorizontalAlignment.Center:\n dest_pos[0] = int(0.5 * (self._w - self._text_bounds[2]))\n elif c_halign == HorizontalAlignment.Right:\n dest_pos[0] = self._w - I_MARGIN - self._text_bounds[2]\n else:\n raise ValueError(\n \"unknown horizontal alignment setting {}\".format(c_halign)\n )\n\n if c_valign == VerticalAlignment.Top:\n dest_pos[1] = I_MARGIN\n elif c_valign == VerticalAlignment.Center:\n dest_pos[1] = int(0.5 * (self._h - self._text_bounds[3]))\n elif c_valign == VerticalAlignment.Bottom:\n dest_pos[1] = self._h - I_MARGIN - self._text_bounds[3]\n else:\n raise ValueError(\n \"unknown vertical alignment setting {}\".format(c_valign)\n )\n\n self._surface.blit(self._text_caption, dest=tuple(dest_pos))\n\n self._invalidated = False\n\n if self._surface is None:\n raise ValueError(\"Failed _paint {} - surface is None.\".format(self._name))\n\n def render(self, buffer):\n if self._invalidated:\n self._paint()\n if self._is_visible:\n buffer.blit(self._surface, (self.x, self.y))\n\n\nclass Clickable(Renderable):\n \"\"\"\"\"\"\n\n def __init__(\n self, name, x: int = None, y: int = None, w: int = None, h: int = None, **kwargs\n ):\n \"\"\"Constructor for Clickable\"\"\"\n super().__init__(name, x=x, y=y, w=w, h=h, **kwargs)\n self._is_clicked = kwargs.get(\"is_clicked\", False)\n\n def unclick(self):\n self._is_clicked = False\n\n def clicked(self, mx, my, button):\n previous_state = self._is_clicked\n if xy_inside(mx, my, self._x0, self._y0, self._w, self._h):\n self._is_clicked = True\n else:\n self._is_clicked = False\n\n # upon state change we invalidate\n if previous_state != self._is_clicked:\n self.invalidate()\n\n return self._is_clicked, self\n\n def on_click(self, sender, event_args):\n # when clicked this fires, it needs to be overwritten in a derived class\n pass\n\n\nclass UIImage(Clickable):\n \"\"\"\n A component to show an image\n \"\"\"\n\n def __init__(\n self,\n name: str,\n image_fp: str,\n x: int,\n y: int,\n w: int = None,\n h: int = None,\n **kwargs\n ):\n \"\"\"Constructor for UIImage\"\"\"\n if not image_fp or not os.path.exists(image_fp):\n raise ValueError(\"Image path not provided or does not exist\")\n img = pygame.image.load(image_fp)\n if w is None:\n w = img.get_width()\n if h is None:\n h = img.get_height()\n s_img = pygame.transform.scale(img, (w, h))\n s = pygame.Surface((w, h))\n s.blit(s_img, (0, 0))\n del img\n kwargs[\"background_image\"] = s\n kwargs[\"fill_style\"] = FillStyle.Image\n self._image_fp = image_fp\n super(UIImage, self).__init__(name=name, x=x, y=y, w=w, h=h, **kwargs)\n\n def __repr__(self):\n return \"[UIImage] {}: {}\".format(self._name, self._image_fp)\n\n @property\n def image(self):\n return self._background_image\n\n @property\n def image_path(self):\n return self._image_fp\n\n def render(self, buffer):\n if 
self._invalidated:\n            self._paint()\n        Renderable.render(self, buffer)\n\n\nclass Canvas(Clickable):\n    \"\"\"\n    A canvas is a simple renderable, which only exists for the purposes of drawing into it\n    \"\"\"\n\n    def __init__(self, name: str, x: int, y: int, w: int, h: int, **kwargs):\n        \"\"\"Constructor for Canvas\"\"\"\n        kwargs[\"show_border\"] = False\n        kwargs[\"show_caption\"] = False\n        super(Canvas, self).__init__(name=name, x=x, y=y, w=w, h=h, **kwargs)\n\n    def render(self, buffer):\n        if not self.is_visible:\n            return\n\n        if self._invalidated:\n            self._paint()\n\n        buffer.blit(self._surface, (self.x, self.y))\n\n\nclass Label(Renderable):\n    \"\"\"\n    Label holding a single line of text\n    \"\"\"\n\n    def __init__(self, name: str, x: int, y: int, caption: str = None, **kwargs):\n        \"\"\"Constructor for Label\"\"\"\n        if caption is None:\n            print(\"Caption ({}) cannot be none - set to empty string\".format(name))\n            caption = \"\"\n        if os.linesep in caption:\n            print(\n                \"Label ({}) does not support multiple lines of text/ line breaks\".format(\n                    name\n                )\n            )\n\n        kwargs[\"caption\"] = caption\n        kwargs[\"show_caption\"] = True\n\n        if \"background_colour\" not in kwargs:\n            kwargs[\"background_colour\"] = C_MENUGRAY\n\n        if \"fill_style\" not in kwargs:\n            kwargs[\"fill_style\"] = FillStyle.Colour\n\n        if \"caption_halign\" not in kwargs:\n            kwargs[\"caption_halign\"] = HorizontalAlignment.Left\n\n        if \"caption_valign\" not in kwargs:\n            kwargs[\"caption_valign\"] = VerticalAlignment.Center\n\n        if \"colour\" not in kwargs:\n            kwargs[\"colour\"] = C_ELEMENT_BORDER_DARKGRAY\n\n        super(Label, self).__init__(name=name, x=x, y=y, **kwargs)\n\n\nclass TextBox(Label):\n    \"\"\"\n    TODO: Text Box\n    \"\"\"\n\n    __cursor__symbol = \"|\"\n\n    __BLINK_ON__ = 1\n    __BLINK_OFF__ = 0\n\n    def __init__(\n        self, name: str, x: int, y: int, max_chars: int = 20, w: int = 200, **kwargs\n    ):\n        \"\"\"Constructor for TextBox\"\"\"\n        kwargs[\"show_border\"] = True\n        kwargs[\"background_colour\"] = C_WHITE\n        self._is_clicked = False\n        self._blink_state = 0\n        self._change_state_millis = 200\n        self._dmillis = 0\n        self._maxchars = max_chars\n        self._cursor_added = False\n        super(TextBox, self).__init__(name=name, x=x, y=y, w=w, **kwargs)\n\n    @property\n    def maxchars(self):\n        return self._maxchars\n\n    def update(self, t):\n        if self._has_focus:\n            self._dmillis += t\n\n            if (\n                self._blink_state == TextBox.__BLINK_OFF__\n                and self._dmillis >= self._change_state_millis\n            ):\n                self._blink_state = TextBox.__BLINK_ON__\n                self._dmillis = 0\n\n            if (\n                self._blink_state == TextBox.__BLINK_ON__\n                and self._dmillis >= self._change_state_millis\n            ):\n                self._blink_state = TextBox.__BLINK_OFF__\n                self._dmillis = 0\n\n            self.invalidate()\n        else:\n            self._dmillis = 0\n            self._blink_state = 0\n\n    def clicked(self, mx, my, button):\n        clicked, sender = Clickable.clicked(self, mx, my, button)\n        if clicked:\n            self.has_focus = True\n        else:\n            self.has_focus = False\n\n        return clicked, sender\n\n    def add_char(self, c) -> None:\n        if len(self.caption) < self._maxchars:\n            self.caption = self.caption + c\n\n    def remove_char(self) -> None:\n        _len = len(self.caption)\n        if _len > 0:\n            self.caption = self.caption[0 : (_len - 1)]\n\n    def _paint(self):\n        # add the blinking ...\n        t_caption = self.caption\n        if self._blink_state == 1:\n            self.caption = self.caption + TextBox.__cursor__symbol\n        Label._paint(self)\n        self.caption = t_caption\n\n    def process_event(self, e) -> None:\n        
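# only react to key presses; other forwarded event types carry no key attribute\n        if e.type == pygame.KEYDOWN and e.key == 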
pygame.K_BACKSPACE:\n self.remove_char()\n\n\nclass MultiLineLabel(Label):\n \"\"\"\n TODO MultiLineLabel - horizontal alignment\n TODO MultiLineLabel - vertical alignment\n TODO MultiLineLabel - new line handling - newline should be handled explicitly, when they are the single line element\n https://www.pygame.org/docs/ref/font.html\n The text can only be a single line: newline characters are not rendered.\n \"\"\"\n\n def __init__(self, name: str, x: int, y: int, caption: str, **kwargs):\n \"\"\"Constructor for MultiLineLabel\"\"\"\n w = kwargs.get(\"width\", 200)\n kwargs[\"width\"] = w\n kwargs[\"show_caption\"] = False\n\n super(MultiLineLabel, self).__init__(\n name=name, x=x, y=y, caption=caption, **kwargs\n )\n w_available = w - 2 * I_MARGIN\n\n self._lines = MultiLineLabel.split_text(\n self._font, text=caption, label_width=w_available\n )\n self._text_lines = [\"\".join(line).strip() for line in self._lines]\n\n h_i = I_MARGIN\n for l_i in self._text_lines:\n s_i = self._font.render(l_i, 0, self._font_colour)\n h_i += s_i.get_height()\n h_i += I_MARGIN\n\n self.width = w\n self.height = h_i\n self.invalidate()\n\n @staticmethod\n def split_text(font, text: str, label_width: int):\n if font is None:\n raise ValueError(\"Font is not defined\")\n if text is None:\n raise ValueError(\"Text is None\")\n if label_width is None:\n raise ValueError(\"Label width is None\")\n if label_width < 0:\n raise ValueError(\"Label width < 1\")\n\n if text == \"\":\n return text\n frag_delim = \".,!?();:\"\n s = text.strip()\n s = re.sub(\n \"([{}])\".format(frag_delim), r\" \\1 \", s\n ) # put space around the first match group\n s = re.sub(\n r\"\\s{2,}\", \" \", s\n ) # collapse 2 white space characters into a single space\n\n # now simply split on space and get as many tokens into a line as possible\n s = re.split(r\"(\\s+)\", s)\n sx = []\n for i, t in enumerate(s):\n if t == \"\":\n continue\n if t == os.linesep or t == \"\\n\":\n t = \" \"\n if i < len(s) - 1 and s[i] == \" \" and s[i + 1] in frag_delim:\n # collapse ' ', '.' 
into '.'\n continue\n\n sx.append(t)\n s = sx\n\n temp_width = 0\n temp_sentence = []\n out_tokens = []\n\n # far and ask whether this is in the limits.\n for i, t in enumerate(s):\n r = font.render(t, 1, (0, 0, 0))\n w = r.get_width()\n # TODO: we do not put sentence delimiters as an opening token\n # still there is a breaking issue\n # instead we pull the last token of the previous line with it\n if temp_width + w >= label_width:\n # remove ending spaces from the temps\n x_sentence = temp_sentence.copy()\n sentence_length = len(x_sentence) - 1\n if x_sentence[sentence_length] == \" \":\n x_sentence = x_sentence[0:sentence_length]\n if t in frag_delim:\n # if t is a sentence/ fragement delimiter, we do not put on the next line\n x_sentence.append(t)\n t = None\n\n temp_sentence = []\n out_tokens.append(x_sentence)\n temp_width = 0\n\n if t is None:\n continue\n temp_sentence.append(t)\n temp_width += w\n\n if len(temp_sentence) > 0:\n out_tokens.append(temp_sentence)\n return out_tokens\n\n def _destpos_from_aligment(self, text_bounds):\n dest_pos = [0, 0]\n c_halign = self._caption_halign\n # TODO: vertical alignment needs to be integrated\n # c_valign = self._caption_valign\n\n if c_halign == HorizontalAlignment.Left:\n dest_pos[0] = I_MARGIN\n elif c_halign == HorizontalAlignment.Center:\n dest_pos[0] = int(0.5 * (self._w - text_bounds[2]))\n elif c_halign == HorizontalAlignment.Right:\n dest_pos[0] = self._w - I_MARGIN - text_bounds[2]\n else:\n raise ValueError(\"unknown horizontal alignment setting {}\".format(c_halign))\n\n dest_pos[1] = I_MARGIN\n # TODO:\n # if c_valign == VerticalAlignment.Top:\n # dest_pos[1] = I_MARGIN\n # elif c_valign == VerticalAlignment.Center:\n # dest_pos[1] = int(.5 * (self._h - self._text_bounds[3]))\n # elif c_valign == VerticalAlignment.Bottom:\n # dest_pos[1] = self._h - I_MARGIN - self._text_bounds[3]\n # else:\n # raise ValueError(\"unknown vertical alignment setting {}\".format(c_valign))\n return dest_pos\n\n def _paint(self):\n if self._surface is None:\n raise ValueError(\"Cannot paint into None surface: {}\".format(self._name))\n if self._fill_style == FillStyle.Colour:\n pygame.draw.rect(\n self._surface, self._background_colour, self._client_rect, 0\n )\n elif self._fill_style == FillStyle.Image:\n if not self._background_image:\n raise ValueError(\"background fill image but image not provided\")\n\n scaled_bgimg = pygame.transform.scale(\n self._background_image, (self._w, self._h)\n )\n self._surface.blit(scaled_bgimg, dest=self._client_rect)\n else:\n pass\n\n if self._show_border:\n pygame.draw.rect(self._surface, self._colour, self._client_rect, 1)\n\n if self._show_caption and self._caption != \"\":\n # We place text in accordance with the chosen alignment.\n # This means vertically and horizontally, inside the parent's bounding box. 
The parent's\n            # bounding box is at least so wide, so as to be able to capture the text.\n            # It does not need to be recomputed every time - this can be placed after the initialization\n            dst = self._destpos_from_aligment(self._client_rect)\n            x_i = dst[0]\n            y_i = I_MARGIN\n            for s_i in self._text_lines:\n                d_pos = [x_i, y_i]\n                s_i = self._font.render(s_i, 0, self._font_colour)\n                self._surface.blit(s_i, dest=tuple(d_pos))\n                y_i += s_i.get_height()\n\n        self._invalidated = False\n\n    def render(self, buffer):\n        if not self._show_caption and not self._show_border:\n            return\n\n        if self._invalidated:\n            self._paint()\n\n        buffer.blit(self._surface, (self.x, self.y))\n\n\nclass Button(Clickable):\n    \"\"\"\n    A button is an element that can be clicked. It is in either of two states, clicked or not clicked.\n    When the user clicks a button an event is fired and you may react to it.\n    \"\"\"\n\n    def __init__(\n        self,\n        name,\n        x: int,\n        y: int,\n        w: int = None,\n        h: int = None,\n        image_fp: str = None,\n        **kwargs\n    ):\n        \"\"\"Constructor for Button\"\"\"\n        # if we do have a caption but not an explicit show_caption, assume the default of show\n        if \"caption\" in kwargs and \"show_caption\" not in kwargs:\n            kwargs[\"show_caption\"] = True\n        if \"background_colour\" not in kwargs:\n            kwargs[\"background_colour\"] = C_BTN_FACE\n        if \"fill_style\" not in kwargs:\n            kwargs[\"fill_style\"] = FillStyle.Colour\n        if \"colour\" not in kwargs:\n            kwargs[\"colour\"] = C_BTN_BORDER\n        if image_fp is not None and os.path.exists(image_fp):\n            kwargs[\"background_image\"] = pygame.image.load(image_fp)\n        super().__init__(name=name, x=x, y=y, w=w, h=h, **kwargs)\n\n    def _paint(self):\n        Clickable._paint(self)\n\n        if self._is_clicked:\n            pygame.draw.rect(self._surface, C_RED, self._client_rect, 2)\n\n\nclass MenuItem(Button):\n    \"\"\"\"\"\"\n\n    def __init__(self, name: str, caption: str, w: int = None, h: int = None, **kwargs):\n        \"\"\"Constructor for MenuItem\"\"\"\n        kwargs[\"caption\"] = caption\n        kwargs[\"show_caption\"] = True\n        kwargs[\"show_border\"] = True\n        kwargs[\"fill_style\"] = FillStyle.Colour\n        super().__init__(name=name, x=0, y=0, w=w, h=h, **kwargs)\n\n\nclass ClickableContainer(Clickable):\n    \"\"\"\n    TODO: most of the menu stuff belongs into a clickable container\n    \"\"\"\n\n    def __init__(\n        self, name: str, x: int, y: int, w: int = None, h: int = None, **kwargs\n    ):\n        \"\"\"Constructor for ClickableContainer\"\"\"\n        super(ClickableContainer, self).__init__(name, x=x, y=y, w=w, h=h, **kwargs)\n        self._items = {}\n        self._item_names = []\n        self._iterm_inner_margin = kwargs.get(\"item_inner_margin\", 2)\n\n    @property\n    def item_names(self):\n        return self._item_names\n\n    def __repr__(self):\n        return \"ClickableContainer: {} - {} items\".format(\n            self._name, len(self._item_names)\n        )\n\n    @property\n    def items(self):\n        return self._items\n\n    def __getitem__(self, item):\n        if item is None:\n            raise ValueError(\"getitem - key not provided\")\n\n        if isinstance(item, int):\n            return self._items[self._item_names[item]]\n        elif isinstance(item, str):\n            if item in self._items:\n                return self._items[item]\n            else:\n                return self._items.get(item)\n\n    def remove_item(self, item_name: str) -> None:\n        \"\"\"\n        Remove an item from the items.\n        :param item_name:\n        :return: None\n        \"\"\"\n        if item_name is None:\n            raise ValueError(\"Item name not provided\")\n        if item_name not in self._item_names:\n            raise ValueError(\"Item does not exist\")\n\n        # update item names and items\n        del self._items[item_name]\n        del self._item_names[self._item_names.index(item_name)]\n        
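# repaint so the container reflows without the removed item\n        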
self.invalidate()\n\n def add_item(self, item, item_name: str):\n \"\"\"\n Add an item to the existing items.\n :param item: the item to add\n :param item_name: (str) the item's name\n \"\"\"\n if item is None:\n raise ValueError(\"Item not provided\")\n if item_name is None:\n raise ValueError(\"Item name not provided\")\n\n i_margin = self._iterm_inner_margin\n\n i_x = i_margin\n if len(self._item_names) < 1: # first item\n if self._show_caption:\n i_y = i_margin + self._text_bounds[3]\n else:\n i_y = i_margin\n self.height = self.height + item.height\n else:\n last_item = self._items[self._item_names[len(self._item_names) - 1]]\n i_y = last_item.y + last_item.height + i_margin\n self.height = i_y + item.height + i_margin\n\n item.x = i_x\n item.y = i_y\n\n if item.width >= self.width:\n self.width = 2 * i_margin + item.width\n for _, mi in self._items.items():\n mi.width = item.width\n else:\n item.width = self.width - 2 * i_margin\n\n # now add and invalidate\n self._items[item_name] = item\n self._item_names.append(item_name)\n self.invalidate()\n\n\nclass Menu(Clickable):\n \"\"\"\n A game menu - Single container.\n It does not support hotkeys, nesting and more common gui functionalty.\n TODO: reuse the clicable container.\n \"\"\"\n\n MENU_ITEM_INNER_MARGIN = 3\n\n def __init__(self, name, x, y, **kwargs):\n \"\"\"Constructor for Menu\"\"\"\n # if the user did not override these settings then we add our defaults\n if \"colour\" not in kwargs:\n kwargs[\"colour\"] = C_BLACK\n if \"background_colour\" not in kwargs:\n kwargs[\"background_colour\"] = C_MENUGRAY\n if \"show_border\" not in kwargs:\n kwargs[\"show_border\"] = True\n if \"fill_style\" not in kwargs:\n kwargs[\"fill_style\"] = FillStyle.Colour\n super().__init__(\n name=name,\n x=x,\n y=y,\n w=2 * Menu.MENU_ITEM_INNER_MARGIN,\n h=2 * Menu.MENU_ITEM_INNER_MARGIN,\n **kwargs\n )\n self._items = {}\n self._item_names = []\n\n def add_item(self, mni: MenuItem):\n \"\"\"\n Add a menu item to the existing menu items.\n :param mni: the menu item to add\n \"\"\"\n if not mni:\n raise ValueError(\"Item not provided\")\n\n i_margin = Menu.MENU_ITEM_INNER_MARGIN\n\n i_x = i_margin\n if len(self._item_names) < 1: # first item\n if self._show_caption:\n i_y = i_margin + self._text_bounds[3]\n else:\n i_y = i_margin\n self.height = self.height + mni.height\n else:\n last_item = self._items[self._item_names[len(self._item_names) - 1]]\n i_y = last_item.y + last_item.height + i_margin\n self.height = i_y + mni.height + i_margin\n\n mni.x = i_x\n mni.y = i_y\n\n if mni.width >= self.width:\n self.width = 2 * i_margin + mni.width\n for _, mi in self._items.items():\n mi.width = mni.width\n else:\n mni.width = self.width - 2 * i_margin\n\n # now add and invalidate\n self._items[mni.name] = mni\n self._item_names.append(mni.name)\n self.invalidate()\n\n def __repr__(self):\n return \"Menu: {} - {} items\".format(self._name, len(self._item_names))\n\n @property\n def caption(self):\n return self._caption\n\n @property\n def item_names(self):\n return self._item_names\n\n @property\n def items(self):\n return self._items\n\n def __getitem__(self, item):\n if item is None:\n raise ValueError(\"getitem - key not provided\")\n\n if isinstance(item, int):\n return self._items[self._item_names[item]]\n else:\n if item in self._items:\n return self._items[item]\n\n raise ValueError(\"undefined item selected\")\n\n def unclick(self):\n Clickable.unclick(self)\n for _, c in self._items.items():\n c.unclick()\n\n def clicked(self, mx, my, button):\n 
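\"\"\"Hit-test the menu and, in item-local coordinates, each menu item;\n        the last clicked item wins over the menu itself.\"\"\"\n        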
is_clicked, sender = Clickable.clicked(self, mx, my, button)\n # now ask for each item\n # for this we need to translate the mouse coord into the local coords by subtracting the parents offset\n lx, ly = mx - self.x, my - self.y\n for _, c in self._items.items():\n is_clicked_i, sender_i = c.clicked(lx, ly, button)\n if is_clicked_i:\n is_clicked, sender = is_clicked_i, sender_i\n\n return is_clicked, sender\n\n def _paint(self):\n Clickable._paint(self)\n for _, m in self._items.items():\n m._paint()\n\n def render(self, buffer):\n if (\n not self._show_caption\n and not self._show_border\n and len(self._item_names) < 1\n ):\n return\n\n if self._invalidated:\n self._paint()\n\n Clickable.render(self, buffer)\n\n for _, m in self._items.items():\n m.render(self._surface)\n\n buffer.blit(self._surface, (self._x0, self._y0))\n\n\nclass Screen(Clickable):\n \"\"\"\n The Screen type represents game windows, screens, overlays or anything similar.\n \"\"\"\n\n def __init__(self, name: str, title: str, width: int, height: int, **kwargs):\n \"\"\"Constructor for GameScreen\"\"\"\n if \"fill_style\" not in kwargs:\n kwargs[\"fill_style\"] = FillStyle.Colour\n if \"background_colour\" not in kwargs:\n kwargs[\"background_colour\"] = C_FORMBLUE\n\n super().__init__(name, x=0, y=0, w=width, h=height, **kwargs)\n self._title = title\n self._components = {}\n\n self._initialize_components()\n\n def _initialize_components(self):\n pass\n\n def _paint(self):\n Clickable._paint(self)\n for _, ui_elem in self._components.items():\n if isinstance(ui_elem, Renderable):\n ui_elem._paint()\n\n def render(self, buffer):\n \"\"\"\n Renders the screen and all its associated components to the provided buffer\n :param buffer:\n :return:\n \"\"\"\n if self._surface is None:\n raise ValueError(\n \"Cannot render {} - surface is none - check ui element creation.\".format(\n self._name\n )\n )\n if self._invalidated:\n self._paint()\n\n def can_render(o):\n return (isinstance(o, Renderable) and o.is_visible) or hasattr(o, \"render\")\n\n def sorter(o):\n if hasattr(o, \"z_order\"):\n return o.z_order\n else:\n return 0\n\n Clickable.render(self, buffer)\n # remove non-visible items and sort by z-index - front to back rendering\n ui_elems = [i for i in self._components.values() if can_render(i)]\n # sort by zindex\n visibles = sorted(ui_elems, key=sorter, reverse=False)\n for ui_elem in visibles:\n if isinstance(ui_elem, pygame.sprite.Sprite):\n ui_elem.draw()\n self._surface.blit(ui_elem.image, (ui_elem.rect[0], ui_elem.rect[1]))\n else:\n ui_elem.render(self._surface)\n\n buffer.blit(self._surface, (self._x0, self._y0))\n\n def unclick(self):\n Clickable.unclick(self)\n for _, c in self._components.items():\n if isinstance(c, Clickable):\n c.unclick()\n\n def __setitem__(self, key, value):\n if key in self._components:\n raise ValueError(\"item {} already added.\".format(key))\n self._components[key] = value\n\n def update(self, t) -> None:\n if len(self._components) < 1:\n return\n\n for _, c in self._components.items():\n c.update(t)\n\n def add_component(self, c: UIElement) -> None:\n \"\"\"\n Add a component to the screen's components\n :param c: (UIElement) the component to add\n :return: None\n \"\"\"\n if not c:\n raise ValueError(\"component to add not provided\")\n self._components[c.name] = c\n\n def remove_component(self, c_name: str):\n if not c_name:\n raise ValueError(\"component name is missing\")\n\n del self._components[c_name]\n\n def __getitem__(self, item):\n if isinstance(item, str) and item in 
self._components:\n            return self._components[item]\n\n        if isinstance(item, int):\n            if item < 0 or item >= len(self._components):\n                raise ValueError(\"item index out of bounds\")\n\n            for j, c in enumerate(self._components.values()):\n                if j == item:\n                    return c\n\n        return self._components.get(item)\n\n    def clicked(self, mx, my, button):\n        # check for self and all child elements if they are clicked the one with the smallest hitbox wins\n        # if no child is clicked, see if we are clicked\n        is_clicked, sender = Clickable.clicked(self, mx, my, button)\n\n        for _, c in self._components.items():\n            if isinstance(c, Clickable) or hasattr(c, \"clicked\"):\n                is_clicked_i, sender_i = c.clicked(mx, my, button)\n                if is_clicked_i:\n                    is_clicked, sender = is_clicked_i, sender_i\n\n        return is_clicked, sender\n\n    def process_event(self, e) -> None:\n        # delegate an event to the control that is in focus\n        # if there is no focused control, process it in the screen - swallow it\n        for _, c in self._components.items():\n            if c.has_focus:\n                c.process_event(e)\n\n\nclass WindowManager:\n    \"\"\"\n    # TODO: GUI - the main gui manager\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Constructor for GUI\"\"\"\n        super(WindowManager, self).__init__()\n\n        self._items = {}\n        self._item_names = []\n        self._transitions = {}\n        self._active_screen = None\n\n    @property\n    def active_screen(self):\n        return self._active_screen\n\n    def add_screen(self, s, is_active: bool = False):\n        \"\"\"\n        Add a screen to the window manager's collection of screens\n        :param s: (Screen) or list of (Screen) instances. If s is an individual screen the is_active flag can be\n        used to set the screen to the default active screen. If multiple screens are passed as (list) object, then\n        is_active is not used.\n        :param is_active: (bool) sets the passed screen as the currently active screen\n        :return: (None)\n        \"\"\"\n        if not s:\n            raise ValueError(\"screen not provided\")\n\n        if isinstance(s, Screen):\n            self._items[s.name] = s\n            self._item_names.append(s.name)\n\n            if is_active:\n                self._active_screen = s\n        elif isinstance(s, list):\n            for _screen in s:\n                self._items[_screen.name] = _screen\n                self._item_names.append(_screen.name)\n        else:\n            raise ValueError(\"Unknown type of s: {}\".format(type(s)))\n\n    def remove_screen(self, s_name: str):\n        if not s_name:\n            raise ValueError(\"screen not provided\")\n\n        # remove transitions and the actual screen ...\n        to_remove = [k for k, v in self._transitions.items() if v[0].name == s_name]\n        for r in to_remove:\n            del self._transitions[r]\n\n        del self._items[s_name]\n        self._item_names.remove(s_name)\n\n    def transition(self, from_name, to_name):\n        if not from_name:\n            raise ValueError(\"from name missing\")\n        if not to_name:\n            raise ValueError(\"to name missing\")\n        t_name = \"{}-{}\".format(from_name, to_name)\n        from_screen, to_screen = self._transitions[t_name]\n        from_screen.finalize()\n        to_screen.initialize()\n        self._active_screen = to_screen\n        self.on_transitioned(from_screen, to_screen)\n\n    # TODO: make into getter and setter\n    def on_transitioned(self, from_screen: Screen, to_screen: Screen) -> None:\n        \"\"\"\n        called after a transition is made\n        :param from_screen: the screen that was left\n        :param to_screen: the screen that became active\n        :return: None\n        \"\"\"\n        return None\n\n    def add_transition(\n        self, from_screen: Screen, to_screen: Screen, add_reverse: bool = False\n    ):\n        if not from_screen:\n            raise ValueError(\"from screen missing\")\n        if not to_screen:\n            raise ValueError(\"to screen missing\")\n        t_name = \"{}-{}\".format(from_screen.name, to_screen.name)\n        self._transitions[t_name] = 
(from_screen, to_screen)\n\n if add_reverse:\n t_name = \"{}-{}\".format(to_screen.name, from_screen.name)\n self._transitions[t_name] = (to_screen, from_screen)\n\n def remove_transition(self, from_name: str, to_name: str):\n if not from_name:\n raise ValueError(\"from name missing\")\n if not to_name:\n raise ValueError(\"to name missing\")\n t_name = \"{}-{}\".format(from_name, to_name)\n del self._transitions[t_name]\n\n def __getitem__(self, item):\n if item is None:\n raise ValueError(\"getitem - key not provided\")\n\n if isinstance(item, int):\n return self._items[self._item_names[item]]\n else:\n if item in self._items:\n return self._items[item]\n\n raise ValueError(\"Undefined screen selected\")\n\n def update(self, t=None):\n self._active_screen.update(t)\n\n def process_event(self, e) -> None:\n return self.active_screen.process_event(e)\n","sub_path":"elisa/ui/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":43208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"542164528","text":"import xml.dom.minidom as dom\n\n\nclass Location(object):\n def __init__(self, name: str, id: int):\n self.name = name\n self.id = 'id' + str(id)\n self.invariant = ''\n self.exponentialrate = ''\n self.is_urgent = False\n self.is_committed = False\n self.comments = ''\n self.testcodeEnter = ''\n self.testcodeExit = ''\n return\n\n def set_invariant(self, invariant: str):\n self.invariant = invariant\n return\n\n def set_exponentialrate(self, rate_of_exponential: str):\n self.exponentialrate = rate_of_exponential\n return\n\n def set_urgent(self, urgent: bool = False):\n self.is_urgent = urgent\n if urgent:\n self.is_committed = False\n return\n\n def set_committed(self, commited: bool = False):\n self.is_committed = commited\n if commited:\n self.is_urgent = False\n\n def set_comments(self, comments: str):\n self.comments = comments\n return\n\n def set_test_code(self, on_enter: str = '', on_exit: str = ''):\n if on_enter:\n self.testcodeEnter = on_enter\n if on_exit:\n self.testcodeExit = on_exit\n return\n\n def writer(self, doc: dom.Document):\n # doc = dom.Document()\n node = doc.createElement('location')\n node.setAttribute('id', self.id)\n\n name = doc.createElement('name')\n name.appendChild(doc.createTextNode(self.name))\n node.appendChild(name)\n\n if self.invariant:\n invariant = doc.createElement('label')\n invariant.setAttribute('kind', 'invariant')\n invariant.appendChild(doc.createTextNode(self.invariant))\n node.appendChild(invariant)\n\n if self.exponentialrate:\n exponentialrate = doc.createElement('label')\n exponentialrate.setAttribute('kind', 'exponentialrate')\n exponentialrate.appendChild(doc.createTextNode(self.exponentialrate))\n node.appendChild(exponentialrate)\n\n if self.testcodeEnter:\n testcodeEnter = doc.createElement('label')\n testcodeEnter.setAttribute('kind', 'testcodeEnter')\n testcodeEnter.appendChild(doc.createTextNode(self.testcodeEnter))\n node.appendChild(testcodeEnter)\n\n if self.testcodeExit:\n testcodeExit = doc.createElement('label')\n testcodeExit.setAttribute('kind', 'testcodeExit')\n testcodeExit.appendChild(doc.createTextNode(self.testcodeExit))\n node.appendChild(testcodeExit)\n\n if self.comments:\n comments = doc.createElement('label')\n comments.setAttribute('kind', 'comments')\n comments.appendChild(doc.createTextNode(self.comments))\n node.appendChild(comments)\n\n if self.is_urgent:\n urgent = doc.createElement('urgent')\n node.appendChild(urgent)\n elif 
self.is_committed:\n            committed = doc.createElement('committed')\n            node.appendChild(committed)\n\n        # doc.appendChild(node)\n        return node\n","sub_path":"uppaal/Location.py","file_name":"Location.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"139175701","text":"import copy\nimport math\nimport numpy as np\n\nfrom . import history\n\n\nclass jsonSchema:\n    \"\"\" An implementation of MolSSI's qc schema\n\n    Parameters\n    ----------\n    JSON_dict : dict\n        should match qcschema_input format. version 1\n\n    \"\"\"\n\n    def __init__(self, JSON_dict):\n        if JSON_dict['schema_name'] == 'qcschema_input':\n            self.optking_json = copy.deepcopy(JSON_dict)\n            self._original = copy.deepcopy(JSON_dict)\n            self.optking_json['molecule']['fix_com'] = True\n            self.optking_json['molecule']['fix_orientation'] = True\n        else:\n            raise ValueError(\"JSON file must match the qcschema_input\")\n\n    def __str__(self):\n        return str(self.optking_json)\n\n    def _get_original(self, geom, driver = 'gradient'):\n        self._original['molecule']['geometry'] = self.to_JSON_geom(geom)\n        return self._original\n\n    def update_geom_and_driver(self, geom, driver='gradient'):\n        \"\"\"Updates jsonSchema for requesting calculation\n\n        Parameters\n        ----------\n        geom : list of float\n            cartesian geometry 1D list\n        driver : str, optional\n            default is gradient. Other options: hessian or energy\n\n        Returns\n        -------\n        json_for_input : dict\n        \"\"\"\n        self.optking_json['molecule']['geometry'] = geom\n        json_for_input = copy.deepcopy(self.optking_json)\n        json_for_input['driver'] = driver\n\n        return json_for_input\n\n    # TODO revisit once options for optimizer is finalized\n    def find_optking_options(self):\n        \"\"\" Parse JSON dict for optking specific options\"\"\"\n\n        if 'optimizer' in self.optking_json['keywords']:\n            optking_options = self.optking_json['keywords']['optimizer']\n            del self.optking_json['keywords']['optimizer'] # remove to preserve json file for QM\n            return optking_options\n        else:\n            return {}\n\n    # TODO turn off logging_file if using json\n    # TODO error output to json_output file\n    def generate_json_output(self, geom, g_x):\n        \"\"\" Creates json style dictionary to summarize optimization\n\n        Parameters\n        ----------\n        geom : ndarray\n            (nat, 3) cartesian geometry\n        g_x : ndarray\n            cartesian gradient\n\n        Returns\n        -------\n        json_output : dict\n        \"\"\"\n        json_output = {'schema_name': 'qcschema_output'}\n        json_output['provenance'] = {'creator': 'optking', 'version': '3.0?',\n                                     'routine': 'runoptkingjson'}\n        json_output['return_result'] = {'geometry': self.to_JSON_geom(geom)}\n        json_output['success'] = 'true'\n        json_output['properties'] = {'return_energy': history.oHistory[-1].E,\n                                     'nuclear_repulsion_energy':\n                                     history.oHistory.nuclear_repulsion_energy}\n        json_output['properties']['steps'] = history.oHistory.summary()\n        json_output['return_result']['gradient'] = [i for i in g_x.flat]\n        return json_output\n\n    @staticmethod\n    def to_JSON_geom(geom):\n        \"\"\" Converts optking geom to list for JSON\n\n        Parameters\n        ----------\n        geom : ndarray\n            cartesian geometry\n\n        Returns\n        -------\n        list\n            1D geometry\n        \"\"\"\n        j_geom = [i for i in geom.flat]\n        return j_geom\n\n    @staticmethod\n    def get_JSON_result(json_data, driver, wantNuc=False):\n        \"\"\" Parse JSON file from QM program for result of calculation\n\n        Parameters\n        ----------\n        json_data : dict\n        driver : str\n            gradient, hessian, or energy\n        wantNuc : bool, optional\n            return nuclear repulsion energy as well\n\n        Returns\n        -------\n        
return_result : float or ndarray\n float if energy. ndarray if gradient or hessian\n return_energy : float\n return_nuc : float\n \"\"\"\n\n if json_data['schema_name'] == 'qcschema_output':\n if driver == 'gradient':\n return_result = np.asarray(json_data['return_result'])\n return_energy = json_data['properties']['return_energy']\n elif driver == 'hessian':\n return_result = np.asarray(json_data['return_result'])\n nat_3 = int(math.sqrt(len(return_result)))\n return_result.shape = (nat_3, nat_3)\n elif driver == 'energy':\n return_result = json_data['return_result']\n\n return_nuc = json_data['properties']['nuclear_repulsion_energy']\n if driver == 'gradient' and wantNuc:\n return return_energy, return_result, return_nuc\n elif driver == 'gradient':\n return return_energy, return_result\n elif wantNuc:\n return return_result, return_nuc\n else:\n return return_result\n\n @classmethod\n def make_qcschema(cls, geom, symbols, QM_method, basis, keywords, multiplicity=1):\n \"\"\" Creates a qcschema according to MolSSI qcschema_input version 1\n\n Parameters\n ----------\n geom : list of float\n cartesian geom (1D list)\n symbols : list of str\n atomic symbols (1D list)\n QM_method: str\n basis : str\n keywords : dict of str\n all options\n \"\"\"\n qcschema = {\"schema_name\": \"qcschema_input\",\n \"schema_version\": 1,\n \"molecule\": {\"geometry\": geom,\n \"symbols\": symbols,\n \"fix_com\": True,\n \"fix_orientation\": True,\n \"molecular_multiplicity\": multiplicity},\n \"driver\": \"\",\n \"model\": {\"method\": QM_method,\n \"basis\": basis},\n \"keywords\": keywords}\n\n return cls(qcschema)\n","sub_path":"optking/qcdbjson.py","file_name":"qcdbjson.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"181011951","text":"# -*- coding: utf-8 -*-\n\nfrom copy import deepcopy\n\n\nclass VerticeInvalidoException(Exception):\n pass\n\n\nclass ArestaInvalidaException(Exception):\n pass\n\n\nclass MatrizInvalidaException(Exception):\n pass\n\n\nclass ErroNoRetornoDeCaminhoEulerianoException(Exception):\n pass\n\n\nclass Grafo:\n QTDE_MAX_SEPARADOR = 1\n SEPARADOR_ARESTA = '-'\n __maior_vertice = 0\n\n def __init__(self, V=None, M=None):\n '''\n Constrói um objeto do tipo Grafo. Se nenhum parâmetro for passado, cria um Grafo vazio.\n Se houver alguma aresta ou algum vértice inválido, uma exceção é lançada.\n :param V: Uma lista dos vértices (ou nodos) do grafo.\n :param V: Uma matriz de adjacência que guarda as arestas do grafo. 
Cada entrada da matriz tem um inteiro que indica a quantidade de arestas que ligam aqueles vértices\n '''\n\n if V == None:\n V = list()\n if M == None:\n M = list()\n\n for v in V:\n if not (Grafo.verticeValido(v)):\n raise VerticeInvalidoException('O vértice ' + v + ' é inválido')\n if len(v) > self.__maior_vertice:\n self.__maior_vertice = len(v)\n\n self.N = list(V)\n\n if M == []:\n for k in range(len(V)):\n M.append(list())\n for l in range(len(V)):\n if k > l:\n M[k].append(self.SEPARADOR_ARESTA)\n else:\n M[k].append(0)\n\n if len(M) != len(V):\n raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto')\n\n for c in M:\n if len(c) != len(V):\n raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto')\n\n for i in range(len(V)):\n for j in range(len(V)):\n '''\n Verifica se os índices passados como parâmetro representam um elemento da matriz abaixo da diagonal principal.\n Além disso, verifica se o referido elemento é um traço \"-\". Isso indica que a matriz é não direcionada e foi construída corretamente.\n '''\n if i > j and not (M[i][j] == self.SEPARADOR_ARESTA):\n raise MatrizInvalidaException('A matriz não representa uma matriz não direcionada')\n\n aresta = V[i] + Grafo.SEPARADOR_ARESTA + V[j]\n if not (self.arestaValida(aresta)):\n raise ArestaInvalidaException('A aresta ' + aresta + ' é inválida')\n\n self.M = list(M)\n\n def arestaValida(self, aresta=''):\n '''\n Verifica se uma aresta passada como parâmetro está dentro do padrão estabelecido.\n Uma aresta é representada por um string com o formato a-b, onde:\n a é um substring de aresta que é o nome de um vértice adjacente à aresta.\n - é um caractere separador. Uma aresta só pode ter um único caractere como esse.\n b é um substring de aresta que é o nome do outro vértice adjacente à aresta.\n Além disso, uma aresta só é válida se conectar dois vértices existentes no grafo.\n :param aresta: A aresta que se quer verificar se está no formato correto.\n :return: Um valor booleano que indica se a aresta está no formato correto.\n '''\n\n # Não pode haver mais de um caractere separador\n if aresta.count(Grafo.SEPARADOR_ARESTA) != Grafo.QTDE_MAX_SEPARADOR:\n return False\n\n # Índice do elemento separador\n i_traco = aresta.index(Grafo.SEPARADOR_ARESTA)\n\n # O caractere separador não pode ser o primeiro ou o último caractere da aresta\n if i_traco == 0 or aresta[-1] == Grafo.SEPARADOR_ARESTA:\n return False\n\n if not (self.existeVertice(aresta[:i_traco])) or not (self.existeVertice(aresta[i_traco + 1:])):\n return False\n\n return True\n\n @classmethod\n def verticeValido(self, vertice: str):\n '''\n Verifica se um vértice passado como parâmetro está dentro do padrão estabelecido.\n Um vértice é um string qualquer que não pode ser vazio e nem conter o caractere separador.\n :param vertice: Um string que representa o vértice a ser analisado.\n :return: Um valor booleano que indica se o vértice está no formato correto.\n '''\n return vertice != '' and vertice.count(Grafo.SEPARADOR_ARESTA) == 0\n\n def existeVertice(self, vertice: str):\n '''\n Verifica se um vértice passado como parâmetro pertence ao grafo.\n :param vertice: O vértice que deve ser verificado.\n :return: Um valor booleano que indica se o vértice existe no grafo.\n '''\n return Grafo.verticeValido(vertice) and self.N.count(vertice) > 0\n\n def __primeiro_vertice_aresta(self, a: str):\n '''\n Dada uma aresta no formato X-Y, retorna o vértice X\n :param a: a aresta a ser analisada\n :return: O 
primeiro vértice da aresta\n '''\n return a[0:a.index(Grafo.SEPARADOR_ARESTA)]\n\n def __segundo_vertice_aresta(self, a: str):\n '''\n Dada uma aresta no formato X-Y, retorna o vértice Y\n :param a: A aresta a ser analisada\n :return: O segundo vértice da aresta\n '''\n return a[a.index(Grafo.SEPARADOR_ARESTA) + 1:]\n\n def __indice_primeiro_vertice_aresta(self, a: str):\n '''\n Dada uma aresta no formato X-Y, retorna o índice do vértice X na lista de vértices\n :param a: A aresta a ser analisada\n :return: O índice do primeiro vértice da aresta na lista de vértices\n '''\n return self.N.index(self.__primeiro_vertice_aresta(a))\n\n def __indice_segundo_vertice_aresta(self, a: str):\n '''\n Dada uma aresta no formato X-Y, retorna o índice do vértice Y na lista de vértices\n :param a: A aresta a ser analisada\n :return: O índice do segundo vértice da aresta na lista de vértices\n '''\n return self.N.index(self.__segundo_vertice_aresta(a))\n\n def existeAresta(self, a: str):\n '''\n Verifica se uma aresta passada como parâmetro pertence ao grafo.\n :param aresta: A aresta a ser verificada\n :return: Um valor booleano que indica se a aresta existe no grafo.\n '''\n existe = False\n if Grafo.arestaValida(self, a):\n for i in range(len(self.M)):\n for j in range(len(self.M)):\n if self.M[self.__indice_primeiro_vertice_aresta(a)][self.__indice_segundo_vertice_aresta(a)]:\n existe = True\n\n return existe\n\n def adicionaVertice(self, v):\n '''\n Inclui um vértice no grafo se ele estiver no formato correto.\n :param v: O vértice a ser incluído no grafo.\n :raises VerticeInvalidoException se o vértice já existe ou se ele não estiver no formato válido.\n '''\n if v in self.N:\n raise VerticeInvalidoException('O vértice {} já existe'.format(v))\n\n if self.verticeValido(v):\n if len(v) > self.__maior_vertice:\n self.__maior_vertice = len(v)\n\n self.N.append(v) # Adiciona vértice na lista de vértices\n self.M.append([]) # Adiciona a linha\n\n for k in range(len(self.N)):\n if k != len(self.N) - 1:\n self.M[k].append(0) # adiciona os elementos da coluna do vértice\n self.M[self.N.index(v)].append(self.SEPARADOR_ARESTA) # adiciona os elementos da linha do vértice\n else:\n self.M[self.N.index(v)].append(0) # adiciona um zero no último elemento da linha\n else:\n raise VerticeInvalidoException('O vértice ' + v + ' é inválido')\n\n def adicionaAresta(self, a):\n '''\n Adiciona uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice\n :param a: a aresta no formato correto\n :raise: lança uma exceção caso a aresta não estiver em um formato válido\n '''\n if self.arestaValida(a):\n i_a1 = self.__indice_primeiro_vertice_aresta(a)\n i_a2 = self.__indice_segundo_vertice_aresta(a)\n if i_a1 < i_a2:\n self.M[i_a1][i_a2] += 1\n else:\n self.M[i_a2][i_a1] += 1\n else:\n raise ArestaInvalidaException('A aresta {} é inválida'.format(a))\n\n def adiciona_aresta_sem_separador(self, arestas_sem_separador):\n arestas_sem_separador = arestas_sem_separador.split()\n for vertex_counter in range(len(arestas_sem_separador)):\n if vertex_counter % 2 == 1:\n new_edge = arestas_sem_separador[vertex_counter - 1] + self.SEPARADOR_ARESTA + arestas_sem_separador[\n vertex_counter]\n self.adicionaAresta(new_edge)\n\n def remove_aresta(self, a):\n '''\n Remove uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice\n :param a: a aresta no formato correto\n :raise: lança uma exceção caso a aresta não estiver em um formato válido\n '''\n if self.arestaValida(a):\n if 
self.existeAresta(a):\n i_a1 = self.__indice_primeiro_vertice_aresta(a)\n i_a2 = self.__indice_segundo_vertice_aresta(a)\n if i_a1 < i_a2:\n self.M[i_a1][i_a2] -= 1\n else:\n self.M[i_a2][i_a1] -= 1\n else:\n raise ArestaInvalidaException('A aresta {} é inválida'.format(a))\n\n def __str__(self):\n '''\n Fornece uma representação do tipo String do grafo.\n O String contém um sequência dos vértices separados por vírgula, seguido de uma sequência das arestas no formato padrão.\n :return: Uma string que representa o grafo\n '''\n\n # Dá o espaçamento correto de acordo com o tamanho do string do maior vértice\n espaco = ' ' * (self.__maior_vertice)\n\n grafo_str = espaco + ' '\n\n for v in range(len(self.N)):\n grafo_str += self.N[v]\n if v < (len(self.N) - 1): # Só coloca o espaço se não for o último vértice\n grafo_str += ' '\n\n grafo_str += '\\n'\n\n for l in range(len(self.M)):\n grafo_str += self.N[l] + ' '\n for c in range(len(self.M)):\n grafo_str += str(self.M[l][c]) + ' '\n grafo_str += '\\n'\n\n return grafo_str\n\n def vertices_nao_adjacentes(self):\n list_of_edges_not_adjacent = list()\n\n for line_counter in range(len(self.M)):\n for column_counter in range(line_counter, len(self.M)):\n connection = self.M[line_counter][column_counter]\n if connection == 0:\n edge_not_adjacent = str()\n edge_not_adjacent += (self.N[line_counter] + self.SEPARADOR_ARESTA + self.N[column_counter])\n list_of_edges_not_adjacent.append(edge_not_adjacent)\n\n return list_of_edges_not_adjacent\n\n def ha_laco(self):\n for position in range(len(self.N)):\n vertex = self.M[position][position]\n if vertex > 0:\n return True\n return False\n\n def ha_paralelas(self):\n for line_counter in range(len(self.M)):\n for column_counter in range(line_counter, len(self.M)):\n vertex = self.M[line_counter][column_counter]\n if vertex > 1:\n return True\n\n return False\n\n def grau(self, vertex):\n if vertex not in self.N:\n raise VerticeInvalidoException('O vértice ' + vertex + ' é inválido')\n\n position = self.N.index(vertex)\n\n grau = 0\n\n for line_counter in range(len(self.M)):\n if line_counter != position:\n element = self.M[line_counter][position]\n if element != self.SEPARADOR_ARESTA:\n grau += int(element)\n else:\n break\n else:\n grau += sum(self.M[line_counter][line_counter:])\n\n return grau\n\n def arestas_sobre_vertice(self, vertex):\n position_of_vertex_in_array = self.N.index(vertex)\n\n list_of_edges = set()\n\n for element_counter in range(len(self.M[position_of_vertex_in_array])):\n if self.M[position_of_vertex_in_array][element_counter] != self.SEPARADOR_ARESTA:\n if self.M[position_of_vertex_in_array][element_counter] > 0:\n other_vertex = self.N[element_counter]\n list_of_edges.add(f'{vertex}{self.SEPARADOR_ARESTA}{other_vertex}')\n\n for line_counter in range(len(self.M)):\n if self.M[line_counter][position_of_vertex_in_array] != self.SEPARADOR_ARESTA:\n if self.M[line_counter][position_of_vertex_in_array] > 0:\n first_vertex = self.N[line_counter]\n list_of_edges.add(f'{first_vertex}{self.SEPARADOR_ARESTA}{vertex}')\n else:\n break\n\n return list_of_edges\n\n def eh_completo(self):\n for line_counter in range(len(self.M)):\n for column_counter in range(line_counter + 1, len(self.M)):\n vertex = self.M[line_counter][column_counter]\n if vertex == 0:\n return False\n\n return True\n\n def há_caminho_euleriano(self):\n if self.esta_vazia_matriz_de_adjacencia(self.M):\n return -1\n qtd_impares = 0\n for vertice in self.N:\n grau_do_vertice = self.grau(vertice)\n if grau_do_vertice % 2 == 1:\n 
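# an Eulerian path exists only when at most two vertices have odd degree\n                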
qtd_impares += 1\n if qtd_impares > 2:\n return -1\n\n if qtd_impares == 1 and len(self.N) == 1:\n return 0\n\n if qtd_impares == 1:\n return -1\n\n return qtd_impares\n\n def encontrar_dupla_de_vertices_impares(self):\n vertices_impares = list()\n for vertice in self.N:\n if self.grau(vertice) % 2 == 1:\n vertices_impares.append(vertice)\n if len(vertices_impares) == 2:\n return vertices_impares\n\n def checar_se_linha_possui_conexoes(self, linha):\n for elemento in linha:\n if elemento > 0:\n return True\n return False\n\n def esta_vazia_matriz_de_adjacencia(self, matriz_adjacencia):\n for line_counter in range(len(matriz_adjacencia)):\n linha_de_conexoes = matriz_adjacencia[line_counter][line_counter:]\n if self.checar_se_linha_possui_conexoes(linha_de_conexoes):\n return False\n return True\n\n def caminho_euleriano_entre_dois_vertices(self,\n vertice,\n lista_de_vertices,\n copia_matriz_adjacencia,\n numero_da_aresta=2,\n caminho_euleriano=None):\n if caminho_euleriano is None:\n caminho_euleriano = list()\n caminho_euleriano.append(vertice)\n caminho_euleriano.append(\"a1\")\n if self.esta_vazia_matriz_de_adjacencia(copia_matriz_adjacencia):\n caminho_euleriano = caminho_euleriano[:-1]\n if caminho_euleriano[0] == lista_de_vertices[0] and caminho_euleriano[-1] == lista_de_vertices[-1]:\n return caminho_euleriano\n elif caminho_euleriano[0] == lista_de_vertices[-1] and caminho_euleriano[-1] == lista_de_vertices[0]:\n return caminho_euleriano\n else:\n return None\n else:\n posicao_vertice_na_lista = self.N.index(vertice)\n for line_counter in range(len(copia_matriz_adjacencia)):\n if line_counter == posicao_vertice_na_lista:\n for column_counter in range(line_counter, len(copia_matriz_adjacencia)):\n if copia_matriz_adjacencia[line_counter][column_counter] > 0:\n copia_matriz_adjacencia[line_counter][column_counter] -= 1\n novo_vertice = self.N[column_counter]\n caminho_euleriano += [novo_vertice, f'a{numero_da_aresta}']\n numero_da_aresta += 1\n return self.caminho_euleriano_entre_dois_vertices(\n novo_vertice,\n lista_de_vertices,\n copia_matriz_adjacencia,\n numero_da_aresta,\n caminho_euleriano)\n else:\n if copia_matriz_adjacencia[line_counter][posicao_vertice_na_lista] != self.SEPARADOR_ARESTA:\n if copia_matriz_adjacencia[line_counter][posicao_vertice_na_lista] > 0:\n copia_matriz_adjacencia[line_counter][posicao_vertice_na_lista] -= 1\n novo_vertice = self.N[line_counter]\n caminho_euleriano += [novo_vertice, f'a{numero_da_aresta}']\n numero_da_aresta += 1\n return self.caminho_euleriano_entre_dois_vertices(\n novo_vertice,\n lista_de_vertices,\n copia_matriz_adjacencia,\n numero_da_aresta,\n caminho_euleriano)\n return None\n\n def caminho_euleriano_para_zero_impares(self):\n for i in range(len(self.N)):\n for j in range(i, len(self.N)):\n lista_de_vertices = [self.N[i], self.N[j]]\n caminho_para_zero_impares = self.caminho_euleriano_entre_dois_vertices(lista_de_vertices[0],\n lista_de_vertices,\n deepcopy(self.M))\n if caminho_para_zero_impares is None:\n caminho_para_zero_impares = self.caminho_euleriano_entre_dois_vertices(lista_de_vertices[-1],\n lista_de_vertices,\n deepcopy(self.M))\n if caminho_para_zero_impares is not None:\n return caminho_para_zero_impares\n else:\n return caminho_para_zero_impares\n\n raise ErroNoRetornoDeCaminhoEulerianoException\n\n def caminho_euleriano(self):\n qtd_impares = self.há_caminho_euleriano()\n if qtd_impares < 0:\n return []\n\n if qtd_impares == 2:\n vertices_impares = self.encontrar_dupla_de_vertices_impares()\n 
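# with exactly two odd-degree vertices an Eulerian path must start at one\n            # of them and end at the other, so try both as starting points\n            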
primeiro_vertices_impar = vertices_impares[0]\n caminho_euleriano_para_primeiro_impar = self.caminho_euleriano_entre_dois_vertices(primeiro_vertices_impar,\n vertices_impares,\n deepcopy(self.M))\n if caminho_euleriano_para_primeiro_impar is None:\n segundo_vertice_impar = vertices_impares[-1]\n caminho_euleriano_para_segundo_impar = self.caminho_euleriano_entre_dois_vertices(segundo_vertice_impar,\n vertices_impares,\n deepcopy(self.M))\n return caminho_euleriano_para_segundo_impar\n else:\n return caminho_euleriano_para_primeiro_impar\n else:\n return self.caminho_euleriano_para_zero_impares()\n\n def todos_os_vertices_foram_visitados(self, vertices_visitados):\n for vertice in self.N:\n if vertice not in vertices_visitados:\n return False\n return True\n\n def adicionar_arestas_ao_ciclo_hamiltoniano(self, ciclo):\n ciclo_hamiltoniano_com_arestas = list()\n contador = 1\n for vertice in ciclo:\n ciclo_hamiltoniano_com_arestas.append(vertice)\n ciclo_hamiltoniano_com_arestas.append(f\"a{contador}\")\n contador += 1\n ciclo_hamiltoniano_com_arestas.pop()\n return ciclo_hamiltoniano_com_arestas\n\n def buscar_ciclo_hamiltoniano(self,\n vertice,\n copia_matriz_adjacencia,\n ciclo_hamiltoniano=None):\n if ciclo_hamiltoniano is None:\n ciclo_hamiltoniano = [vertice]\n\n if ciclo_hamiltoniano[0] == ciclo_hamiltoniano[-1] and len(ciclo_hamiltoniano) > 1:\n if self.todos_os_vertices_foram_visitados(ciclo_hamiltoniano):\n return ciclo_hamiltoniano\n return None\n else:\n posicao_na_lista_de_vertices = self.N.index(vertice)\n for line_counter in range(len(self.N)):\n if line_counter == posicao_na_lista_de_vertices:\n for column_counter in range(line_counter, len(self.N)):\n conexao = copia_matriz_adjacencia[line_counter][column_counter]\n if conexao > 0:\n novo_vertice = self.N[column_counter]\n if novo_vertice not in ciclo_hamiltoniano or novo_vertice == ciclo_hamiltoniano[0]:\n copia_matriz_adjacencia[line_counter][column_counter] -= 1\n ciclo_hamiltoniano.append(novo_vertice)\n retorno_da_busca_pelo_ciclo = self.buscar_ciclo_hamiltoniano(novo_vertice,\n copia_matriz_adjacencia,\n ciclo_hamiltoniano)\n if retorno_da_busca_pelo_ciclo is None:\n copia_matriz_adjacencia[line_counter][column_counter] += 1\n ciclo_hamiltoniano.pop()\n else:\n return retorno_da_busca_pelo_ciclo\n else:\n conexao = copia_matriz_adjacencia[line_counter][posicao_na_lista_de_vertices]\n if conexao != self.SEPARADOR_ARESTA and conexao > 0:\n novo_vertice = self.N[line_counter]\n if novo_vertice not in ciclo_hamiltoniano or novo_vertice == ciclo_hamiltoniano[0]:\n copia_matriz_adjacencia[line_counter][posicao_na_lista_de_vertices] -= 1\n ciclo_hamiltoniano.append(novo_vertice)\n retorno_da_busca_pelo_ciclo = self.buscar_ciclo_hamiltoniano(novo_vertice,\n copia_matriz_adjacencia,\n ciclo_hamiltoniano)\n if retorno_da_busca_pelo_ciclo is None:\n copia_matriz_adjacencia[line_counter][posicao_na_lista_de_vertices] += 1\n ciclo_hamiltoniano.pop()\n else:\n return retorno_da_busca_pelo_ciclo\n return None\n\n def ciclo_hamiltoniano(self):\n for vertice in self.N:\n ciclo_hamiltoniano = self.buscar_ciclo_hamiltoniano(vertice, deepcopy(self.M))\n if ciclo_hamiltoniano is not None:\n return self.adicionar_arestas_ao_ciclo_hamiltoniano(ciclo_hamiltoniano)\n return []\n","sub_path":"roteiro5/grafo_adj_nao_dir.py","file_name":"grafo_adj_nao_dir.py","file_ext":"py","file_size_in_byte":24274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"376033584","text":"# Definition for a binary 
tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def rightSideView(self, root: TreeNode) -> List[int]:\n if root is None:\n return []\n \n lst = []\n h = 0\n def trav(root, y):\n nonlocal h\n if root is None:\n return\n trav(root.right, y+1)\n lst.append([y, root.val])\n trav(root.left, y+1)\n h = max(h, y)\n \n trav(root, 0)\n # print(lst)\n sol = [None]*(h+1)\n for l in lst:\n if not sol[l[0]]:\n sol[l[0]] = l[1]\n return sol\n","sub_path":"199. Binary Tree Right Side View/199.py","file_name":"199.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"25880678","text":"import discord\r\nfrom discord.ext import commands\r\nfrom discord.ext import tasks\r\nimport os\r\nimport sys\r\nimport datetime\r\nimport glob\r\nimport re\r\nimport asyncio\r\nimport logging\r\nimport sqlite3\r\nimport aiosqlite\r\nfrom dotenv import load_dotenv\r\n\r\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\r\n\r\nfrom bot.config import *\r\nimport bot.glicko\r\nfrom bot.sql_queries import *\r\n\r\n\r\ndef days_hours_minutes(td):\r\n return td.days, td.seconds // 3600, (td.seconds // 60) % 60\r\n\r\n\r\nasync def check_admin(guild_id):\r\n async with aiosqlite.connect(guild_settings_path) as db:\r\n async with db.execute(\"select bot_admin_id from guilds where guild_id = ?\", (guild_id,)) as c:\r\n admin_id = await c.fetchone()\r\n if admin_id is None:\r\n return None\r\n return admin_id[0]\r\n\r\n\r\nif __name__ == '__main__':\r\n logger = logging.getLogger('discord')\r\n logger.setLevel(logging.DEBUG)\r\n handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\r\n handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\r\n logger.addHandler(handler)\r\n\r\n for path in path_list:\r\n if not os.path.isfile(path):\r\n if path == guild_settings_path:\r\n loop = asyncio.new_event_loop()\r\n asyncio.get_event_loop().run_until_complete(initialize_guild_settings())\r\n continue\r\n db_conn = sqlite3.connect(path)\r\n db_conn.close()\r\n\r\n prefix = '.'\r\n client = commands.Bot(command_prefix=prefix)\r\n client.remove_command('help')\r\n start_time = datetime.datetime.now().astimezone()\r\n\r\n load_dotenv(env_path)\r\n token = os.getenv(\"TEST_BOT\")\r\n\r\n\r\n @client.command(\"ping\", aliases=['p'])\r\n async def ping(ctx):\r\n await ctx.send(f'Bot Response Time: {round(client.latency * 1000)}ms ', delete_after=delete_delay)\r\n\r\n @client.command('reset_settings')\r\n @commands.has_permissions(administrator=True)\r\n async def reset(ctx):\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n await ctx.send(\"Are you sure you want to delete the settings for this guild? 
(y/n)\", delete_after=response_time)\r\n try:\r\n yn = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n if 'y' == yn.content:\r\n async with aiosqlite.connect(guild_settings_path) as db:\r\n await db.execute(\"delete from guilds where guild_id = ?\", (ctx.guild.id,))\r\n await db.commit()\r\n await ctx.send(\"Bot settings for this guild have been wiped!\", delete_after=delete_delay)\r\n await setup(ctx)\r\n else:\r\n await ctx.send(\"\\'y\\' was not detected, no settings have been changed\")\r\n return\r\n\r\n\r\n @client.command(\"setup\")\r\n @commands.has_permissions(administrator=True)\r\n async def setup(ctx):\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n if await check_admin(ctx.guild.id) is not None:\r\n await ctx.send(\"Settings have already been set up for this server\")\r\n return\r\n\r\n await ctx.send(\"Do you already have two channels to designate as the bot-commands channel and leaderboard \"\r\n \"channel? (y/n)\", delete_after=delete_delay)\r\n try:\r\n yn = await client.wait_for(\"message\", check=check, timeout=180)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n if 'y' == yn.content:\r\n await ctx.send(\"please supply the channel id of the bot channel\", delete_after=delete_delay)\r\n try:\r\n res = await client.wait_for(\"message\", check=check, timeout=180)\r\n try:\r\n await client.fetch_channel(int(res.content))\r\n bot_channel_id = res.content\r\n except Exception:\r\n await ctx.send(\"You supplied an invalid response\", delete_after=delete_delay)\r\n return\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n await ctx.send(\"please supply the channel id of the leaderboard channel\", delete_after=delete_delay)\r\n try:\r\n res = await client.wait_for(\"message\", check=check, timeout=180)\r\n try:\r\n await client.fetch_channel(int(res.content))\r\n leaderboard_channel_id = int(res.content)\r\n except Exception:\r\n await ctx.send(\"You supplied an invalid response\", delete_after=delete_delay)\r\n return\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n elif 'n' == yn.content:\r\n await ctx.send(\"Input hyphenated bot channel and leaderboard channel names separated by a space, in that \"\r\n \"order. Example: ratings-bot-channel leaderboard-channel:\", delete_after=delete_delay)\r\n try:\r\n channel_names = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n try:\r\n ch_names = channel_names.content.split(' ')\r\n if len(ch_names) > 2:\r\n raise Exception(\"Too many arguments provided\")\r\n bot_channel_name = ch_names[0]\r\n leaderboard_channel_name = ch_names[1]\r\n\r\n bot_channel = await ctx.message.guild.create_text_channel(str(bot_channel_name))\r\n leaderboard_channel = await ctx.message.guild.create_text_channel(leaderboard_channel_name)\r\n bot_channel_id = bot_channel.id\r\n leaderboard_channel_id = leaderboard_channel.id\r\n except Exception as e:\r\n await ctx.send(f\"You did not follow the provided format! 
{e}\", delete_after=delete_delay)\r\n return\r\n else:\r\n await ctx.send(\"You supplied an invalid response\", delete_after=delete_delay)\r\n return\r\n\r\n await ctx.send(\"Do you already have a role to set as a Bot Administrator (y/n)?\", delete_after=delete_delay)\r\n try:\r\n ynr = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n if 'y' == ynr.content:\r\n await ctx.send(\"Input role ID to serve as Bot Administrator\", delete_after=delete_delay)\r\n try:\r\n try:\r\n r_id = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n role = discord.utils.get(ctx.guild.roles, id=int(r_id.content))\r\n bot_admin_id = int(r_id.content)\r\n except Exception:\r\n await ctx.send(\"An error has occurred\", delete_after=delete_delay)\r\n return\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n elif 'n' == ynr.content:\r\n role = await ctx.message.guild.create_role(name=\"Bot Admin\")\r\n bot_admin_id = role.id\r\n user = ctx.message.author\r\n await user.add_roles(role)\r\n await ctx.send(\"The \\\"Bot Admin\\\" Role has been created, feel free to give the role to other members but\"\r\n \" please do not delete it as the id is hardcoded in the database.\",\r\n delete_after=delete_delay)\r\n else:\r\n await ctx.send(\"You supplied an invalid response\", delete_after=delete_delay)\r\n return\r\n\r\n await ctx.send(\"Input starting elo and starting sigma and global ratings floor, separated by spaces \"\r\n \"Example: 1200 150 900:\", delete_after=delete_delay)\r\n\r\n try:\r\n ratings_set = await client.wait_for(\"message\", check=check, timeout=180)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n try:\r\n ratings_settings = ratings_set.content.split(' ')\r\n if len(ratings_settings) > 3:\r\n raise Exception(\"Too many arguments provided\")\r\n starting_elo = ratings_settings[0]\r\n starting_sigma = ratings_settings[1]\r\n global_elo_floor = ratings_settings[2]\r\n except Exception as e:\r\n await ctx.send(f\"You did not follow the provided format! 
{e}\", delete_after=delete_delay)\r\n return\r\n\r\n async with aiosqlite.connect(guild_settings_path) as db:\r\n params = (ctx.guild.id, bot_channel_id, leaderboard_channel_id, bot_admin_id, starting_elo, starting_sigma,\r\n global_elo_floor,)\r\n await db.execute(\"insert into guilds values (?, ?, ?, ?, ?, ?, ?)\", params)\r\n await db.commit()\r\n\r\n init_tables = createGuildSettings(guild_id=ctx.guild.id)\r\n await init_tables.initialize_tables()\r\n\r\n await ctx.send('''Settings have been initialized for this server, GLHF!\\n\r\n A few things to keep in mind as you get started using the bot and its features:\\n\r\n 1) The bot only accepts commands invoked in the bot channel (to mitigate bot spam issues)\\n\r\n 2) If you end up needing to reuse this command (in case of accidental channel/role deletion),\r\n invoke the .reset_settings command which is restricted to users with adminstrator permissions\r\n (and can also be read from any channel).\\n\r\n 3) The leaderboard will automatically update itself upon entry of a command, if it is deleted \r\n it will automatically recreate itself''',\r\n delete_after=120)\r\n\r\n\r\n @client.command(\"block\", aliases=['bl', 'ban'])\r\n async def block_id(ctx, id_to_block=None):\r\n bot_admin_id = await check_admin(ctx.guild.id)\r\n role = ctx.guild.get_role(bot_admin_id)\r\n if role not in ctx.author.roles:\r\n await ctx.send(\"You do not have the proper permissions to use this command\", delete_after=delete_delay)\r\n return\r\n\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n if id_to_block is None:\r\n await ctx.send(\"What user do you want to prohibit? Format: @username or id\", delete_after=delete_delay)\r\n try:\r\n blockid = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n id_to_block = blockid.content\r\n\r\n try:\r\n banned_id = int(id_to_block)\r\n except ValueError:\r\n try:\r\n banned_id = int(id_to_block[3:-1])\r\n except ValueError:\r\n await ctx.send(\"Invalid input detected!\", delete_after=delete_delay)\r\n return\r\n try:\r\n user = await client.fetch_user(banned_id)\r\n except Exception:\r\n await ctx.send(\"You provided an invalid ID!\", delete_after=delete_delay)\r\n return\r\n name = user.name\r\n\r\n async with aiosqlite.connect(blocked_ids_path) as db:\r\n await db.execute(f\"insert or ignore into bans_{ctx.guild.id} (discord_id, discord_name) values (?, ?)\",\r\n (banned_id, name,))\r\n await db.commit()\r\n await ctx.send(f'<@{banned_id}> You have been banned from using {client.user.name}', delete_after=delete_delay)\r\n\r\n # completed\r\n @client.command(\"unblock\", aliases=['un', 'unban'])\r\n async def unblock_id(ctx, id_to_unblock=None):\r\n bot_admin_id = await check_admin(ctx.guild.id)\r\n role = ctx.guild.get_role(bot_admin_id)\r\n if role not in ctx.author.roles:\r\n await ctx.send(\"You do not have the proper permissions to use this command\", delete_after=delete_delay)\r\n return\r\n\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n if id_to_unblock is None:\r\n await ctx.send(\"What user do you want to allow commands from? 
Format: @username\", delete_after=delete_delay)\r\n\r\n try:\r\n unblockid = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n id_to_unblock = unblockid.content\r\n\r\n try:\r\n unbanned_id = int(id_to_unblock)\r\n except ValueError:\r\n try:\r\n unbanned_id = int(id_to_unblock[3:-1])\r\n except ValueError:\r\n await ctx.send(\"Invalid input detected!\", delete_after=delete_delay)\r\n return\r\n\r\n try:\r\n user = await client.fetch_user(unbanned_id)\r\n except Exception:\r\n await ctx.send(\"You provided an invalid ID!\", delete_after=delete_delay)\r\n return\r\n\r\n async with aiosqlite.connect(blocked_ids_path) as db:\r\n await db.execute(\"delete from bans_\" + f'{ctx.guild.id}' + \" where discord_id = ?\", (unbanned_id,))\r\n await db.commit()\r\n await ctx.send(f\"<@{unbanned_id}> You have been unbanned from using {client.user.name}\",\r\n delete_after=delete_delay)\r\n\r\n # completed\r\n @tasks.loop(hours=12)\r\n async def backup(ctx):\r\n todays_date = datetime.date.today().strftime(\"%m-%d-%Y\")\r\n backup_file = f'./data/ratings_backup_{todays_date}.db'\r\n source = sqlite3.connect(ratings_path)\r\n destination = sqlite3.connect(backup_file)\r\n source.backup(destination)\r\n source.close()\r\n\r\n backup_file = f'./data/ratings_master_backup_{todays_date}.db'\r\n source = sqlite3.connect(ratings_master_path)\r\n destination = sqlite3.connect(backup_file)\r\n source.backup(destination)\r\n source.close()\r\n\r\n backup_file = f'./data/matches_backup_{todays_date}.db'\r\n source = sqlite3.connect(matches_path)\r\n destination = sqlite3.connect(backup_file)\r\n source.backup(destination)\r\n source.close()\r\n\r\n backup_file = f'./data/guild_settings_backup_{todays_date}.db'\r\n source = sqlite3.connect(guild_settings_path)\r\n destination = sqlite3.connect(backup_file)\r\n source.backup(destination)\r\n source.close()\r\n\r\n backup_file = f'./data/banned_ids_backup_{todays_date}.db'\r\n source = sqlite3.connect(blocked_ids_path)\r\n destination = sqlite3.connect(backup_file)\r\n source.backup(destination)\r\n source.close()\r\n\r\n # prunes old backups\r\n backup_files = glob.glob('./data/*_backup_*.db')\r\n for file in backup_files:\r\n re_match = re.search(r'\\d{2}-\\d{2}-\\d{4}', file)\r\n date = datetime.datetime.strptime(re_match.group(), '%m-%d-%Y').date()\r\n date_cutoff = datetime.date.today() - datetime.timedelta(days=days_of_backup_file_storage)\r\n if date_cutoff > date:\r\n pruned_backup = str('./') + str(file)\r\n os.remove(pruned_backup)\r\n\r\n await ctx.send(f\"ELO backed up for today {todays_date}\", delete_after=delete_delay)\r\n\r\n # completed\r\n @client.command(\"restoreratings\", aliases=['rr', 'restore'])\r\n @commands.is_owner()\r\n async def restore(ctx):\r\n backup_files = glob.glob('./data/ratings_backup_*.db')\r\n if len(backup_files) == 0:\r\n await ctx.send(\"No backups to restore from\", delete_after=delete_delay)\r\n return\r\n\r\n await ctx.send(f\"Input date to restore ratings from (format mo-dy-year).\\n\"\r\n f\"Available dates to restore from: {backup_files}\", delete_after=delete_delay)\r\n\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n try:\r\n date_to_restore = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", 
delete_after=delete_delay)\r\n return\r\n\r\n try:\r\n backup_file = f'./data/ratings_backup_{date_to_restore.content}.db'\r\n source = sqlite3.connect(backup_file)\r\n destination = sqlite3.connect(ratings_path)\r\n source.backup(destination)\r\n source.close()\r\n except Exception as e:\r\n await ctx.send(f\"Could not find backup for the date specified, error: {e}\", delete_after=delete_delay)\r\n return\r\n\r\n await ctx.send(f'ELO data restored from backup made on {date_to_restore.content}', delete_after=delete_delay)\r\n\r\n # completed\r\n @client.command(\"listmembers\", aliases=['lm'])\r\n async def list_members(ctx):\r\n bot_admin_id = await check_admin(ctx.guild.id)\r\n role = ctx.guild.get_role(bot_admin_id)\r\n if role not in ctx.author.roles:\r\n await ctx.send(\"You do not have the proper permissions to use this command\", delete_after=delete_delay)\r\n return\r\n\r\n members = ''\r\n i = 0\r\n async with aiosqlite.connect(ratings_master_path) as db:\r\n async with db.execute(f\"select clan_id, player_name from players_{ctx.guild.id}\") as cursor:\r\n async for row in cursor:\r\n members += f'{row[0]}{row[1]}\\n'\r\n i += 1\r\n await ctx.send(f\"There are {i} ranked members:\\n{members}\", delete_after=delete_delay)\r\n\r\n # completed\r\n @client.command(\"match\", aliases=['m'])\r\n async def match(ctx, personone: str = None, persononewins: int = None, persontwo: str = None,\r\n persontwowins: int = None):\r\n emoji_object = rankEmoji(client=client)\r\n rank_emojis = emoji_object.rank_emoji()\r\n guild_id = ctx.guild.id\r\n\r\n async with aiosqlite.connect(guild_settings_path) as db:\r\n async with db.execute(\"select global_elo_floor, bot_admin_id from guilds where guild_id = ?\",\r\n (guild_id,)) as cur:\r\n result = await cur.fetchone()\r\n global_elo_floor = result[0]\r\n bot_admin_id = result[1]\r\n\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n if personone is None or persononewins is None or persontwo is None or persontwowins is None:\r\n await ctx.send(f\"Input new match to influence rating, format is Name1 (wins) Name2 (wins), example: \"\r\n f\"AlphaBeta 5 OmegaLambda 5. Global Elo floor is {global_elo_floor}. 
\"\r\n f\"There is no Elo ceiling.\", delete_after=delete_delay)\r\n\r\n try:\r\n match_msg = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n match_array = match_msg.content.split()\r\n personone = match_array[0]\r\n persononewins = int(match_array[1])\r\n persontwo = match_array[2]\r\n persontwowins = int(match_array[3])\r\n\r\n if persononewins > maxwinsallowed or persontwowins > maxwinsallowed:\r\n await ctx.send(f'You have exceeded the number of wins allowed in a single session ({maxwinsallowed})',\r\n delete_after=delete_delay)\r\n return\r\n\r\n async with aiosqlite.connect(ratings_master_path) as db:\r\n async with db.execute(f\"select * from players_{guild_id} where player_name like ?\",\r\n ('%' + personone + '%',)) as cur:\r\n p1_info = await cur.fetchall()\r\n\r\n name_found_bool = False\r\n if p1_info is None or len(p1_info) == 0:\r\n await ctx.send(f'<@{ctx.author.id}> {personone} was not detected in the database!',\r\n delete_after=delete_delay)\r\n return\r\n elif len(p1_info) == 1:\r\n p1_info = p1_info[0]\r\n else:\r\n result = ''\r\n for row in p1_info:\r\n result += row[3] + row[4] + '\\n'\r\n await ctx.send(f'Multiple names found:\\n{result}Specify which player you want to show:',\r\n delete_after=response_time)\r\n try:\r\n name = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n search = name.content\r\n for row in p1_info:\r\n if search == str(row[4]):\r\n p1_info = row\r\n name_found_bool = True\r\n break\r\n # This means we failed to narrow down the search\r\n if not name_found_bool:\r\n await ctx.send(\"Name was not found!\", delete_after=delete_delay)\r\n return\r\n\r\n async with aiosqlite.connect(ratings_master_path) as db:\r\n async with db.execute(f\"select * from players_{guild_id} where player_name like ?\",\r\n ('%' + persontwo + '%',)) as cur:\r\n p2_info = await cur.fetchall()\r\n\r\n name_found_bool = False\r\n if p2_info == p1_info:\r\n await ctx.send(\"You cannot input a match against yourself\", delete_after=delete_delay)\r\n return\r\n\r\n if p2_info is None or len(p2_info) == 0:\r\n await ctx.send(f'<@{ctx.author.id}> {persontwo} was not detected in the database!',\r\n delete_after=delete_delay)\r\n return\r\n elif len(p2_info) == 1:\r\n p2_info = p2_info[0]\r\n pass\r\n else:\r\n result = ''\r\n for row in p2_info:\r\n result += row[3] + row[4] + '\\n'\r\n\r\n await ctx.send(f'Multiple names found:\\n{result}Specify which player you want to show:',\r\n delete_after=response_time)\r\n try:\r\n name = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n search = name.content\r\n for row in p2_info:\r\n if search == str(row[4]):\r\n p2_info = row\r\n name_found_bool = True\r\n break\r\n # This means we failed to narrow down the search\r\n if not name_found_bool:\r\n await ctx.send(\"Name was not found!\", delete_after=delete_delay)\r\n return\r\n\r\n personone = p1_info[4]\r\n persontwo = p2_info[4]\r\n\r\n async with aiosqlite.connect(ratings_path) as db:\r\n cur = await db.execute(f\"select rating, sigma from ratings_{guild_id} where discord_id = ?\", (p1_info[0],))\r\n p1_r_s_tuple = await 
cur.fetchone()\r\n p1_old_rating = p1_r_s_tuple[0]\r\n p1_old_sigma = p1_r_s_tuple[1]\r\n\r\n cur2 = await db.execute(f\"select rating, sigma from ratings_{guild_id} where discord_id = ?\", (p2_info[0],))\r\n p2_r_s_tuple = await cur2.fetchone()\r\n p2_old_rating = p2_r_s_tuple[0]\r\n p2_old_sigma = p2_r_s_tuple[1]\r\n\r\n playeroneobject = bot.glicko.Player(rating=p1_old_rating, rd=p1_old_sigma)\r\n playertwoobject = bot.glicko.Player(rating=p2_old_rating, rd=p2_old_sigma)\r\n\r\n for i in range(persononewins):\r\n playeroneobject.update_player([p2_old_rating], [p2_old_sigma], [1])\r\n playertwoobject.update_player([p1_old_rating], [p1_old_sigma], [0])\r\n for i in range(persontwowins):\r\n playertwoobject.update_player([p2_old_rating], [p2_old_sigma], [1])\r\n playeroneobject.update_player([p1_old_rating], [p1_old_sigma], [0])\r\n\r\n p1_new_rating = round(playeroneobject.get_rating(), 2)\r\n if p1_new_rating < global_elo_floor:\r\n p1_new_rating = global_elo_floor\r\n p1_new_sigma = round(playeroneobject.get_rd(), 2)\r\n\r\n p2_new_rating = round(playertwoobject.get_rating(), 2)\r\n if p2_new_rating < global_elo_floor:\r\n p2_new_rating = global_elo_floor\r\n p2_new_sigma = round(playertwoobject.get_rd(), 2)\r\n\r\n author = str(ctx.author)\r\n author_id = ctx.author.id\r\n\r\n role = ctx.guild.get_role(bot_admin_id)\r\n if ctx.author.id == p1_info[0]:\r\n await ctx.send(f'<@{p2_info[0]}> A match involving you was just added. The match results have been sent '\r\n f'to you for your perusal', delete_after=delete_delay)\r\n user = await client.fetch_user(p2_info[0])\r\n await user.send(f'<@{p2_info[0]}>, A match was added with a score of {persononewins} wins for '\r\n f'{personone} and {persontwowins} wins for {persontwo}, if this result is incorrect, please'\r\n f' contact the Bot Administrators.\\nOld Elo: {p2_old_rating} Old Sigma: {p2_old_sigma}.'\r\n f'\\nNew Elo: {p2_new_rating} New Sigma: {p2_new_sigma}.')\r\n\r\n elif ctx.author.id == p2_info[0]:\r\n await ctx.send(f'<@{p1_info[0]}> A match involving you was just added. The match results have been sent '\r\n f'to you for your perusal', delete_after=delete_delay)\r\n user = await client.fetch_user(p1_info[0])\r\n await user.send(f'<@{p1_info[0]}>, A match was added with a score of {persononewins} wins for '\r\n f'{personone} and {persontwowins} wins for {persontwo}, if this result is incorrect, please'\r\n f' contact the Bot Administrators.\\nOld Elo: {p2_old_rating} Old Sigma: {p2_old_sigma}.'\r\n f'\\nNew Elo: {p2_new_rating} New Sigma: {p2_new_sigma}.')\r\n\r\n else:\r\n if role in ctx.author.roles:\r\n await ctx.send(f'<@{p1_info[0]}> <@{p2_info[0]}> A match involving you was just added. The match '\r\n f'results have been sent to you for your perusal', delete_after=delete_delay)\r\n else:\r\n await ctx.send(f\"<@{ctx.author.id}>You cannot input a match concerning two other people!\",\r\n delete_after=delete_delay)\r\n return\r\n\r\n async with aiosqlite.connect(ratings_path) as db:\r\n await db.execute(f\"update ratings_{guild_id} set rating = ?, sigma = ?, wins = wins + ?, losses = \"\r\n \"losses + ? where discord_id = ?;\",\r\n (p1_new_rating, p1_new_sigma, persononewins, persontwowins, p1_info[0],))\r\n await db.commit()\r\n await db.execute(f\"update ratings_{guild_id} set rating = ?, sigma = ?, wins = wins + ?, losses = \"\r\n \"losses + ? 
where discord_id = ?;\",\r\n (p2_new_rating, p2_new_sigma, persontwowins, persononewins, p2_info[0],))\r\n await db.commit()\r\n\r\n async with aiosqlite.connect(matches_path) as db:\r\n await db.execute(f\"insert into matches_{guild_id} (player_a, player_a_score, player_a_old_elo, \"\r\n f\"player_a_old_sigma, player_a_new_elo, player_a_new_sigma, player_b, player_b_score, \"\r\n f\"player_b_old_elo, player_b_old_sigma, player_b_new_elo, player_b_new_sigma, \"\r\n f\"inputted_user_name, inputted_user_id) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",\r\n (personone, persononewins, p1_old_rating, p1_old_sigma, p1_new_rating, p1_new_sigma,\r\n persontwo, persontwowins, p2_old_rating, p2_old_sigma, p2_new_rating, p2_new_sigma,\r\n author, author_id,))\r\n await db.commit()\r\n\r\n rankp1 = None\r\n emoji1 = None\r\n rankp2 = None\r\n emoji2 = None\r\n for k in ranks:\r\n if p1_new_rating >= ranks[k]:\r\n rankp1 = k\r\n emoji1 = rank_emojis[k]\r\n if p2_new_rating >= ranks[k]:\r\n rankp2 = k\r\n emoji2 = rank_emojis[k]\r\n\r\n if p1_new_rating < p1_old_rating:\r\n color1 = f'```diff\\n- {p1_new_rating} {rankp1}\\nnew Sigma {p1_new_sigma} \\n```'\r\n else:\r\n color1 = f'```diff\\n+ {p1_new_rating} {rankp1}\\nnew Sigma {p1_new_sigma} \\n```'\r\n\r\n if p2_new_rating < p2_old_rating:\r\n color2 = f'```diff\\n- {p2_new_rating} {rankp2}\\nnew Sigma {p2_new_sigma} \\n```'\r\n else:\r\n color2 = f'```diff\\n+ {p2_new_rating} {rankp2}\\nnew Sigma {p2_new_sigma} \\n```'\r\n\r\n await ctx.send(f\"Updated Ratings (automatically saved to file):\\n\\n{personone}\\'s Rank: {emoji1} {color1}\\n\"\r\n f\"{persontwo}\\'s Rank: {emoji2} {color2}\", delete_after=delete_delay)\r\n\r\n # completed\r\n @client.command(\"filecontents\", aliases=['fc'])\r\n async def file_contents(ctx):\r\n bot_admin_id = await check_admin(ctx.guild.id)\r\n role = ctx.guild.get_role(bot_admin_id)\r\n if role not in ctx.author.roles:\r\n await ctx.send(\"You do not have the proper permissions to use this command\", delete_after=delete_delay)\r\n return\r\n\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n guild_id = ctx.guild.id\r\n user = await client.fetch_user(ctx.author.id)\r\n\r\n i = 1\r\n ratings_embed = discord.Embed(title=\"Contents of \\'ratings.db\\'\")\r\n async with aiosqlite.connect(ratings_path) as db:\r\n async with db.execute(f\"select * from ratings_{guild_id}\") as cur:\r\n async for row in cur:\r\n ratings_embed.add_field(name=str(i), value=str(row), inline=False)\r\n i += 1\r\n await user.send(embed=ratings_embed, delete_after=delete_delay)\r\n\r\n i = 1\r\n ratings_master_embed = discord.Embed(title=\"Contents of \\'ratings_master.db\\'\")\r\n async with aiosqlite.connect(ratings_master_path) as db:\r\n async with db.execute(f\"select * from players_{guild_id}\") as cur:\r\n async for row in cur:\r\n ratings_master_embed.add_field(name=str(i), value=str(row), inline=False)\r\n i += 1\r\n await user.send(embed=ratings_master_embed, delete_after=delete_delay)\r\n\r\n i = 1\r\n matches_embed = discord.Embed(title=\"Contents of \\'matches.db\\'\")\r\n async with aiosqlite.connect(matches_path) as db:\r\n async with db.execute(f\"select * from matches_{guild_id}\") as cur:\r\n async for row in cur:\r\n matches_embed.add_field(name=str(i), value=str(row), inline=False)\r\n i += 1\r\n await user.send(embed=matches_embed, delete_after=delete_delay)\r\n\r\n i = 1\r\n bans_embed = discord.Embed(title=\"Contents of \\'banned_ids.db\\'\")\r\n async with 
aiosqlite.connect(blocked_ids_path) as db:\r\n async with db.execute(f\"select * from bans_{guild_id}\") as cur:\r\n async for row in cur:\r\n bans_embed.add_field(name=str(i), value=str(row), inline=False)\r\n i += 1\r\n await user.send(embed=bans_embed, delete_after=delete_delay)\r\n\r\n i = 1\r\n guilds_embed = discord.Embed(title=\"Contents of \\'guild_settings.db\\'\")\r\n async with aiosqlite.connect(guild_settings_path) as db:\r\n async with db.execute(\"select * from guilds where guild_id = ?\", (guild_id,)) as cur:\r\n async for row in cur:\r\n guilds_embed.add_field(name=str(i), value=str(row), inline=False)\r\n i += 1\r\n await user.send(embed=guilds_embed, delete_after=delete_delay)\r\n\r\n await ctx.send(\"Input the file and line that you want to delete:\\nFormat: file_name line-#)\",\r\n delete_after=delete_delay)\r\n\r\n try:\r\n msg = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n if msg.content == 'stop':\r\n return\r\n if str(msg.content).startswith('.'):\r\n return\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n split_msg = msg.content.split(' ')\r\n file_name = split_msg[0]\r\n line_no = split_msg[1]\r\n\r\n if 'matches' in file_name:\r\n async with aiosqlite.connect(matches_path) as db:\r\n await db.execute(f\"delete from matches_{guild_id} where rowid = ?;\", (line_no,))\r\n await db.commit()\r\n elif 'ratings_master' in file_name:\r\n async with aiosqlite.connect(ratings_master_path) as db:\r\n await db.execute(f\"delete from players_{guild_id} where rowid = ?;\", (line_no,))\r\n await db.commit()\r\n elif 'ratings' in file_name:\r\n async with aiosqlite.connect(ratings_path) as db:\r\n await db.execute(f\"delete from ratings_{guild_id} where rowid = ?;\", (line_no,))\r\n await db.commit()\r\n elif 'banned' in file_name:\r\n async with aiosqlite.connect(blocked_ids_path) as db:\r\n await db.execute(f\"delete from players_{guild_id} where rowid = ?;\", (line_no,))\r\n await db.commit()\r\n elif 'settings' in file_name:\r\n async with aiosqlite.connect(guild_settings_path) as db:\r\n await db.execute(\"delete from guilds where rowid = ?;\", (line_no,))\r\n await db.commit()\r\n\r\n await ctx.send(f\"Line {line_no} in File {file_name} has been removed\", delete_after=delete_delay)\r\n\r\n # completed\r\n @client.command(\"editelo\", aliases=['ee'])\r\n async def edit_elo(ctx, name: str = None, newelo: str = None):\r\n bot_admin_id = await check_admin(ctx.guild.id)\r\n role = ctx.guild.get_role(bot_admin_id)\r\n if role not in ctx.author.roles:\r\n await ctx.send(\"You do not have the proper permissions to use this command\", delete_after=delete_delay)\r\n return\r\n\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n if name is None or newelo is None:\r\n await ctx.send(\"Who\\'s elo do you want to change? format: user newelo\", delete_after=delete_delay)\r\n\r\n try:\r\n msg = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n name = msg.content.split()[0]\r\n newelo = msg.content.split()[1]\r\n\r\n async with aiosqlite.connect(ratings_path) as db:\r\n await db.execute(f\"update ratings_{ctx.guild.id} set rating = ? 
where player_name = ?;\", (newelo, name,))\r\n await db.commit()\r\n await ctx.send(f\"{name}\\'s elo has been changed to {newelo}\")\r\n\r\n # completed\r\n @client.command(\"editsigma\", aliases=['es'])\r\n async def edit_sigma(ctx, name: str = None, newsigma: str = None):\r\n bot_admin_id = await check_admin(ctx.guild.id)\r\n role = ctx.guild.get_role(bot_admin_id)\r\n if role not in ctx.author.roles:\r\n await ctx.send(\"You do not have the proper permissions to use this command\", delete_after=delete_delay)\r\n return\r\n\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n if name is None or newsigma is None:\r\n await ctx.send(\"Who\\'s sigma do you want to change? format: user newsigma\", delete_after=delete_delay)\r\n\r\n try:\r\n msg = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n name = msg.content.split()[0]\r\n newsigma = msg.content.split()[1]\r\n\r\n # No ID checks needed because this is an admin only command\r\n async with aiosqlite.connect(ratings_path) as db:\r\n await db.execute(f\"update ratings_{ctx.guild.id} set sigma = ? where player_name = ?;\", (newsigma, name,))\r\n await db.commit()\r\n await ctx.send(f'{name}\\'s sigma has been changed to {newsigma}')\r\n\r\n # completed and rewritten in async format\r\n @client.command('editname', aliases=['en'])\r\n async def edit_member(ctx, name: str = None):\r\n guild_id = ctx.guild.id\r\n bot_admin_id = await check_admin(ctx.guild.id)\r\n role = ctx.guild.get_role(bot_admin_id)\r\n\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n if name is None:\r\n await ctx.send(\"Input the name to be edited (Note that you may only change the name that is associated with\"\r\n \" your unique Discord tag)\", delete_after=delete_delay)\r\n\r\n try:\r\n old_name = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n name = old_name.content\r\n\r\n try:\r\n await ctx.send(\"Enter new name:\", delete_after=delete_delay)\r\n new_name = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n author_id = ctx.author.id\r\n async with aiosqlite.connect(ratings_master_path) as db:\r\n async with db.execute(f\"select discord_id from players_{guild_id} where player_name = ?\",\r\n (new_name.content,)) as cur:\r\n search = await cur.fetchone()\r\n if search is not None:\r\n await ctx.send(\"Name already exists!\", delete_after=delete_delay)\r\n return\r\n\r\n async with db.execute(f\"select discord_id from players_{guild_id} where player_name = ?\",\r\n (name,)) as cur:\r\n search = await cur.fetchone()\r\n if search is None:\r\n await ctx.send(f\"Name {name} does not exist!\", delete_after=delete_delay)\r\n return\r\n # if bot admin\r\n if role in ctx.author.roles:\r\n author_id = search[0]\r\n elif author_id != search[0]:\r\n await ctx.send(\"Only Bot admins may change other member's names!\", delete_after=delete_delay)\r\n return\r\n\r\n # a non bot admin can only ever change their own name\r\n await db.execute(f\"update players_{guild_id} set player_name = ? 
where discord_id = ?;\",\r\n (new_name.content, author_id,))\r\n await db.commit()\r\n\r\n async with aiosqlite.connect(ratings_path) as db:\r\n await db.execute(f\"update ratings_{guild_id} set player_name = ? where discord_id = ?;\",\r\n (new_name.content, author_id,))\r\n await db.commit()\r\n await ctx.send(f\"<@{author_id}> Your name was changed from {name} to {new_name.content}\")\r\n\r\n # completed and rewritten in async format\r\n @client.command('stats', aliases=['s'])\r\n async def member_stats(ctx, name: str = None):\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n if name is None:\r\n await ctx.send(f\"Who\\'s stats do you want to check?\", delete_after=delete_delay)\r\n try:\r\n msg = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n name = msg.content\r\n\r\n guild_id = ctx.guild.id\r\n async with aiosqlite.connect(ratings_master_path) as db:\r\n async with db.execute(f\"select discord_id, clan_id, player_name from players_{guild_id} where \"\r\n f\"player_name like ?\", ('%' + name + '%',)) as cur:\r\n search_results = await cur.fetchall()\r\n\r\n discord_id = None\r\n if len(search_results) == 0:\r\n await ctx.send(f'<@{ctx.author.id}> {name} has not been added to the database!\\n',\r\n delete_after=delete_delay)\r\n return\r\n elif len(search_results) > 1:\r\n result = ''\r\n for row in search_results:\r\n result += row[1] + row[2] + '\\n'\r\n await ctx.send(f'Multiple names found:\\n{result}Specify which player you want to show:',\r\n delete_after=response_time)\r\n try:\r\n name = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n name = name.content\r\n for row in search_results:\r\n if name == row[2]:\r\n discord_id = int(row[0])\r\n if discord_id is None:\r\n await ctx.send(\"Name was not found!\", delete_after=delete_delay)\r\n return\r\n else:\r\n name = search_results[0][2]\r\n discord_id = search_results[0][0]\r\n\r\n async with aiosqlite.connect(ratings_path) as db:\r\n async with db.execute(\"select clan_id, player_name, rating, sigma, wins, losses, last_updated from ratings_\"\r\n f\"{guild_id} where player_name = ? 
and discord_id = ?\", (name, discord_id,)) as cur:\r\n r = await cur.fetchone()\r\n tslm = datetime.datetime.utcnow() - datetime.datetime.fromisoformat(r[6])\r\n t = days_hours_minutes(tslm)\r\n await ctx.send(f\"Stats for {r[0]}{r[1]}:\\nRating: {r[2]}\\nSigma: {r[3]}\\nWins: {r[4]}\\nLosses: {r[5]}\\n\"\r\n f\"Time since last match: {t[0]} days, {t[1]} hours and {t[2]} minutes\",\r\n delete_after=delete_delay)\r\n\r\n # completed and rewritten in async format\r\n @client.command(\"add\", aliases=['a'])\r\n async def add_member(ctx, name: str = None, d_id=None):\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n guild_id = ctx.guild.id\r\n\r\n async with aiosqlite.connect(guild_settings_path) as db:\r\n cur = await db.execute(\"select bot_admin_id, starting_elo, starting_sigma from guilds where guild_id = ?\",\r\n (guild_id,))\r\n starting_settings = await cur.fetchone()\r\n bot_admin_id = starting_settings[0]\r\n starting_elo = starting_settings[1]\r\n starting_sigma = starting_settings[2]\r\n role = ctx.guild.get_role(bot_admin_id)\r\n\r\n if name is None:\r\n await ctx.send(f\"Input new member for competitive ranking, Format is name, i.e. Example.\\n\"\r\n f\"Warning, your unique Discord tag will be associated with your inputted name, so do not \"\r\n f\"input anybody except yourself. If you make a typo, please use the .editname command.\\n\"\r\n f\"Elo starts at {starting_elo} and Sigma starts at {starting_sigma} for all players.\\n\"\r\n f\"Bot Admins are allowed to enter in members other than themselves by specifying name\"\r\n f\"then discord_id i.e. Example 112233445566778899\", delete_after=delete_delay)\r\n try:\r\n msg = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n name = msg.content\r\n\r\n try:\r\n split = name.split('>')\r\n clan = split[0] + '>'\r\n name = split[1]\r\n except IndexError:\r\n clan = '<>'\r\n\r\n author_id = ctx.author.id\r\n # if admin, allow the ability to add others, else, just sets id to author\r\n if d_id is not None:\r\n if role in ctx.author.roles:\r\n try:\r\n author_id = int(d_id)\r\n except Exception as e:\r\n try:\r\n author_id = d_id[3:-1]\r\n except Exception as er:\r\n await ctx.send(\"ID is invalid type!\", delete_after=delete_delay)\r\n return\r\n\r\n user = await client.fetch_user(author_id)\r\n\r\n async with aiosqlite.connect(ratings_master_path) as db:\r\n async with db.execute(f\"select 1 from players_{guild_id} where discord_id = ? limit 1\", (author_id,)) as cur:\r\n if await cur.fetchone() is None:\r\n await cur.execute(f\"insert into players_{guild_id} (discord_id, discord_name, clan_id, player_name)\"\r\n f\" values (?, ?, ?, ?)\", (author_id, str(user), clan, name,))\r\n await db.commit()\r\n else:\r\n await ctx.send(f'<@{ctx.author.id}> This Discord ID was already detected in the database! 
If you '\r\n f'need to edit your name, use the .editname command', delete_after=delete_delay)\r\n return\r\n\r\n async with aiosqlite.connect(ratings_path) as db:\r\n await db.execute(f\"insert into ratings_{guild_id} (discord_id, discord_name, clan_id, player_name, rating, \"\r\n f\"sigma, wins, losses) values (?, ?, ?, ?, ?, ?, ?, ?)\",\r\n (author_id, str(user), clan, name, starting_elo, starting_sigma, 0, 0))\r\n await db.commit()\r\n await ctx.send(f'<@{author_id}> was successfully registered in the database, GLHF!', delete_after=delete_delay)\r\n\r\n # completed and rewritten in async format\r\n @client.command('delete', aliases=['d'])\r\n async def delete_member(ctx, name: str = None):\r\n guild_id = ctx.guild.id\r\n bot_admin_id = await check_admin(ctx.guild.id)\r\n role = ctx.guild.get_role(bot_admin_id)\r\n if role not in ctx.author.roles:\r\n await ctx.send(\"You do not have the proper permissions to use this command\", delete_after=delete_delay)\r\n return\r\n\r\n def check(message):\r\n return message.author == ctx.author and message.channel == ctx.channel\r\n\r\n if name is None:\r\n await ctx.send(\"Who do you want to remove from the database?\", delete_after=delete_delay)\r\n try:\r\n msg = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n name = msg.content\r\n\r\n try:\r\n split = name.split('>')\r\n name = split[1]\r\n except IndexError:\r\n pass\r\n\r\n async with aiosqlite.connect(ratings_master_path) as db:\r\n cur = await db.execute(f\"select player_name from players_{guild_id} where player_name like ?\",\r\n ('%' + name + '%',))\r\n name = await cur.fetchone()\r\n await ctx.send(f\"Are you sure you want to delete {name[0]}? 
(y/n)\")\r\n try:\r\n yn = await client.wait_for(\"message\", check=check, timeout=response_time)\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Sorry, you didn't reply in time!\", delete_after=delete_delay)\r\n return\r\n\r\n if 'y' == yn.content:\r\n await cur.execute(f\"delete from players_{guild_id} where player_name = ?\", (name[0],))\r\n await db.commit()\r\n async with aiosqlite.connect(ratings_path) as dbr:\r\n await dbr.execute(f\"delete from ratings_{guild_id} where player_name = ?;\", (name[0],))\r\n await dbr.commit()\r\n await ctx.send(f'{name[0]} was removed from the database!', delete_after=delete_delay)\r\n else:\r\n await ctx.send(\"Command aborted, no deletions have occurred.\", delete_after=delete_delay)\r\n return\r\n\r\n # completed and rewritten in async format\r\n async def leaderboard(message):\r\n try:\r\n emoji_object = rankEmoji(client=client)\r\n rank_emojis = emoji_object.rank_emoji()\r\n\r\n async with aiosqlite.connect(guild_settings_path) as db:\r\n cur = await db.execute(\"select leaderboard_channel_id from guilds where guild_id = ?\",\r\n (message.guild.id,))\r\n leaderboard_channel = await cur.fetchone()\r\n channel = discord.utils.get(message.guild.text_channels, id=leaderboard_channel[0])\r\n msgs = await channel.history(limit=100).flatten()\r\n\r\n ratings = []\r\n new_embed = discord.Embed(title=f\"Current Top {leaderboard_members} leaderboard by Elo rating:\")\r\n\r\n async with aiosqlite.connect(ratings_path) as db:\r\n async with db.execute(f\"select * from ratings_{message.guild.id}\") as cs:\r\n async for row in cs:\r\n ratings.append(row)\r\n ratings.sort(key=lambda x: x[5], reverse=True)\r\n\r\n for index, row in enumerate(ratings):\r\n if index == (leaderboard_members - 1):\r\n break\r\n\r\n clan_id = row[3]\r\n player_name = row[4]\r\n rating = row[5]\r\n sigma = row[6]\r\n wins = row[7]\r\n losses = row[8]\r\n\r\n if clan_id == '<>':\r\n name_string = f'{index+1}) {player_name}'\r\n else:\r\n name_string = f'{index+1}) {clan_id}{player_name}'\r\n\r\n try:\r\n winrate_pct = round(((wins / (wins + losses)) * 100), 2)\r\n winrate = str(winrate_pct) + '%'\r\n except ZeroDivisionError:\r\n winrate = 'N/A'\r\n\r\n value_string = f'\\nRating: {rating} Sigma: {sigma} \\nWins: {wins} Losses: {losses} Winrate: {winrate}'\r\n rank = None\r\n em = None\r\n for k in ranks:\r\n if rating >= ranks[k]:\r\n rank = k\r\n em = rank_emojis[k]\r\n else:\r\n break\r\n new_embed.add_field(name=name_string, value=f'Rank: {rank} {em}{value_string}',\r\n inline=False)\r\n\r\n if len(msgs) == 0:\r\n await channel.send(embed=new_embed)\r\n else:\r\n for message in msgs:\r\n if message.author.name == client.user.name:\r\n message = await channel.fetch_message(message.id)\r\n await message.edit(embed=new_embed)\r\n except Exception as e:\r\n await message.channel.send(\"An error occurred with the leaderboard function\", delete_after=delete_delay)\r\n pass\r\n\r\n\r\n @client.command('uptime', aliases=['up'])\r\n async def uptime(ctx):\r\n now = datetime.datetime.now().astimezone()\r\n delta = now - start_time\r\n delta_d_h_s = days_hours_minutes(delta)\r\n if delta_d_h_s[0]:\r\n time_format = f\"**{delta_d_h_s[0]}** days, **{delta_d_h_s[1]}** hours and **{delta_d_h_s[2]}** minutes.\"\r\n else:\r\n time_format = f\"**{delta_d_h_s[1]}** hours and **{delta_d_h_s[2]}** minutes.\"\r\n await ctx.send(f\"{client.user.name} has been up for {time_format}\", delete_after=10)\r\n\r\n\r\n @client.command('help', aliases=['h'])\r\n async def help_command(ctx):\r\n embed = 
discord.Embed(colour=discord.Colour.blue())\r\n embed.set_author(name='Help:')\r\n embed.add_field(name='Developers:', value='Bobo#6885 <@813330702712045590>, Sean#4318 <@202947434236739584>',\r\n inline=False)\r\n file = discord.File(\"./data/ArcturusMengsk_SC2_Cine1.jpg\")\r\n embed.set_thumbnail(url='attachment://ArcturusMengsk_SC2_Cine1.jpg')\r\n embed.add_field(name='.add or .a', value=\"Add yourself to be ranked\", inline=True)\r\n embed.add_field(name='.match or .m', value=\"Add a new match to influence rating\", inline=True)\r\n embed.add_field(name='.stats or .s', value=\"Check the ratings of a member\", inline=True)\r\n embed.add_field(name='.editname or .en', value=\"Edit your name (for typos and clan changes)\", inline=True)\r\n embed.add_field(name='.help or .h', value='Shows this', inline=True)\r\n embed.add_field(name='.ping or .p', value='Bot latency (in ms)', inline=True)\r\n embed.add_field(name='.uptime or .up', value='Display how long bot has been running for', inline=True)\r\n embed.add_field(name='.block or .bl', value=\"*Requires \\'Bot Admin\\' role* Blocks a user from inputting \"\r\n \"commands\", inline=True)\r\n embed.add_field(name='.unblock or .u', value=\"*Requires \\'Bot Admin\\' role* Un-blocks a user from inputting \"\r\n \"commands\", inline=True)\r\n embed.add_field(name='.admin_add or .aa', value='*Requires \\'Bot Admin\\' role* Used for adding a person other '\r\n 'than yourself', inline=True)\r\n embed.add_field(name='.delete or .d', value='*Requires \\'Bot Admin\\' role* Removes an erroneously entered name',\r\n inline=True)\r\n embed.add_field(name='.editelo or .ee', value='*Requires \\'Bot Admin\\' role* Edits the Elo of a player',\r\n inline=True)\r\n embed.add_field(name='.editsigma or .es', value='*Requires \\'Bot Admin\\' role* Edits the Sigma of a player',\r\n inline=True)\r\n embed.add_field(name='.backupratings or .br', value=\"*Requires \\'Bot Admin\\' role* Backup Current Elo ratings\",\r\n inline=True)\r\n embed.add_field(name='.restoreratings or .rr', value='*Requires \\'Bot Admin\\' role* Restore Elo from backup '\r\n 'date', inline=True)\r\n embed.add_field(name='.listmembers or .lm', value='*Requires \\'Bot Admin\\' role* List all members being ranked',\r\n inline=True)\r\n embed.add_field(name='.filecontents or .fc', value='*Requires \\'Bot Admin\\' role* Displays contents of Elo '\r\n 'files', inline=True)\r\n embed.set_footer(icon_url=ctx.author.avatar_url,\r\n text='Requested by {} on {}'.format(ctx.author, datetime.date.today().strftime(\"%m-%d-%Y\")))\r\n await ctx.author.send(file=file, embed=embed)\r\n\r\n\r\n @client.event\r\n async def on_message(message):\r\n if message.author.name == client.user.name:\r\n return\r\n\r\n if str(message.content) == '.reset_settings' or str(message.content) == '.setup':\r\n await client.process_commands(message)\r\n return\r\n\r\n try:\r\n async with aiosqlite.connect(guild_settings_path) as db:\r\n cs = await db.execute(\"select bot_channel_id from guilds where guild_id = ?\", (message.guild.id,))\r\n bot_channel_id = await cs.fetchone()\r\n if bot_channel_id is None:\r\n pass\r\n if bot_channel_id[0] != message.channel.id:\r\n return\r\n except Exception:\r\n pass\r\n\r\n try:\r\n async with aiosqlite.connect(blocked_ids_path) as db:\r\n async with db.execute(f\"select discord_id from bans_{message.guild.id}\") as bans:\r\n if bans is None:\r\n return\r\n async for row in bans:\r\n if message.author.id == row[0]:\r\n await message.delete()\r\n return\r\n except Exception:\r\n pass\r\n\r\n 
if message.content.startswith(prefix):\r\n await client.process_commands(message)\r\n await leaderboard(message)\r\n return\r\n\r\n\r\n @client.command(\"shutdown\")\r\n @commands.is_owner()\r\n async def shutdown(ctx):\r\n await client.change_presence(status=discord.Status.offline)\r\n await client.close()\r\n\r\n\r\n @client.event\r\n async def on_ready():\r\n print('Bot is active')\r\n await client.change_presence(status=discord.Status.online, activity=discord.Activity(\r\n type=3, name=f\"{len(client.guilds)} servers. Type .help to get started\"))\r\n\r\n\r\n @client.event\r\n async def on_command_error(ctx, error):\r\n logger.debug(f\"An error {error} occurred in {ctx.guild} invoked by {ctx.author} \"\r\n f\"who inputted \\\"{ctx.message.content}\\\"\")\r\n logger.exception(error)\r\n await ctx.send(f\"An error has occurred {error}. Try .help. If you believe this to be a bug, \"\r\n f\"contact the bot developers\", delete_after=15)\r\n\r\n # possible future implementations of more client events\r\n # @client.event\r\n # async def on_guild_channel_delete(channel):\r\n # print(channel.id)\r\n\r\n # @client.event\r\n # async def on_guild_channel_update(before, after):\r\n # print(after.id)\r\n # pseudo code\r\n # if after.id == bot_channel_id\r\n # channel.set_permissions(use_external_emojis true\r\n\r\n # This could potentially be needed to resolve conflicts in the\r\n # databases\r\n # @client.event\r\n # async def on_user_update(before, after):\r\n # pseudo code\r\n # if after.name != username in ratings_master.db:\r\n # username == after.name where user.discord_id in players\r\n\r\n client.run(token)\r\n","sub_path":"Personal Projects/Competitive Elo Bot/bot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":60281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"103612978","text":"import create_graphs\nimport get_data\nfrom utils import *\nimport networkx as nx\nimport numpy as np\n\nfrom args import Args\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import average_precision_score\nfrom random import shuffle\nimport pickle\nimport os\nimport random\n\n\nif __name__ == '__main__':\n args = Args()\n if not os.path.isdir(args.graph_save_path):\n os.makedirs(args.graph_save_path)\n\n graphs = create_graphs.create(args)\n\n random.seed(123)\n shuffle(graphs)\n graphs_len = len(graphs)\n graphs_test = graphs[int(0.8 * graphs_len):]\n graphs_train = graphs[0:int(0.8*graphs_len)]\n graphs_validate = graphs[0:int(0.2*graphs_len)]\n\n graph_validate_len = 0\n for graph in graphs_validate:\n graph_validate_len += graph.number_of_nodes()\n graph_validate_len /= len(graphs_validate)\n print('graph_validate_len', graph_validate_len)\n\n graph_test_len = 0\n for graph in graphs_test:\n graph_test_len += graph.number_of_nodes()\n graph_test_len /= len(graphs_test)\n print('graph_test_len', graph_test_len)\n\n args.max_num_node = max([graphs[i].number_of_nodes() for i in range(len(graphs))])\n max_num_edge = max([graphs[i].number_of_edges() for i in range(len(graphs))])\n min_num_edge = min([graphs[i].number_of_edges() for i in range(len(graphs))])\n\n print('total graph num: {}, training set: {}'.format(len(graphs),len(graphs_train)))\n print('max number node: {}'.format(args.max_num_node))\n print('max/min number edge: {}; {}'.format(max_num_edge,min_num_edge))\n print('max previous node: {}'.format(args.max_prev_node))\n\n 
save_graph_list(graphs, args.graph_save_path + args.fname_train + '0.dat')\n save_graph_list(graphs, args.graph_save_path + args.fname_test + '0.dat')\n print('train and test graphs saved at: ', args.graph_save_path + args.fname_test + '0.dat')\n\n # Test loading the graphs:\n test_file_name = args.graph_save_path + args.fname_test + '0.dat'\n g_list = load_graph_list(test_file_name, is_real=True)\n print ('G-List', g_list)\n print ('Glist 0: ', g_list[0])\n\n dataset = get_data.Dataset(test_file_name, is_real=True, batch_size=10)\n output = dataset.next()\n print (output['adj_ph'][0])\n","sub_path":"research/graph_gen/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"549872813","text":"import players\nfrom players.predict import predict\nimport rps\nfrom random import randrange\n\n\nclass Player(players.RPSPlayer):\n class_name = \"AntiSimplePredictorPlayer\"\n counter = 0\n \n def __init__(self,*args,**kwargs):\n players.RPSPlayer.__init__(self,*args,**kwargs)\n self.backlog = list()\n\n def play(self,backlog):\n if len(backlog):\n self.backlog = self.backlog[-len(backlog):]\n prediction = predict(self.backlog)\n\n if prediction is None:\n ret = players.moves[randrange(0,len(players.moves))]\n else:\n ret = rps._beats[prediction]\n else:\n ret = players.moves[randrange(0,len(players.moves))]\n self.backlog.append(ret)\n return ret\n \n @classmethod\n def get_name(cls):\n cls.counter += 1\n return '{:}{:d}'.format(cls.class_name,cls.counter)\n\n","sub_path":"src/players/predict/anti_simple.py","file_name":"anti_simple.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"480504905","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas as pd\n\nsvm_train_data = np.loadtxt('./hodge_svm_train_LC_avg')\nsvm_test_data = np.loadtxt('./hodge_svm_test_LC_avg')\n\nnn_reg_train_data = np.loadtxt('./hodge_nn_reg_train_LC_avg')\nnn_reg_test_data = np.loadtxt('./hodge_nn_reg_test_LC_avg')\n\nnn_class_train_data = np.loadtxt('./hodge_nn_class_train_LC_avg')\nnn_class_test_data = np.loadtxt('./hodge_nn_class_test_LC_avg')\n\nx = 1 - svm_train_data[:,0]\nprint(nn_class_test_data)\n\nmarker_size = 4\ncap_size = 3.5\n\nplt.errorbar(x, svm_test_data[:,1], yerr=svm_test_data[:,4], fmt='o', markersize=marker_size, capsize=cap_size,label='SVM Regressor Validation Accuracy',color = 'purple')\nplt.plot(x, svm_test_data[:,1],color='purple')\n\nplt.errorbar(x, nn_reg_test_data[:,1], yerr=nn_reg_test_data[:,4], fmt='o', markersize=marker_size, capsize=cap_size,label='Neural Net Regressor Validation Accuracy',color = 'green')\nplt.plot(x, nn_reg_test_data[:,1],color='green')\n\nplt.errorbar(x, nn_class_test_data[:,1], yerr=nn_class_test_data[:,2], fmt='o', markersize=marker_size, capsize=cap_size,label='Neural Net Classifier Validation Accuracy',color = 'orange')\nplt.plot(x, nn_class_test_data[:,1],color='orange')\n\nplt.xlabel('Fraction of data used for training')\nplt.ylabel('Accuracy')\nplt.title('Hodge Number - Validation Learning Curves')\nplt.legend()\nplt.savefig('Hodge_test_LC.pdf')\nplt.show()\n\n# plt.errorbar(x, svm_train_data[:,1], yerr=svm_train_data[:,4], fmt='o', markersize=marker_size, capsize=cap_size,label='SVM Classifier Training Accuracy',color = 'purple')\n# 
plt.plot(x, svm_train_data[:,1],color='purple')\n\n# plt.errorbar(x, nn_reg_train_data[:,1], yerr=nn_reg_train_data[:,4], fmt='o', markersize=marker_size, capsize=cap_size,label='Neural Net Regressor, Validation Accuracy',color = 'green')\n# plt.plot(x, nn_reg_train_data[:,1],color='green')\n\n# plt.errorbar(x, nn_class_train_data[:,1], yerr=nn_class_train_data[:,2], fmt='o', markersize=marker_size, capsize=cap_size,label='Neural Net Classifier, Training Accuracy',color = 'orange')\n# plt.plot(x, nn_class_train_data[:,1],color='orange')\n\n# plt.xlabel('Fraction of data used for training')\n# plt.ylabel('Accuracy')\n# plt.title('Hodge Number - Training Learning Curves')\n# plt.legend()\n# # plt.savefig('Hodge_train_LC.pdf')\n# plt.show()\n","sub_path":"Hodge/LC/plot_LC.py","file_name":"plot_LC.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"212826727","text":"#!/usr/bin/env python3\n\nimport utils, open_color, arcade\n\nutils.check_version((3,7))\n\n# Open the window. Set the window title and dimensions (width and height)\narcade.open_window(800, 600, \"Smiley Face Example\")\narcade.set_background_color(open_color.white)\n\n# Start the render process. This must be done before any drawing commands.\narcade.start_render()\n\n#ANSWER TO THE QUESTION HERE:\n#The outer loop changes the x coordinate of the origin, shown in face_x. It increments by 150 starting at 100, going to 800\n#The inner loop has the same steps, but stops at 600 instead of 800\n#Changing the first or second numbers determines the center coordinates of the circles. If the range is extended without\n#changing the increment, more or less faces will be drawn.\n#Changing the last number determines how often a face is drawn and how close they are together\n#start at 100, go to 799, counting by 150\nfor x in range(100,800,150):\n #start at 100, go to 599, counting by 150\n for y in range(100,600,150):\n face_x,face_y = (x,y)\n smile_x,smile_y = (face_x + 0,face_y - 0)\n eye1_x,eye1_y = (face_x - 30,face_y + 50) \n eye2_x,eye2_y = (face_x + 30,face_y + 50)\n catch1_x,catch1_y = (face_x - 25,face_y + 63) \n catch2_x,catch2_y = (face_x + 35,face_y + 63) \n\n\n # Draw the smiley face:\n # (x,y,radius,color)\n arcade.draw_circle_filled(face_x, face_y, 100, open_color.yellow_3)\n # (x,y,radius,color,border_thickness)\n arcade.draw_circle_outline(face_x, face_y, 100, open_color.black,4)\n\n #(x,y,width,height,color)\n #Again, changed size of eyes to match example\n arcade.draw_ellipse_filled(eye1_x,eye1_y,30,50,open_color.black)\n arcade.draw_ellipse_filled(eye2_x,eye2_y,30,50,open_color.black)\n arcade.draw_circle_filled(catch1_x,catch1_y,3,open_color.gray_2)\n arcade.draw_circle_filled(catch2_x,catch2_y,3,open_color.gray_2)\n\n #(x,y,width,height,color,start_degrees,end_degrees,border_thickness)\n arcade.draw_arc_outline(smile_x,smile_y,60,50,open_color.black,190,350,4)\n\n\n# Finish the render\n# Nothing will be drawn without this.\n# Must happen after all draw commands\narcade.finish_render()\n# Keep the window up until someone closes it.\narcade.run()\n","sub_path":"main4.py","file_name":"main4.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"294642478","text":"\"\"\"\nUnit test for EC2 subnet.\n\"\"\"\n\nimport unittest\nimport mock\n\nfrom treadmill.infra.subnet import Subnet\n\n\nclass SubnetTest(unittest.TestCase):\n\n 
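# Every test below patches treadmill.infra.connection.Connection, wires the\n # shared mock instance into Subnet.ec2_conn/route53_conn, and then asserts\n # on the recorded calls, so no real AWS requests are ever made.\n\n 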
@mock.patch('treadmill.infra.connection.Connection')\n def test_init(self, ConnectionMock):\n conn_mock = ConnectionMock()\n Subnet.ec2_conn = Subnet.route53_conn = conn_mock\n\n subnet = Subnet(\n id=1,\n vpc_id='vpc-id',\n metadata={\n 'Tags': [{\n 'Key': 'Name',\n 'Value': 'goo'\n }]\n }\n )\n\n self.assertEquals(subnet.vpc_id, 'vpc-id')\n self.assertEquals(subnet.name, 'goo')\n self.assertEquals(subnet.ec2_conn, conn_mock)\n\n @mock.patch('treadmill.infra.connection.Connection')\n def test_create_tags(self, ConnectionMock):\n conn_mock = ConnectionMock()\n conn_mock.create_tags = mock.Mock()\n\n Subnet.ec2_conn = Subnet.route53_conn = conn_mock\n subnet = Subnet(\n name='foo',\n id='1',\n vpc_id='vpc-id'\n )\n subnet.create_tags()\n\n conn_mock.create_tags.assert_called_once_with(\n Resources=['1'],\n Tags=[{\n 'Key': 'Name',\n 'Value': 'foo'\n }]\n )\n\n @mock.patch('treadmill.infra.connection.Connection')\n def test_create(self, ConnectionMock):\n ConnectionMock.context.region_name = 'us-east-1'\n conn_mock = ConnectionMock()\n subnet_json_mock = {\n 'SubnetId': '1'\n }\n\n conn_mock.create_subnet = mock.Mock(return_value={\n 'Subnet': subnet_json_mock\n })\n conn_mock.create_route_table = mock.Mock(return_value={\n 'RouteTable': {'RouteTableId': 'route-table-id'}\n })\n\n Subnet.ec2_conn = Subnet.route53_conn = conn_mock\n _subnet = Subnet.create(\n cidr_block='172.23.0.0/24',\n vpc_id='vpc-id',\n name='foo',\n gateway_id='gateway-id'\n )\n self.assertEqual(_subnet.id, '1')\n self.assertEqual(_subnet.name, 'foo')\n self.assertEqual(_subnet.metadata, subnet_json_mock)\n conn_mock.create_subnet.assert_called_once_with(\n VpcId='vpc-id',\n CidrBlock='172.23.0.0/24',\n AvailabilityZone='us-east-1a'\n )\n conn_mock.create_tags.assert_called_once_with(\n Resources=['1'],\n Tags=[{\n 'Key': 'Name',\n 'Value': 'foo'\n }]\n )\n conn_mock.create_route_table.assert_called_once_with(\n VpcId='vpc-id'\n )\n conn_mock.create_route.assert_called_once_with(\n RouteTableId='route-table-id',\n DestinationCidrBlock='0.0.0.0/0',\n GatewayId='gateway-id'\n )\n conn_mock.associate_route_table.assert_called_once_with(\n RouteTableId='route-table-id',\n SubnetId='1',\n )\n\n @mock.patch('treadmill.infra.connection.Connection')\n def test_refresh(self, ConnectionMock):\n conn_mock = ConnectionMock()\n subnet_json_mock = {\n 'VpcId': 'vpc-id',\n 'Foo': 'bar'\n }\n conn_mock.describe_subnets = mock.Mock(return_value={\n 'Subnets': [subnet_json_mock]\n })\n\n Subnet.ec2_conn = Subnet.route53_conn = conn_mock\n _subnet = Subnet(id='subnet-id', vpc_id=None, metadata=None)\n _subnet.refresh()\n\n self.assertEqual(_subnet.vpc_id, 'vpc-id')\n self.assertEqual(_subnet.metadata, subnet_json_mock)\n\n @mock.patch.object(Subnet, 'refresh')\n @mock.patch.object(Subnet, 'get_instances')\n @mock.patch('treadmill.infra.connection.Connection')\n def test_show(self, ConnectionMock, get_instances_mock, refresh_mock):\n conn_mock = ConnectionMock()\n Subnet.ec2_conn = Subnet.route53_conn = conn_mock\n\n _subnet = Subnet(id='subnet-id',\n vpc_id='vpc-id',\n metadata=None)\n _subnet.instances = None\n\n result = _subnet.show()\n\n self.assertEqual(\n result,\n {\n 'VpcId': 'vpc-id',\n 'SubnetId': 'subnet-id',\n 'Instances': None\n }\n )\n\n get_instances_mock.assert_called_once_with(refresh=True, role=None)\n refresh_mock.assert_called_once()\n\n\nif __name__ == '__main__':\n 
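# Running this file directly hands control to unittest's runner, which\n # discovers and executes every test method defined above.\n 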
unittest.main()\n","sub_path":"tests/infra/test_subnet.py","file_name":"test_subnet.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"110841269","text":"from pyqtgraph.flowchart import Flowchart, Node\nimport pyqtgraph.flowchart.library as fclib\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pyqtgraph as pg\nimport numpy as np\nimport wiimote\nimport sys\nfrom scipy import fft\nfrom sklearn import svm\n\n\nclass WiimoteNode(Node):\n \"\"\"\n Outputs sensor data from a Wiimote.\n\n Supported sensors: accelerometer (3 axis)\n Text input box allows for setting a Bluetooth MAC address.\n Pressing the \"connect\" button tries connecting to the Wiimote.\n Update rate can be changed via a spinbox widget. Setting it to \"0\"\n activates callbacks every time a new sensor value arrives (which is\n quite often -> performance hit)\n \"\"\"\n\n nodeName = \"Wiimote\"\n\n def __init__(self, name):\n terminals = {\n 'accelX': dict(io='out'),\n 'accelY': dict(io='out'),\n 'accelZ': dict(io='out'),\n }\n self.wiimote = None\n self._acc_vals = []\n\n # update timer\n self.update_timer = QtCore.QTimer()\n self.update_timer.timeout.connect(self.update_all_sensors)\n\n # super()\n Node.__init__(self, name, terminals=terminals)\n\n def update_all_sensors(self):\n if self.wiimote is None:\n return\n self._acc_vals = self.wiimote.accelerometer\n # todo: other sensors...\n self.update()\n\n def update_accel(self, acc_vals):\n self._acc_vals = acc_vals\n self.update()\n\n # def ctrlWidget(self):\n # return self.ui\n\n def connect_wiimote(self, btaddr, model=None):\n # self.btaddr = str(self.text.text()).strip()\n if self.wiimote is not None:\n self.wiimote.disconnect()\n self.wiimote = None\n # self.connect_button.setText(\"connect\")\n return\n if len(btaddr) == 17:\n # self.connect_button.setText(\"connecting...\")\n if model:\n self.wiimote = wiimote.connect(btaddr, model)\n else:\n self.wiimote = wiimote.connect(btaddr)\n if self.wiimote is None:\n self.connect_button.setText(\"try again\")\n else:\n # self.connect_button.setText(\"disconnect\")\n # self.set_update_rate(self.update_rate_input.value())\n\n # setting rate of samples\n self.set_update_rate(60)\n\n def set_update_rate(self, rate):\n if rate == 0: # use callbacks for max. 
update rate\n self.update_timer.stop()\n self.wiimote.accelerometer.register_callback(self.update_accel)\n else:\n self.wiimote.accelerometer.unregister_callback(self.update_accel)\n self.update_timer.start(1000.0/rate)\n\n def process(self, **kwdargs):\n x, y, z = self._acc_vals\n return {'accelX': np.array([x]), 'accelY': np.array([y]), 'accelZ': np.array([z])}\n\nfclib.registerNodeType(WiimoteNode, [('Sensor',)])\n\n\nclass SvmNode(Node):\n \"\"\"\n Support Vector Machine that can be switched between training mode and recognition mode via buttons in the UI and\n on the WiiMote.\n In training mode it continually reads in a date from the accelerometer.\n When starting recognition mode the SVM is getting trained with all saved data.\n While in recognition Mode the data is getting saved and then handed to the SVM for a prediction.\n \"\"\"\n\n nodeName = 'Svm'\n\n def __init__(self, name):\n terminals = {\n 'inX': dict(io='in'),\n 'inY': dict(io='in'),\n 'inZ': dict(io='in'),\n 'gesture': dict(io='out')\n }\n\n self.training_mode = False\n self.recognition_mode = False\n\n self.cutoff_length = 0\n\n self.saved_gestures = {}\n self.current_recording = []\n\n self.category_to_gesture = {}\n\n self.classifier = svm.SVC()\n\n Node.__init__(self, name, terminals=terminals)\n\n def set_training_mode(self, value):\n # catch some wrong paramaters\n if value is True:\n self.current_recording = []\n self.training_mode = True\n else:\n self.training_mode = False\n\n def add_gesture(self, name):\n print(\"Saved Gesture with name: %s\" % name)\n self.saved_gestures[name] = self.current_recording\n\n def set_recognition_mode(self, value):\n # catch some wrong paramaters\n if value is True:\n self.svm_train_classifier()\n self.current_recording = []\n self.recognition_mode = True\n else:\n self.recognition_mode = False\n prediction = self.svm_classification()\n if prediction:\n prediction = self.category_to_gesture[prediction[0]]\n print(\"Prediction is: \", prediction)\n return prediction\n\n def svm_train_classifier(self):\n \"\"\"\n Here all saved gestures are handed into a Fast Fourier Transformation to extract the present frequency spectrum.\n After that the data normalized to the length of the gesture with the fewest samples\n Finally the frequency data is handed to the SVM for training.\n \"\"\"\n\n # needed because a SVM needs more than 1 class\n if len(self.saved_gestures.keys()) <= 1:\n print(\"Not enough gestures!\")\n else:\n training_data = []\n categories = []\n id = 0\n\n for gesture, value in self.saved_gestures.items():\n id += 1\n # needed to map the id returned from the SVM to a name of a gesture\n self.category_to_gesture[id] = gesture\n categories.append(id)\n\n x = []\n y = []\n z = []\n for elem in value:\n x.append(elem[0][0])\n y.append(elem[1][0])\n z.append(elem[2][0])\n\n training_data.append(self.get_fft(x, y, z))\n\n # normalized length of fft\n self.cutoff_length = min([len(l) for l in training_data])\n\n normalized_fft = []\n for l in training_data:\n normalized_fft.append(l[:self.cutoff_length])\n\n training_data = normalized_fft\n\n self.classifier.fit(training_data, categories)\n\n def svm_classification(self):\n \"\"\"\n Here the date from the recognition is handed into a Fast Fourier Transformation.\n After that it is normalized to the number of samples from the shortest gesture.\n This step is needed because a constraint of a SVM is that the length of the feature vectors and the vector for\n the prediction need to be the same.\n \"\"\"\n\n if 
len(self.saved_gestures.keys()) <= 1:\n print(\"Not enough gestures!\")\n return None\n else:\n x = []\n y = []\n z = []\n for elem in self.current_recording:\n x.append(elem[0][0])\n y.append(elem[1][0])\n z.append(elem[2][0])\n\n gesture_fft = self.get_fft(x, y, z)\n\n if len(gesture_fft) > self.cutoff_length:\n print(\"bigger than cutoff\")\n gesture_fft = gesture_fft[:self.cutoff_length]\n elif len(gesture_fft) < self.cutoff_length:\n\n print(\"smaller than cutoff\")\n temp = np.zeros(self.cutoff_length)\n for x in range(len(gesture_fft)):\n temp[x] = gesture_fft[x]\n gesture_fft = temp\n else:\n pass\n\n return self.classifier.predict(gesture_fft)\n\n def get_fft(self, x, y, z):\n \"\"\"\n Here the avarage of the x, y, z data from the accelerometer is handed into the Fast Fourier Transformation.\n \"\"\"\n avg = (np.array(x) + np.array(y) + np.array(z))/3\n return np.abs(fft(avg)/len(avg))[1:len(avg)//2]\n\n def process(self, **kwds):\n x = kwds['inX']\n y = kwds['inY']\n z = kwds['inZ']\n\n # appending data to the current recording if we are in training or recognition mode\n if self.training_mode:\n self.current_recording.append((x, y, z))\n elif self.recognition_mode:\n self.current_recording.append((x, y, z))\n else:\n # do not append samples if we are not in training or recognition mode\n pass\n return None\n\n\nfclib.registerNodeType(SvmNode, [('Custom',)])\n\n\nclass ActivityRecognition():\n\n RED = QtGui.QColor(255, 0, 0)\n GREEN = QtGui.QColor(0, 255, 0)\n YELLOW = QtGui.QColor(255, 255, 0)\n GRAY = QtGui.QColor(100, 100, 100)\n BLACK = QtGui.QColor(0, 0, 0)\n\n def __init__(self, app):\n self.app = app\n\n self.training_mode = False\n self.recognition_mode = False\n\n self.init_ui()\n self.setup_nodes()\n self.connect_buttons()\n\n self.win.show()\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n\n def init_ui(self):\n width, height = self.app.desktop().width(), self.app.desktop().height()\n\n self.win = QtGui.QWidget()\n self.win.setWindowTitle('Activity Recognition')\n self.win.setGeometry(width/4, height/4, width/2, height/2)\n\n self.main_layout = QtGui.QGridLayout()\n self.win.setLayout(self.main_layout)\n\n self.setup_left_group()\n # self.setup_middle_group()\n self.setup_right_group()\n\n def setup_left_group(self):\n left_group = QtGui.QGroupBox()\n left_layout = QtGui.QGridLayout()\n\n wm_label = QtGui.QLabel(\"Enter your mac address\")\n self.wm_addr = QtGui.QLineEdit()\n self.wm_addr.setPlaceholderText(\"Enter your mac address here\")\n self.wm_addr.setText(\"B8:AE:6E:1B:5B:03\")\n self.wm_connect_btn = QtGui.QPushButton(\"Connect\")\n\n left_layout.addWidget(wm_label, 1, 1, 1, 2)\n left_layout.addWidget(self.wm_addr, 2, 1, 1, 2)\n left_layout.addWidget(self.wm_connect_btn, 3, 1, 1, 2)\n\n self.training_hint = QtGui.QLabel(\"You can toggle Training Mode by pressing 'A' on your WiiMote!\\n\" +\n \"To activate recognition mode HOLD down the 'B' button!\")\n\n self.training_label = QtGui.QLabel(\"NO WIIMOTE CONNECTED\")\n self.training_label.setAlignment(QtCore.Qt.AlignCenter)\n self.training_label.setAutoFillBackground(True)\n self.training_btn = QtGui.QPushButton(\"Activate Training Mode\")\n\n left_layout.addWidget(self.training_hint, 4, 1, 1, 2)\n left_layout.addWidget(self.training_label, 5, 1, 1, 2)\n left_layout.addWidget(self.training_btn, 6, 1, 1, 2)\n\n self.save_label = QtGui.QLabel(\"Enter a name for your gesture:\")\n self.save_label.setAlignment(QtCore.Qt.AlignCenter)\n self.save_text = 
QtGui.QLineEdit()\n self.save_text.setPlaceholderText(\"Enter Gesture Name\")\n self.save_btn = QtGui.QPushButton(\"Save Gesture\")\n\n left_layout.addWidget(self.save_label, 7, 1, 1, 2)\n left_layout.addWidget(self.save_text, 8, 1, 1, 2)\n left_layout.addWidget(self.save_btn, 9, 1, 1, 2)\n\n left_group.setLayout(left_layout)\n self.main_layout.addWidget(left_group, 1, 1, 1, 1)\n\n def setup_middle_group(self):\n middle_group = QtGui.QGroupBox()\n middle_layout = QtGui.QGridLayout()\n\n l1 = QtGui.QLabel()\n l1.setText(\"MIDDLE GROUP\")\n middle_layout.addWidget(l1, 1, 1)\n\n self.spectrogram_widget = pg.PlotWidget()\n self.spectrogram_widget.setYRange(0, 128)\n middle_layout.addWidget(self.spectrogram_widget, 2, 1)\n\n middle_group.setLayout(middle_layout)\n self.main_layout.addWidget(middle_group, 1, 2, 1, 5)\n\n def setup_right_group(self):\n right_group = QtGui.QGroupBox()\n right_layout = QtGui.QGridLayout()\n\n self.connected_status_label = QtGui.QLabel()\n self.connected_status_label.setAlignment(QtCore.Qt.AlignCenter)\n self.connected_status_label.setAutoFillBackground(True)\n\n connected_status_palette = self.connected_status_label.palette()\n connected_status_palette.setColor(self.connected_status_label.backgroundRole(), self.RED)\n connected_status_palette.setColor(self.connected_status_label.foregroundRole(), self.BLACK)\n self.connected_status_label.setPalette(connected_status_palette)\n\n self.connected_status_label.setText(\"NOT CONNECTED\")\n right_layout.addWidget(self.connected_status_label, 1, 1)\n\n self.recording_status_label = QtGui.QLabel()\n self.recording_status_label.setAlignment(QtCore.Qt.AlignCenter)\n self.recording_status_label.setAutoFillBackground(True)\n\n recording_status_palette = self.recording_status_label.palette()\n recording_status_palette.setColor(self.recording_status_label.backgroundRole(), self.RED)\n recording_status_palette.setColor(self.recording_status_label.foregroundRole(), self.BLACK)\n self.recording_status_label.setPalette(recording_status_palette)\n\n self.recording_status_label.setText(\"Not Recording\")\n right_layout.addWidget(self.recording_status_label, 2, 1)\n\n self.recognized_gesture_heading = QtGui.QLabel(\"Recognized Gesture:\")\n self.recognized_gesture = QtGui.QLabel(\"UNKNOWN GESTURE\")\n\n right_layout.addWidget(self.recognized_gesture_heading, 3, 1, 1, 1)\n right_layout.addWidget(self.recognized_gesture, 4, 1, 1, 1)\n\n self.known_gestures = QtGui.QLabel()\n self.known_gestures.setText(\"Saved Gestures:\\n\")\n right_layout.addWidget(self.known_gestures, 5, 1, 3, 1)\n\n right_group.setLayout(right_layout)\n self.main_layout.addWidget(right_group, 1, 7, 1, 1)\n\n def setup_nodes(self):\n # Create an empty flowchart with a single input and output\n self.fc = Flowchart(terminals={})\n\n self.wiimote_node = self.fc.createNode('Wiimote')\n self.svm_node = self.fc.createNode('Svm')\n\n self.fc.connectTerminals(self.wiimote_node['accelX'], self.svm_node['inX'])\n self.fc.connectTerminals(self.wiimote_node['accelY'], self.svm_node['inY'])\n self.fc.connectTerminals(self.wiimote_node['accelZ'], self.svm_node['inZ'])\n\n def connect_buttons(self):\n self.training_btn.clicked.connect(self.toggle_training_mode)\n self.wm_connect_btn.clicked.connect(self.connect_wm)\n self.save_btn.clicked.connect(self.save_gesture)\n\n def save_gesture(self):\n name = self.save_text.text().strip()\n print(len(name))\n if len(name) == 0:\n name = \"Unknown Name\"\n\n self.known_gestures.setText(self.known_gestures.text() + \"\\n\" + name)\n 
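# Hand the recorded samples to the SVM node under this label; the node\n # keeps them in its saved_gestures dict until training is triggered.\n 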
self.svm_node.add_gesture(name)\n\n self.save_text.setText(\"\")\n\n def connect_wm(self):\n btaddr = self.wm_addr.text().strip()\n print(btaddr)\n self.wiimote_node.connect_wiimote(btaddr, model='Nintendo RVL-CNT-01-TR')\n\n self.training_label.setText(\"Training Mode OFF\")\n self.connected_status_label.setText(\"CONNECTED\")\n connected_status_palette = self.connected_status_label.palette()\n connected_status_palette.setColor(self.connected_status_label.backgroundRole(), self.GREEN)\n connected_status_palette.setColor(self.connected_status_label.foregroundRole(), self.BLACK)\n self.connected_status_label.setPalette(connected_status_palette)\n\n self.wiimote_node.wiimote.buttons.register_callback(self.handle_wm_button)\n\n def handle_wm_button(self, buttons):\n if len(buttons) > 0:\n for button in buttons:\n if button[0] == 'A':\n if button[1]:\n self.toggle_training_mode()\n if button[0] == 'B':\n if button[1]:\n self.start_recognition_mode()\n else:\n self.stop_recognition_mode()\n\n def toggle_training_mode(self):\n self.training_mode = not self.training_mode\n print('New State (Training Mode): ', self.training_mode)\n if self.training_mode:\n self.svm_node.set_training_mode(True)\n self.training_btn.setText(\"Deactivate Training Mode\")\n self.training_label.setText(\"Training Mode ON\")\n training_status_palette = self.training_label.palette()\n training_status_palette.setColor(self.training_label.backgroundRole(), self.YELLOW)\n training_status_palette.setColor(self.training_label.foregroundRole(), self.BLACK)\n self.training_label.setPalette(training_status_palette)\n\n self.recording_status_label.setText(\"Recording Training Data\")\n p = self.recording_status_label.palette()\n p.setColor(self.recording_status_label.backgroundRole(), self.YELLOW)\n self.recording_status_label.setPalette(p)\n else:\n self.svm_node.set_training_mode(False)\n self.training_btn.setText(\"Activate Training Mode\")\n self.training_label.setText(\"Training Mode OFF\")\n training_status_palette = self.training_label.palette()\n training_status_palette.setColor(self.training_label.backgroundRole(), self.GRAY)\n self.training_label.setPalette(training_status_palette)\n\n self.recording_status_label.setText(\"Not Recording\")\n p = self.recording_status_label.palette()\n p.setColor(self.recording_status_label.backgroundRole(), self.RED)\n p.setColor(self.recording_status_label.foregroundRole(), self.BLACK)\n self.recording_status_label.setPalette(p)\n\n def start_recognition_mode(self):\n print(\"Start recognition Mode\")\n self.recognized_gesture.setText(\"UNKNOWN\")\n self.svm_node.set_recognition_mode(True)\n self.recording_status_label.setText(\"Recording Recognition Data\")\n p = self.recording_status_label.palette()\n p.setColor(self.recording_status_label.backgroundRole(), self.YELLOW)\n self.recording_status_label.setPalette(p)\n\n def stop_recognition_mode(self):\n print(\"Stop recognition Mode\")\n gesture = self.svm_node.set_recognition_mode(False)\n if gesture:\n self.recognized_gesture.setText(gesture)\n p = self.recognized_gesture.palette()\n p.setColor(self.recognized_gesture.backgroundRole(), self.GREEN)\n self.recognized_gesture.setPalette(p)\n\n else:\n self.recognized_gesture.setText(\"Not enough gestures! 
Save another one!\")\n p = self.recognized_gesture.palette()\n p.setColor(self.recognized_gesture.backgroundRole(), self.RED)\n self.recognized_gesture.setPalette(p)\n\n self.recording_status_label.setText(\"Not Recording\")\n p = self.recording_status_label.palette()\n p.setColor(self.recording_status_label.backgroundRole(), self.RED)\n self.recording_status_label.setPalette(p)\n\n\ndef main():\n app = QtGui.QApplication([])\n\n activity_recognition = ActivityRecognition(app)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"assignment8/activity_recognizer.py","file_name":"activity_recognizer.py","file_ext":"py","file_size_in_byte":19139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"107898587","text":"# this code can be improved using dictionary by storing previously calculated values\nmaxCurrentX = 0\nmaxCurrentNumber = 0\nfor i in range(1, 1000000):\n currentX = 1\n currentNumber = i\n while currentNumber != 1:\n if (currentNumber % 2 == 0):\n currentNumber = currentNumber // 2\n else:\n currentNumber = currentNumber * 3 + 1\n\n currentX += 1\n \n if (currentX > maxCurrentX):\n maxCurrentX = currentX\n maxCurrentNumber = i\n\nprint(maxCurrentNumber)","sub_path":"projecteuler/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"70672946","text":"import numpy as np\nimport pathlib\nimport sys\nsys.path.append(str(pathlib.Path(__file__).parents[3]))\nfrom cmx_execution.multileg_order_manager import side\nfrom cmx_risk.invent_util import (is_not_larger, is_not_smaller,)\nimport logging\n\n\nclass hedging_linear_scalper:\n\tdef __init__(self, context, leg_index):\n\t\tself.context = context\n\t\tself.leg_index = leg_index\n\t\tself.symbol = self.context.cmx_config.catalyst_symbols[self.leg_index]\n\t\tself.allowed_position_error = self.context.cmx_config.risk_deltas[self.leg_index]\n\t\tself._failed_bid_count = 0\n\t\tself._failed_ask_count = 0\n\n\tdef cancel_all(self):\n\t\tself.context.cmx_exec.send_orders(self.symbol, None, side.buy)\n\t\tself.context.cmx_exec.send_orders(self.symbol, None, side.sell)\n\n\tdef assert_orders(self, price_amount_map, tradeside):\n\t\t# if tradeside == side.buy:\n\t\t# \tfor k,v in price_amount_map.items():\n\t\t# \t\tassert v >= 0 and is_not_larger(\n\t\t# \t\t\t\t\t\t\t\t\t\tv + self.context.cmx_invent.positions[0], \n\t\t# \t\t\t\t\t\t\t\t\t\tself.context.risk_max_long_position, \n\t\t# \t\t\t\t\t\t\t\t\t\tself.allowed_position_error\n\t\t# \t\t\t\t\t\t\t\t\t\t), \\\n\t\t# \t\t'illegal bid order {} * {} is adding to position {} exceeding max long position of {}'.format(k, v, self.context.cmx_invent.amounts[0], self.context.risk_max_long_pos)\n\t\t# else:\n\t\t# \tfor k,v in price_amount_map.items():\n\t\t# \t\tassert v <= 0 and is_not_smaller(\n\t\t# \t\t\t\t\t\t\t\t\t\t v + self.context.cmx_invent.amounts[0], \n\t\t# \t\t\t\t\t\t\t\t\t\t self.context.risk_max_short_pos, \n\t\t# \t\t\t\t\t\t\t\t\t\t self.allowed_position_error\n\t\t# \t\t\t\t\t\t\t\t\t\t ), \\\n\t\t# \t\t'illegal ask order {} * {} is adding to position {} exceeding max short position of {}'.format(k, v, self.context.cmx_invent.amounts[0], self.context.risk_max_short_pos)\n\t\tpass\n\n\tdef _convert_amount_to_qty(self, price_amount_map):\n\t\tprice_qty_map = price_amount_map.copy()\n\t\tif not self.context.cmx_config.base_quote_flips[0]:\n\t\t\tfor k,v in price_amount_map.items():\n\t\t\t\tprice_qty_map[k] = v / k  # qty = amount / price (keys are price levels)\n\t\treturn price_qty_map\n\n\tdef trade_normal(self):\n\t\t# return True if order updated.\n\t\tif self.context.cmx_invent.buy_flags[self.leg_index]:\n\t\t\tbidqty = self.context.cmx_invent.unhedged_positions[self.leg_index]\n\t\t\tif bidqty >= self.context.cmx_config.invent_min_shares[self.leg_index]:\n\t\t\t\tif np.isnan(self.context.cmx_signal.prc_smas[self.leg_index].get_std()):\n\t\t\t\t\tbidprice = self.context.cmx_invent.upper_prices[self.leg_index] \\\n\t\t\t\t\t\t\t + self.context.cmx_config.invent_price_offsets[self.leg_index] \\\n\t\t\t\t\t\t\t * self._failed_bid_count\n\t\t\t\telse:\n\t\t\t\t\tbidprice = self.context.cmx_invent.upper_prices[self.leg_index] \\\n\t\t\t\t\t\t\t + self.context.cmx_config.invent_price_offsets[self.leg_index] \\\n\t\t\t\t\t\t\t * self.context.cmx_signal.prc_smas[self.leg_index].get_std() \\\n\t\t\t\t\t\t\t * self._failed_bid_count\n\t\t\t\tbids = {bidprice: bidqty}\n\t\t\t\tbid_str = '|'.join(['{} * {}'.format(k,v) for k,v in bids.items()])\n\t\t\t\tlogging.info('[cmx_invent] send {} bids {} after {} failures'.format(self.symbol.symbol, bid_str, self._failed_bid_count))\n\t\t\t\tself.context.cmx_exec.send_orders(self.symbol, bids, side.buy)\n\t\t\t\tself._failed_bid_count += 1\n\t\telse:\n\t\t\tself._failed_bid_count = 0\n\n\t\tif self.context.cmx_invent.sell_flags[self.leg_index]:\n\t\t\taskqty = self.context.cmx_invent.unhedged_positions[self.leg_index]\n\t\t\tif askqty <= -1 * self.context.cmx_config.invent_min_shares[self.leg_index]:\n\t\t\t\tif np.isnan(self.context.cmx_signal.prc_smas[self.leg_index].get_std()):\n\t\t\t\t\taskprice = self.context.cmx_invent.lower_prices[self.leg_index] \\\n\t\t\t\t\t\t\t - self.context.cmx_config.invent_price_offsets[self.leg_index] \\\n\t\t\t\t\t\t\t * self._failed_ask_count\n\t\t\t\telse:\n\t\t\t\t\taskprice = self.context.cmx_invent.lower_prices[self.leg_index] \\\n\t\t\t\t\t\t\t - self.context.cmx_config.invent_price_offsets[self.leg_index] \\\n\t\t\t\t\t\t\t * self.context.cmx_signal.prc_smas[self.leg_index].get_std() \\\n\t\t\t\t\t\t\t * self._failed_ask_count\n\t\t\t\tasks = {askprice: askqty}\n\t\t\t\task_str = '|'.join(['{} * {}'.format(k,v) for k,v in asks.items()])\n\t\t\t\tlogging.info('[cmx_invent] send {} asks {} after {} failures'.format(self.symbol.symbol, ask_str, self._failed_ask_count))\n\t\t\t\tself.context.cmx_exec.send_orders(self.symbol, asks, side.sell)\n\t\t\t\tself._failed_ask_count += 1\n\t\telse:\n\t\t\tself._failed_ask_count = 0\n\n\t\t\n\n","sub_path":"Library/Python/comics_catalyst/cmx_risk/hedging_manager.py","file_name":"hedging_manager.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"16908235","text":"\n\nfrom xai.brain.wordbase.nouns._gleam import _GLEAM\n\n#class header\nclass _GLEAMS(_GLEAM, ):\n\tdef __init__(self,): \n\t\t_GLEAM.__init__(self)\n\t\tself.name = \"GLEAMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"gleam\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_gleams.py","file_name":"_gleams.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"635894083","text":"import numpy as np\n\n# 11/7/2018 task: create \"easy\" test state with \"nice\" county lines & random populations\n\ny = 20\nx = 20\ndistf = 20\nstate = np.random.negative_binomial(2, 0.003, size = (y, x))\n\n# 11/14/2018 task: create two dictionaries: 1 maps the coordinates 
of a county to its population and the other maps the coordinates of a county to the coordinates of all neighboring counties\n\npops = {}\nneighbors = {}  # neighbor sets; doMerge below relies on set operations\n\nfor row in range(y):\n for dist in range(x):\n pops[(row, dist)] = state[row][dist]\n if row == 0:\n neighbors[(row, dist)] = {(row+1, dist)}\n elif row == y-1:\n neighbors[(row, dist)] = {(row-1, dist)}\n else:\n neighbors[(row, dist)] = {(row+1, dist), (row-1, dist)}\n\n if dist == 0:\n neighbors[(row, dist)].add((row, dist+1))\n elif dist == x-1:\n neighbors[(row, dist)].add((row, dist-1))\n else:\n neighbors[(row, dist)].update({(row, dist+1), (row, dist-1)})\n\ntgtpop = sum(pops.values()) / distf\n\ndef findMerge(dist):\n nbp = {}\n for neighbor in neighbors[dist]:\n nbp[neighbor] = pops[neighbor]\n popDif = {}\n for key in nbp:\n popDif[key] = abs(tgtpop - (nbp[key] + pops[dist]))\n merge = min(popDif, key=popDif.get)\n return merge\n\n\ndef doMerge(dist, merge, history):\n\n # Update the history information\n history[merge] = dist\n for k, v in history.items():\n if v == merge:\n history[k] = dist\n\n # Make sure that merge and dist are neighbors of each other\n assert(dist in neighbors[merge])\n assert(merge in neighbors[dist])\n assert(dist not in neighbors[dist])\n assert(merge not in neighbors[merge])\n\n # combine dist and merge populations under dist's key in pops dictionary\n pops[dist] += pops[merge]\n\n # remove merge key in pops dictionary\n del pops[merge]\n \n # Everything that was a neighbor of merge now needs to become a neighbor of dist\n neighbors[dist].update(neighbors[merge])\n neighbors[dist].remove(dist)\n \n # replace all occurences of merge in all values in neighbors dictionary with dist\n for neighbor in neighbors[merge]:\n neighbors[neighbor].remove(merge)\n if neighbor != dist:\n neighbors[neighbor].add(dist)\n \n # remove merge key from neighbors dictionary\n del neighbors[merge]\n\n\nhistory = {}\nwhile len(pops) > distf:\n dist = min(pops, key=pops.get)\n doMerge(dist, findMerge(dist), history)\n","sub_path":"algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"456548843","text":"from flask import Flask, render_template\nfrom dogapi 
import dog_http_api as api\nfrom statsd import statsd\nimport time, random\n\napp = Flask(__name__)\n\n# API params.\napi.api_key = 'cd5682f38e477d011c8c788064869b07'\napi.application_key = '718b5ffa208043f4c4f2f90198a7a55844c2e61b'\n\n#level 1 event generation.\ntitle = \"Basic level-1 event\"\ntext = 'Event with custom tag.'\ntags = ['version:1', 'application:web', 'type:support', 'basic:event']\napi.event_with_response(title, text, tags=tags)\n\n\n#Landing page handler.\n@app.route(\"/\")\n@statsd.timed('page.load.time', tags=['type:support', 'page:home'])\ndef home_page():\n statsd.increment('web.page_count', tags=['page:home'])\n return \"App Home Page\"\n\n\n#Main page handler\n@app.route(\"/main\")\ndef main_page():\n statsd.increment('web.page_count', tags=['page:main'])\n #time diff for histograms\n start_time = time.time()\n #putting randomly to sleep to generate delays.\n time.sleep(random.randint(1, 10))\n duration = time.time() - start_time\n #paging data for histogram\n statsd.histogram('page.load.hist_timer', duration, tags=['type:support', 'page:main'])\n return \"App Main Page\"\n\n\n#About page handler\n@app.route('/about')\n@statsd.timed('page.load.time', tags=['type:support', 'page:about'])\ndef about():\n statsd.increment('web.page_count', tags=['page:about'])\n return \"App About Page\"\n\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","sub_path":"datadog_levels.py","file_name":"datadog_levels.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"409534188","text":"\n# for x in list: \n# the body after a for statement must be indented\n\nnames = ['zhangSan', 'liSi', 'wangWu']\n\nfor name in names:\n print(name)\n\n# range: generates a sequence\n\nr = range(3)\n\nprint(r)\n\n# convert the range to a list\nr = list(r)\n\nprint(r)\n\na = range(101)\n\na = list(a)\n\nsum = 0\nfor n in a: \n sum = sum + n\n\nprint(sum)\n\n# while: note the colon; break and continue\n\nn = 1\nwhile n <= 100:\n if n > 10: # when n reaches 11 the condition holds and break runs\n break # break ends the current loop\n print(n)\n n = n + 1\nprint('END')","sub_path":"base/for-while.py","file_name":"for-while.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"210393847","text":"'''\nThis program creates a choropleth map using folium. The map \nshows the school district boundaries overlaid with markers \nfor New York City's public libraries. \n'''\n\nimport folium\nfrom folium.plugins import MarkerCluster\nimport pandas as pd\n\nlibrary_map = folium.Map(location=[40.75, -74.125])\nlibrary_data = pd.read_csv('LIBRARY.csv')\n\ncoords = []\npopups = []\n\n# add the markers for the public libraries\nfor index, row in library_data.iterrows():\n # get the location of the libraries\n coord = row['the_geom']\n coord = coord.split(' ')\n lat = coord[2].strip(')')\n lon = coord[1].strip('(')\n coords.append([lat, lon])\n\n name = row['NAME']\n # get rid of the note at the end of some library names\n name = name.split(' 
')[0]\n popups.append(name)\n\nlibrary_map.add_children(MarkerCluster(locations=coords, popups=popups))\n\n# add a layer displaying the school district boundaries\nlibrary_map.choropleth(geo_path='schoolDistricts.json',\n fill_opacity=0.15, line_opacity=0.75,\n key_on='feature.properties.SchoolDist')\n\nlibrary_map.save(outfile='libraries_schools_map.html')","sub_path":"homework/week06/library_map.py","file_name":"library_map.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"371959766","text":"from tkinter import *\nfrom random import randrange\n\nPlayer1 = 0\nPlayer2 = 0\n\ntk=Tk()\ntk.title(\"Player1 : \"+str(Player1)+\" | \"+\"Player2 : \"+str(Player2))\n\nX = 800\nY = 600\nspeed = 5 \n\n\nPLAY = False\n\nclass Raquette :\n def __init__(self,x,y,haut,bas):\n self.x = x\n self.y = y\n self.tx = 10\n self.ty = 50\n self.speed = 10\n self.haut = haut\n self.bas = bas\n self.ra = terrain.create_rectangle(self.x, self.y, self.x+self.tx, self.y+self.ty, fill = \"white\")\n tk.bind_all(self.haut,self.monter)\n tk.bind_all(self.bas,self.descendre)\n\n def monter(self,event):\n self.deplacer(-self.speed)\n def descendre(self,event):\n self.deplacer(self.speed)\n\n\n\n def deplacer (self,dy):\n if PLAY :\n self.y += dy\n if self.y < 0 :\n self.y = 0\n if self.y > Y-self.ty:\n self.y = Y-self.ty\n self.reset()\n def placer(self,x,y):\n self.x = x\n self.y = y\n terrain.coords(self.ra, self.x, self.y, self.x+self.tx, self.y+self.ty)\n\n\n def reset(self):\n terrain.coords(self.ra, self.x, self.y, self.x+self.tx, self.y+self.ty)\n\n\nclass Pong :\n def __init__(self):\n self.tx = 10\n self.ty = 10\n self.r1 = Raquette(30,Y/2-self.ty/2,\"\",\"\")\n self.r2 = Raquette(X-30,Y/2-self.ty/2,\"\",\"\")\n self.dummy = randrange(0, 100)\n\n self.x = X/2-self.tx/2\n self.y = Y/2-self.ty/2\n self.dx = 0\n if self.dummy < 50 :\n self.dx = -1\n else :\n self.dx=1\n\n self.dy = float(randrange(-100,100))/100\n\n self.balle = terrain.create_rectangle(self.x, self.y, self.x+self.tx, self.y+self.ty, fill = \"white\")\n self.deplacer()\n tk.bind_all(\"\",self.pause)\n\n def pause(self, event):\n global PLAY\n if PLAY :\n PLAY = False\n else :\n PLAY = True\n\n\n def reset(self):\n terrain.coords(self.balle, self.x, self.y, self.x+self.tx, self.y+self.ty)\n\n def deplacer(self):\n global Player1, Player2, PLAY\n if PLAY :\n self.x += self.dx*speed\n self.y += self.dy*speed\n\n if self.y <= 0 or self.y >= Y-self.ty :\n self.dy =- self.dy\n if self.r1.y <= self.y + self.ty and self.r1.y + self.r1.ty >= self.y :\n if self.x <= self.r1. 
x+ self.r1.tx and not self.x + self.tx <= self.r1.x:\n self.dx =- self.dx\n self.dy = float(randrange(-100,100))/100\n\n if self.r2.y <= self.y + self.ty and self.r2.y + self.r2.ty >= self.y :\n if self.x + self.tx >= self.r2.x and not self.x >= self.r2.x + self.r2.tx :\n self.dx =- self.dx\n self.dy = float(randrange(-100,100))/100\n\n if self.x+self.tx<0 :\n self.x = X/2-self.tx/2\n self.y = Y/2-self.ty/2\n Player2 += 1\n self.dx =- self.dx\n self.dy = float(randrange(-100,100))/100\n self.r1.placer(30, Y/2-self.ty/2)\n self.r2.placer(X-30, Y/2-self.ty/2)\n PLAY = False\n\n\n if self.x + self.tx>X :\n self.x = X/2-self.tx/2\n self.y = Y/2-self.ty/2\n Player1 += 1\n self.dx =- self.dx\n self.dy = float(randrange(-100,100))/100\n self.r1.placer(30, Y/2-self.ty/2)\n self.r2.placer(X-30, Y/2-self.ty/2)\n PLAY = False\n\n\n\n tk.title(\"Player1 : \"+str(Player1)+\" | \"+\"Player2 : \"+str(Player2))\n self.reset()\n\n tk.after(30,self.deplacer)\n\n\nif __name__ == '__main__':\n terrain = Canvas(tk, bg=\"black\",height = Y, width = X)\n terrain.pack()\n jeu = Pong()\n\n tk.mainloop()","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"542200569","text":"# -*- coding: utf-8 -*-\n\nimport camera as Cam\nimport detector as Det\nimport system as System\nfrom PIL import Image, ImageTk\nimport tkinter as tk\nfrom tkinter import Menu\nfrom tkinter import ttk\nimport sys\n\n\nclass Gui(object):\n def _quit(self):\n self.root.quit()\n self.root.destroy()\n sys.exit\n\n def see_feed(self):\n self.camera = Cam.VideoCam()\n self.camera.show_feed()\n print(\"Ending show feed\")\n\n def detect_cam(self):\n self.camera = Cam.VideoCam()\n self.detector = Det.FaceDetector(self.HAAR_XML)\n self.camera.detect_live(self.detector)\n print(\"Using a camera to detect a face\")\n\n def detect_pic(self):\n from tkinter.filedialog import askopenfilename\n pic = askopenfilename()\n# d = Det.FaceDetector(self.HAAR_XML)\n# d.detect_from_img(pic)\n self.detector = Det.FaceDetector(self.HAAR_XML)\n self.detector.detect_from_img(pic)\n\n def select_dir(self):\n from tkinter import filedialog\n train_path = filedialog.askdirectory(title='Please select a directory')\n print(train_path)\n\n def recog_cam_CV(self):\n print(\"Recognise a face from a camera using OpenCV\")\n\n def recog_cam_custom(self):\n print(\"Recognise a face from a camera using custom system\")\n\n def recog_from_pic(self):\n from tkinter.filedialog import askopenfilename\n pic_name = askopenfilename()\n print(\"Recognise a face from a picture using OpenCV: \" + pic_name)\n\n def train_person(self):\n print(\"Adding a person to training model\")\n\n def cmd(self, s=''):\n print(\"Checking Tkinter command fn: \" + s)\n\n def get_win_size(self, win):\n win.update()\n print(win.winfo_width())\n print(win.winfo_height())\n\n def set_win_size(self, win, w, h=1):\n win.minsize(width=w, height=h)\n\n def snap_imgs(self):\n label = self.snapScreenBox.get(\"1.0\", \"end-1c\")\n print(label)\n System.grab_frames(label, 's', self.HAAR_XML)\n\n def add_person(self):\n self.snapScreenBox.pack()\n buttonCommit = tk.Button(self.root, height=3, width=56,\n text=\"Write a name of person and start\",\n command=lambda: self.snap_imgs())\n buttonCommit.pack()\n\n def __init__(self, haar):\n # Windows\n self.root = tk.Tk()\n self.root.title(\"Python GUI\")\n self.root.minsize(600, 1)\n self.snapScreenBox = tk.Text(self.root, height=3, 
width=50)\n\n self.HAAR_XML = haar\n self.camera = None\n self.detector = None\n\n # menu bar items\n self.menu_bar = Menu()\n self.file_menu = Menu(self.menu_bar, tearoff=0)\n self.file_menu.add_command(label=\"See camera feed\",\n command=lambda: self.see_feed())\n self.file_menu.add_separator()\n self.file_menu.add_command(label=\"Exit\", command=lambda: self._quit())\n self.menu_bar.add_cascade(label=\"File\", menu=self.file_menu)\n\n # Detect menu\n self.detect_menu = Menu(self.menu_bar, tearoff=0)\n self.detect_menu.add_command(label=\"From a camera\",\n command=self.detect_cam)\n self.detect_menu.add_separator()\n self.detect_menu.add_command(label=\"From a picture\",\n command=self.detect_pic)\n self.menu_bar.add_cascade(label=\"Detect Face\", menu=self.detect_menu)\n\n # Train model menu\n self.train_menu = Menu(self.menu_bar, tearoff=0)\n self.train_menu.add_command(label=\"Start training\",\n command=self.select_dir)\n self.menu_bar.add_cascade(label=\"Train a model\", menu=self.train_menu)\n\n # Recognision menu\n# self.recog_menu = Menu(self.menu_bar, tearoff=0)\n# self.recog_menu.add_command(label=\"Use OpenCV\", command=self.recog_cam_CV)\n# self.recog_menu.add_separator()\n# self.recog_menu.add_command(label=\"Use custom model\", command=self.recog_cam_custom)\n# self.recog_menu.add_separator()\n# self.recog_menu.add_command(label=\"From a picture\", command=self.recog_from_pic)\n# self.menu_bar.add_cascade(label=\"Recognise Face\", menu=self.recog_menu)\n\n # Train by camera menu\n self.add_person_menu = Menu(self.menu_bar, tearoff=0)\n self.add_person_menu.add_command(label=\"Add person\",\n command=self.add_person)\n self.menu_bar.add_cascade(label=\"Build own model\",\n menu=self.add_person_menu)\n\n # Help, about menu\n self.help_menu = Menu(self.menu_bar, tearoff=0)\n self.help_menu.add_command(label='About',\n command=lambda: self.cmd(\"spam\"))\n self.menu_bar.add_cascade(label='Help', menu=self.help_menu)\n\n # --- Main loop\n self.root.config(menu=self.menu_bar)\n self.root.mainloop()\n","sub_path":"code/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"589303115","text":"from PIL import Image\nfrom PIL import ImageFilter\nimport os\n\ndef preprocess_image(region):\n for y in xrange(0, region.size[1]):\n for x in xrange(0, region.size[0]):\n (r, g, b, a)=region.getpixel((x,y))\n if a < 192:\n region.putpixel((x,y),(255, 255, 255, 255))\n else:\n region.putpixel((x,y),(0, 0, 0, 255))\n region = region.resize((region.size[0]*5, region.size[1]*5))\n region = region.filter(ImageFilter.SHARPEN)\n return region\n\nfor i in range(1,10):\n file_name = str(i)+'.png'\n img = Image.open(file_name)\n new_region = preprocess_image(img)\n tif_name = str(i)+'.tif'\n new_region.save(tif_name, 'TIFF')\n os.popen('tesseract '+tif_name+' '+str(i)+' digits')\n","sub_path":"training_test/gen_tif.py","file_name":"gen_tif.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"252437214","text":"#!/usr/bin/env python\n\nimport os\nimport re\n\n\ndef pam2map(filename):\n \"\"\"Convert Partable anymap(.pgm, .pbm) to 2d [0|1] array\"\"\"\n with open(filename, 'r') as f:\n buf = f.read()\n try:\n header, ver, width, height, maxval, byte_array = re.search(\n b\"(^P(\\d)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n 
b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\"\n b\"(.*)\", buf).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n\n if ver == b'5':\n color_byte_len = 1\n elif ver == b'6':\n color_byte_len = 3\n else:\n color_byte_len = 3\n\n width, height = int(width), int(height)\n\n map_data = [[0]*width for _ in range(height)]\n for i in range(height):\n for j in range(width):\n for k in range(color_byte_len):\n if byte_array[i*width*color_byte_len +\n j*color_byte_len + k] != '\\x00':\n map_data[height - i - 1][j] = 0\n break\n else:\n map_data[height - i - 1][j] = 1\n\n return map_data\n\n\ndef main():\n m = pam2map(os.path.dirname(__file__) + \"map.pgm\")\n print(m)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"particles_filter/scripts_ROS/pam2map.py","file_name":"pam2map.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"155535751","text":"from num import num\nfrom sym import sym\nimport re\n\n\nclass data:\n\n def __init__(self,filename):\n self.w={}\n self.syms={}\n self.nums={}\n self.Class=None\n self.rows=[]\n self.name=[]\n self._use=[]\n self.indeps=[]\n self.filename=filename\n self.rows1()\n\n\n\n def indep(self,current):\n return not self.w[current] and self.Class!=current\n\n def dep(self,current):\n return not self.indep(current)\n\n def header(self,cells):\n for c0,x in enumerate(cells):\n if not re.match(r\"\\?\",x):\n c=len(self._use)\n self._use.append(c0)\n self.name.append(x)\n if re.match(r\"[<>\\$]\",x):\n self.nums[c]=num()\n else:\n self.syms[c]=sym([])\n if re.match(\"<\",x):\n self.w[c]=-1\n elif re.match(\">\",x):\n self.w[c]=1\n elif re.match(\"!\",x):\n self.Class=c\n else:\n self.indeps.append(c)\n return True\n\n\n def row(self,cells):\n r=len(self.rows)\n self.rows.append({})\n for c,c0 in enumerate(self._use):\n x=cells[c0]\n if not x==\"?\":\n if c in self.nums.keys():\n x=float(x)\n self.nums[c].numInc(x)\n else:\n self.syms[c].symInc(x)\n\n self.rows[r][c]=x\n return True\n\n\n def rows1(self):\n with open(self.filename,\"r+\") as f:\n first=True\n for line in f:\n line=re.sub(r\"[\\t\\r\\n ]\",\"\",line)\n line=re.sub(r\"#.*\",\"\",line)\n cells=line.split(\",\")\n if len(cells)>0:\n if first:\n self.header(cells)\n else:\n self.row(cells)\n first=False\n\n'''\n def rows(self,file):\n return self.rows1(file)#input\n'''","sub_path":"w6/rows.py","file_name":"rows.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"} +{"seq_id":"153580902","text":"\n\n\"\"\"\n Sample Run:\n python splits_prep/organize_menpo_train.py\n\n Copies landmark points and the images of menpo dataset into a common \n directory so that get_jsons_from_config.py script could be used. 
Before\n copying, check that the landmark file corresponding to the image has 68\n landmark points; otherwise do not copy.\n\n Version 1 2019-06-20 Abhinav Kumar\n\"\"\"\n\nimport os\nimport numpy as np\nfrom common_functions import grab_files, copy_files\n\nLANDMARK_DELIMITER = \" \"\nLANDMARK_HEADERS = 3\nLANDMARK_FOOTERS = 1\nIMAGE_EXTENSIONS= [\".png\", \".jpg\"]\nLANDMARK_GROUND_TRUTH_EXTENSIONS = [\".pts\"]\n\ninput_folder = \"./bigdata1/zt53/data/face/menpo/trainset\"\noutput_folder = \"./bigdata1/zt53/data/face/menpo_organized/trainset\"\n\nif os.path.exists(output_folder):\n print(\"Output directory exists\")\nelse:\n print(\"Creating directory {}\".format(output_folder))\n os.makedirs(output_folder)\n\n\ninput_sub_directory_full_path = input_folder\n# Grab all the image files\nimage_files_grabbed = grab_files(input_sub_directory_full_path, IMAGE_EXTENSIONS)\nnum_images_1 = len(image_files_grabbed)\n\nimage_files_68landmarks = []\nlandmark_files_68landmarks = []\n\n# Read all the landmarks and check if there are 68 landmarks for each of them\n# since some images in the menpo also have 39 landmarks which can not be used \n# for testing. Copy only those which have 68 landmarks\nfor i in range(num_images_1):\n landmark_file_path = image_files_grabbed[i][:-4] + LANDMARK_GROUND_TRUTH_EXTENSIONS[0]\n landmark = np.genfromtxt(landmark_file_path, skip_header=LANDMARK_HEADERS, skip_footer=LANDMARK_FOOTERS, delimiter=LANDMARK_DELIMITER)\n\n if landmark.shape[0] == 68:\n image_files_68landmarks.append(image_files_grabbed[i])\n landmark_files_68landmarks.append(landmark_file_path)\n\nnum_images = len(image_files_68landmarks)\nprint(\"Image_directory= {:54s} #Images= {} #Images_with_68_landmarks= {}\".format(input_sub_directory_full_path, num_images_1, num_images))\n\noutput_sub_directory_full_path = output_folder\nif not os.path.exists(output_sub_directory_full_path):\n print(\"Creating subdirectory {}\".format(output_sub_directory_full_path))\n os.makedirs(output_sub_directory_full_path)\n\n# Copy all image files\ncopy_files(image_files_68landmarks , output_sub_directory_full_path)\n# Copy all landmark files\ncopy_files(landmark_files_68landmarks, output_sub_directory_full_path)\n","sub_path":"splits_prep/organize_menpo_train.py","file_name":"organize_menpo_train.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"95180306","text":"#!/usr/bin/env python3\r\n#\r\n\r\n\r\n'''\r\n\r\nCreate an epoll object\r\nTell the epoll object to monitor specific events on specific sockets\r\nAsk the epoll object which sockets may have had the specified event since the last query\r\nPerform some action on those sockets\r\nTell the epoll object to modify the list of sockets and/or events to monitor\r\nRepeat steps 3 through 5 until finished\r\nDestroy the epoll object\r\n'''\r\n\r\n'''\r\nimport select - imports the select module\r\n\r\nepoll = select.epoll() - creates an epoll object\r\n\r\nepoll.register(fd, eventmask) - registers a file descriptor and the events to monitor\r\n\r\nevent types:\r\n\r\n  select.EPOLLIN - readable event\r\n\r\n  select.EPOLLOUT - writable event\r\n\r\n  select.EPOLLERR - error event\r\n\r\n  select.EPOLLHUP - client hang-up event\r\n\r\nepoll.unregister(fd) - drops a registered file descriptor\r\n\r\nepoll.poll(timeout) - reports the descriptors whose state changed, as a list; timeout\r\n\r\n is the wait in seconds, default -1, i.e. block until a descriptor changes; with timeout=1\r\n\r\n epoll reports the current changes once per second and returns an empty list if nothing changed\r\n\r\nepoll.fileno() - returns the epoll control file descriptor\r\n\r\nepoll.modify(fileno, event) - fileno is a file descriptor, event an event type; changes the events registered for that descriptor\r\n\r\nepoll.fromfd(fileno) - creates an epoll object from a given file descriptor\r\n\r\nepoll.close() - closes the epoll object's control file descriptor\r\n'''\r\n\r\nimport socket\r\nimport select\r\nimport queue\r\n\r\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserver_address = (\"0.0.0.0\", 8888)\r\nserver_socket.bind(server_address)\r\nserver_socket.listen(10)\r\nprint(\"server start, listening: \", server_address)\r\nserver_socket.setblocking(False)\r\nserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\ntimeout = 10\r\nepoll = select.epoll()\r\n\r\n# register the listening socket in the read-event set\r\nepoll.register(server_socket.fileno(), select.EPOLLIN)\r\n\r\n# per-client message queues, keyed by connection\r\nmessage_queues = {}\r\n\r\n# map file descriptors to their socket objects\r\nfd_to_socket = {server_socket.fileno(): server_socket, }\r\n\r\nwhile True:\r\n print('wait for new connection...')\r\n events = epoll.poll(timeout)\r\n if not events:\r\n print('epoll timed out, no ready connection, polling again...')\r\n continue\r\n print(f'{len(events)} new event(s) arrived, start handling...')\r\n\r\n for fd, event in events:\r\n sock = fd_to_socket[fd]\r\n\r\n # a ready listening socket means a new incoming connection\r\n if sock == server_socket:\r\n connection, address = server_socket.accept()\r\n print('new connection: ', address)\r\n connection.setblocking(False)\r\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\r\n # register the new connection in the read-event set\r\n epoll.register(connection.fileno(), select.EPOLLIN)\r\n\r\n # remember the new connection's fd and socket object\r\n fd_to_socket[connection.fileno()] = connection\r\n\r\n message_queues[connection] = queue.Queue()\r\n\r\n # hang-up event\r\n elif event & select.EPOLLHUP:\r\n print('client close')\r\n epoll.unregister(fd)\r\n fd_to_socket[fd].close()\r\n del fd_to_socket[fd]\r\n\r\n # readable event\r\n elif event & select.EPOLLIN:\r\n data = sock.recv(1024)\r\n if data:\r\n print(\"recv: \", data, \"client: \", sock.getpeername())\r\n\r\n # queue the data for this client\r\n message_queues[sock].put(data)\r\n\r\n # switch this connection to the write-event set so the reply can be sent\r\n epoll.modify(fd, select.EPOLLOUT)\r\n\r\n # writable event\r\n elif event & select.EPOLLOUT:\r\n try:\r\n # fetch the next queued message for this client\r\n msg = message_queues[sock].get_nowait()\r\n except queue.Empty:\r\n print(sock.getpeername(), \" queue empty\")\r\n # switch the descriptor back to read events\r\n epoll.modify(fd, select.EPOLLIN)\r\n else:\r\n print(\"send: \", msg, \"client: \", sock.getpeername())\r\n # send the data\r\n sock.send(msg)\r\n\r\n# unregister the server socket from epoll\r\nepoll.unregister(server_socket.fileno())\r\n# close epoll\r\nepoll.close()\r\n# close the server socket\r\nserver_socket.close()\r\n","sub_path":"basic_/io_multiplexing_/demo01_server.py","file_name":"demo01_server.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"526323546","text":"\r\nfrom Patient import Patient\r\nfrom Bed import Bed\r\nfrom ClassABed import ClassABed\r\nfrom ClassBBed import ClassBBed\r\nfrom ClassCBed import ClassCBed\r\nclass Child(Patient):\r\n def __init__(self,id,name,age,gender,citizen_status,cda_balance):\r\n super().__init__(id,name,age,gender,citizen_status)\r\n self.__cda_balance=cda_balance\r\n\r\n @property\r\n def cda_balance(self):\r\n return self.__cda_balance\r\n\r\n @cda_balance.setter\r\n def cda_balance(self,cda_balance):\r\n self.__cda_balance = cda_balance\r\n\r\n def calculate_charges(self):\r\n total=0\r\n import datetime\r\n format = \"%d/%m/%Y\"\r\n for i,each in enumerate(self.stay_list):\r\n 
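# Only stays still marked 'Unpaid' contribute to the bill; each bed stay\r\n # is charged per day at the bed-class rate for this citizen status.\r\n 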
if each.payment_status=='Unpaid':\r\n print('=====Stay # {}====='.format(i+1))\r\n print('Admission date:',each.admitted_date)\r\n print('Discharge date:',each.discharge_date)\r\n print('Payment status:',each.payment_status)\r\n for n,every in enumerate(each.bedstay_list):\r\n print('=====Bed # {}====='.format(n+1))\r\n print('Ward number:',every.bed.ward_no)\r\n print('Bed number:',every.bed.bed_no)\r\n print('Ward class:',every.bed.bed_type)\r\n print('Start of bed stay:',every.start_bedstay)\r\n print('End of bed stay:',every.end_bedstay)\r\n if isinstance(every.bed,ClassABed):\r\n print('Accompany person:',every.bed.accompanying_person)\r\n if isinstance(every.bed,ClassBBed):\r\n print('Air-conditioned variant:',every.bed.air_con)\r\n if isinstance(every.bed,ClassCBed):\r\n print('Portable TV:',every.bed.portable_tv)\r\n start=datetime.datetime.strptime(every.start_bedstay,format)\r\n end=datetime.datetime.strptime(every.end_bedstay,format)\r\n days=(end-start).days\r\n print('Number of days stayed:',days)\r\n sub=days*every.bed.calculate_charges(self.citizen_status)\r\n print('Total charges:$',sub)\r\n total+=sub\r\n if self.__cda_balance < total:\r\n subtotal=total-self.__cda_balance\r\n deduct=self.__cda_balance\r\n else:\r\n subtotal=0\r\n deduct=total\r\n print('=========')\r\n print('Total charges pending:$',total)\r\n print('CDA balance:$',self.__cda_balance)\r\n print('To deduct from CDA:$',deduct)\r\n print('Sub-total:',subtotal)\r\n payment=input('\\nWould you like to make payment now?[Y/N]: ').upper()\r\n if payment=='Y':\r\n print('\\nCommencing payment...')\r\n self.stay_list[-1].payment_status='Paid'\r\n self.status='Registered'\r\n print('\\n$ {:d} has been deducted from CDA balance'.format(deduct))\r\n self.__cda_balance= self.__cda_balance-deduct\r\n print('New CDA balance:$',self.__cda_balance)\r\n print('Sub-toal:${:d} has been paid by cash'.format(subtotal))\r\n print('\\nPayment successful!')\r\n else:\r\n print('\\nPayment cancelled.')\r\n \r\n\r\n def __repr__(self):\r\n return self.__class__.__name__ + \"('{:s}','{:s}','{:d}','{:s}','{:s}','{:s}',{:d})\".format(\r\n self.id,self.name,self.age,self.gender,self.citizen_status,self.__cda_balance)\r\n\r\n\r\n\r\n \r\n","sub_path":"Child.py","file_name":"Child.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"603219837","text":"########## 分岐、ループに関してのまとめ ##########\n\n############### practice 2-1 ################\n# 名前を入力\nname = input(\"お名前は?\")\nprint(\"こんにちは\" + name + \"さん!\")\n\n############### practice 2-2 ################\n# ユーザーから入力を得て値を出力する\nper_inch = 2.54\nuser = input(\"何インチ?\")\ninch = float(user)\ncm = per_inch * inch\nprint(\"{0}inch = {1}cm\".format(inch, cm))\n\nint_i = int(12345)\nfloat_i = float(12345)\nstr_i = str(12345)\n\nprint(\"\"\"{0},{1},\n{2},{3},\n{4},{5}\"\"\".format(int_i, type(int_i), float_i, type(float_i), str_i,type(int_i), type(float_i), type(str_i)))\n# 型を変換するメソッドとして、\n# float(), int(), str() があり、型を調べるメソッドとしてtype()がある\n\n############### practice 2-3 ################\n#if文を利用する\nage = int(input(\"あなたの年齢は?\"))\n\nif age >= 80:\n print(\"大おじいさんですね\")\n\nelif age >= 60:\n print(\"おじいちゃんですね\")\n\nelif age >= 30:\n print(\"おじさんですね\")\n\nelif age >= 20:\n print(\"若者ですね\")\n\nelif age >= 10:\n print(\"ガキンチョですね\")\n\nelse:\n print(\"ちっちゃい子ですね!\")\n\n#給料計算\nmoney = int(input(\"時給はいくらですか?\"))\ntime = int(input(\"何時間働きましたか?\"))\nans = money * time\n\nprint(\"時給{0}円で、{1}時間働いたので、\".format(money, 
time))\nprint(\"your pay is {0} yen.\".format(ans))\n\n# Measure BMI\nweight = int(input(\"Enter your weight (kg): \"))\nheight = int(input(\"Next, enter your height (cm): \"))\n\nheight = height / 100\nbmi = weight / (height * height)\n\n\nprint(\"\\nBMI = {0}\".format(bmi))\nif bmi < 18.5:\n    print(\"You are on the thin side.\")\n\nelif bmi < 25:\n    print(\"You have a standard build.\")\n\nelif bmi < 30:\n    print(\"You may be a little overweight.\")\n\nelse:\n    print(\"It might be a good idea to start a diet.\")\n\n\n\n# In an if/elif chain using <, the smallest value comes first\n# In an if/elif chain using >, the largest value comes first\n\n# The logical operators used in Python are or, and, and not.\n\n\n########### test of and ############\nprint(\"\\ntest of and\\n\")\n\nif True and True:\n    print(\"true\")\nelse:\n    print(\"false\")\n\n\nif True and False:\n    print(\"true\")\nelse:\n    print(\"false\")\n\n\nif False and True:\n    print(\"true\")\nelse:\n    print(\"false\")\n\n\nif False and False:\n    print(\"true\")\nelse:\n    print(\"false\")\n\n\n############ test of or ############\nprint(\"\\ntest of or\\n\")\n\nif True or True:\n    print(\"true\")\nelse:\n    print(\"false\")\n\n\nif True or False:\n    print(\"true\")\nelse:\n    print(\"false\")\n\n\nif False or True:\n    print(\"true\")\nelse:\n    print(\"false\")\n\n\nif False or False:\n    print(\"true\")\nelse:\n    print(\"false\")\n\n############ test of not ############\nprint(\"\\ntest of not\\n\")\n\nif not True:\n    print(\"true\")\nelse:\n    print(\"false\")\n\nif not False:\n    print(\"true\")\nelse:\n    print(\"false\")\n\nprint(\"\")\n\n############### practice 2-4 ################\n# Using loops\n\n# while loop\nnum = 1\nwhile num <= 10:\n    print(\"Iteration {0}\".format(num))\n    num += 1\n\nelse:\n    print(\"\")\n\n# for loop\nfor i in range(6): # repeat 6 times\n    print(\"Iteration {0}\".format(i))\nelse:\n    print(\"\")\n\nfor i in range(1, 6): # iterate from 1 through 5\n    print(\"Iteration {0}\".format(i))\nelse:\n    print(\"\")\n\nfor i in range(1, 6, 2): # iterate from 1 through 5 in steps of 2\n    print(\"Iteration {0}\".format(i))\nelse:\n    print(\"\")\n\n# As shown above, the range() function is used to specify the bounds of a for loop.\n\n# Python loops can take an else clause, which lets you\n# specify processing to run after the loop finishes.\n\n# # range\n# for e in range(3):\n#     print(e)\n\n# # list\n# for e in [0, 1, 2]:\n#     print(e)\n\n# # tuple\n# for e in (0, 1, 2):\n#     print(e)\n\n# # set\n# for e in {0, 1, 2}:\n#     print(e)\n\n# # dict\n# for e in {0:'a', 1:'b', 2:'c'}:\n#     print(e)\n\n# # string\n# for e in '012':\n#     print(e)\n# These forms of iterables can also be used with for loops\n\n# Excerpt from a Python book\n# Draw 300 vertical lines on the screen\n\n# Import the graphics library\nfrom tkinter import * # pull in the library\n# Initialize the window\nw = Canvas(Tk(), width=830, height=445)# prepare an 830x445 canvas\nw.pack()# place the drawing canvas\n\n# Draw lots of lines\n# for i in range(300): # drawing loop\n#     x = i * 3\n#     w.create_line(x, 0, x, 400, fill=\"#FF0000\")\n\n# mainloop() # show the window and wait\n# ^ without this, the window only flashes for an instant and exits\n\n# A program that simply draws some lines\nw.create_line(0, 0, 100, 100, fill=\"#FF0000\")\nw.create_line(100, 0, 0, 100, fill=\"#FF0000\")\nw.create_line(100, 100, 100, 200, fill=\"#FF0000\")\nw.create_line(100, 200, 200, 300, fill=\"#FF0000\")\nw.create_line(200, 300, 100, 400, fill=\"#FF0000\")\nw.create_line(100, 300, 200, 400, fill=\"#FF0000\")\nw.create_line(100, 300, 0, 400, fill=\"#FF0000\")\n\n# The same lines as above, mirrored\nw.create_line(830-0, 445-0, 830-100, 445-100, fill=\"#FF0000\")\nw.create_line(830-100, 445-0, 830-0, 445-100, fill=\"#FF0000\")\nw.create_line(830-100, 445-100, 830-100, 445-200, fill=\"#FF0000\")\nw.create_line(830-100, 445-200, 830-200, 445-300, fill=\"#FF0000\")\nw.create_line(830-200, 445-300, 830-100, 445-400, fill=\"#FF0000\")\nw.create_line(830-100, 445-300, 830-200, 445-400, fill=\"#FF0000\")\nw.create_line(830-100, 445-300, 830-0, 445-400, fill=\"#FF0000\")\n\n# This is fun\nprint(\"This is fun!\")\n\nmainloop() # 
<- without this, the window only flashes for an instant and exits","sub_path":"Python/Django/practice/practice2.py","file_name":"practice2.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"60257562","text":"\n# -*- encoding: utf-8 -*-\n##############################################################################\n#\n#    OpenERP, Open Source Management Solution\n#    Copyright (C) 2004-2010 Tiny SPRL (http://tiny.be). All Rights Reserved\n#    \n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU General Public License as published by\n#    the Free Software Foundation, either version 3 of the License, or\n#    (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#    GNU General Public License for more details.\n#\n#    You should have received a copy of the GNU General Public License\n#    along with this program.  If not, see http://www.gnu.org/licenses/.\n#\n##############################################################################\n\nfrom openerp.osv import osv, fields\n\nclass product_product(osv.Model):\n    _inherit = 'product.product'\n    \n    _columns = {\n        'product_line': fields.many2many('res.partner.category', id1='product_id', id2='category_id', domain=[('parent_id', 'ilike', 'I')], string='Product Lines'),\n    }\n\nclass ProductCategory(osv.Model):\n    _inherit = 'product.category'\n\n    def name_get(self, cr, uid, ids, context=None):\n        if isinstance(ids, (list, tuple)) and not len(ids):\n            return []\n        if isinstance(ids, (long, int)):\n            ids = [ids]\n        reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)\n        res = []\n        for record in reads:\n            name = record['name']\n            res.append((record['id'], name))\n        return res","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"220836018","text":"import secrets\nfrom threading import Lock\n\nusers = list()\nusers_lock = Lock()\n\ndef register_user(name, number, contacts):\n    d = dict()\n    d['name'] = name; d['number'] = number; d['contacts'] = contacts\n    private_token = secrets.token_hex(4)\n    shared_token = secrets.token_hex(4)\n    d['private'] = private_token\n    d['shared'] = shared_token\n    d['subscribers'] = list()\n    users_lock.acquire()\n    users.append(d)\n    users_lock.release()\n    return private_token, shared_token\n\ndef subscribe_to_token(shared_token, user_id):\n    users_lock.acquire()\n    for ud in users:\n        if ud['shared'] == shared_token:\n            ud['subscribers'].append(user_id)\n            users_lock.release()\n            return\n    else:\n        users_lock.release()\n        raise IndexError\n\neq_numbers = lambda num1, num2: num1.rstrip('\\n ')[-10:] == num2.rstrip('\\n ')[-10:]\n\ndef get_user_by_number(number):\n    users_lock.acquire()\n    for ud in users:\n        if eq_numbers(ud['number'], number):\n            users_lock.release()\n            return ud\n    else:\n        users_lock.release()\n        raise IndexError\n","sub_path":"server/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"613100792","text":"import sys\n\nif len(sys.argv) != 4:\n\tprint(\"python overlap2.py all.pos check.pos out\")\n\texit(0)\nfw = open(sys.argv[3],'w')\nwith open(sys.argv[1]) as fh1,open(sys.argv[2]) as 
fh2:\n\tdic = {}\n\tfor rows in fh2:\n\t\tif rows.startswith('chr'):continue\n\t\trow = rows.strip().split('\\t')\n\t\tchr2,start2,end2 = row[:3]\n\t\tanno = row[-1]\n\t\tif chr2 in dic:\n\t\t\tdic[chr2].append(start2+'_'+end2+'_'+anno)\n\t\telse:\n\t\t\tdic[chr2] = [start2+'_'+end2+'_'+anno]\n\tfor lines in fh1:\n\t\tif lines.startswith('chr'):continue\n\t\tline = lines.strip().split('\\t')\n\t\tchr,start,end = line[:3]\n\t\tset1 = set(range(int(start),int(end)+1))\n\t\ttmps = []\n\t\tif chr in dic:\n\t\t\tlst = dic[chr]\n\t\t\tfor i in lst:\n\t\t\t\tstart2,end2,anno = i.split('_')\n\t\t\t\tset2 = set(range(int(start2),int(end2)+1))\n\t\t\t\t#lst_tmp = list(set1.intersection(set2))\n\t\t\t\tlst_tmp = set1.intersection(set2)\n\t\t\t\tif lst_tmp:\n\t\t\t\t\t# keep the matched interval together with its own annotation\n\t\t\t\t\ttmps = [start2,end2,anno]\n\t\t\tif len(tmps) > 0:\n\t\t\t\ts2,e2,anno2 = tmps\n\t\t\t\tfw.write(chr+'\\t'+start+'\\t'+end+'\\t'+'1'+'\\t'+str(s2)+'\\t'+str(e2)+'\\t'+anno2+'\\n')\n\t\t\telse:\n\t\t\t\tfw.write(chr+'\\t'+start+'\\t'+end+'\\t'+'0'+'\\t'+'-'+'\\t'+'-'+'\\t'+'-'+'\\n')\n\t\t\t\n\t\telse:\n\t\t\tfw.write(chr+'\\t'+start+'\\t'+end+'\\t'+'0'+'\\t'+'-'+'\\t'+'-'+'\\t'+'-'+'\\n')\nfw.close()\n","sub_path":"overlap2.py","file_name":"overlap2.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"53733633","text":"\"\"\"\nThis module provides higher-level api calls for creating and managing a\nVnicLanConnTempl\n\"\"\"\n\nfrom ucsmsdk.ucsexception import UcsOperationError\n\ndef vnic_lan_conn_templ_create(handle, name, obj_dn=\"org-root\", \n\t\t\t\t\t\t\t admin_cdn_name=None, cdn_source=None, descr=None,\n\t\t\t\t\t\t\t ident_pool_name=None, mtu=None, \n\t\t\t\t\t\t\t nw_ctrl_policy_name=None, \n\t\t\t\t\t\t\t peer_redundancy_templ_name=None, \n\t\t\t\t\t\t\t pin_to_group_name=None, policy_owner=None,\n\t\t\t\t\t\t\t qos_policy_name=None, redundancy_pair_type=None,\n\t\t\t\t\t\t\t stats_policy_name=None, switch_id=None, \n\t\t\t\t\t\t\t target=None ,templ_type=None, **kwargs):\n\t\"\"\"\n\tcreate the vnic lan conn templ\n\t\n\tArgs:\n\t\thandle (UcsHandle)\n\t\tadmin_cdn_name (string):\n\t\tcdn_source (string): 'user-defined' or 'vnicname'\n\t\tdescr (string):\n\t\tident_pool_name (string):\n\t\tmtu (string):\n\t\tnw_ctrl_policy_name (string):\n\t\tpeer_redundancy_templ_name (string):\n\t\tpin_to_group_name (string):\n\t\tpolicy_owner (string): 'local' or 'pending-policy' or 'policy'\n\t\tqos_policy_name (string):\n\t\tredundancy_pair_type (string): 'none' or 'primary' or 'secondary'\n\t\tstats_policy_name (string):\n\t\tswitch_id (string): 'A' or 'B' or 'A-B' or 'B-A'\n\t\ttarget (string):\n\t\ttempl_type (string): 'initial-template' or 'updating-template'\n\t\t**kwargs:\n\t\t\n\tReturns:\n\t\tVnicLanConnTempl: managed object\n\t\t\n\tRaises:\n\t\tUcsOperationError: if OrgOrg is not present\n\t\t\n\tExample:\n\t\t\n\t\"\"\"\n\tfrom ucsmsdk.mometa.vnic.VnicLanConnTempl import VnicLanConnTempl\n\t\n\tobj = handle.query_dn(obj_dn)\n\tif not obj:\n\t\traise UcsOperationError(\"vnic_lan_conn_templ_create\", \"OrgOrg '%s' \\\n\t\t\t\t\t\t\t\tdoes not exist\" % obj_dn)\n\t\n\tmo = VnicLanConnTempl(parent_mo_or_dn=obj, name=name, \n\t\t\t\t\t\t  admin_cdn_name=admin_cdn_name, cdn_source=cdn_source,\n\t\t\t\t\t\t  descr=descr, ident_pool_name=ident_pool_name, \n\t\t\t\t\t\t  mtu=mtu, nw_ctrl_policy_name=nw_ctrl_policy_name, \n\t\t\t\t\t\t  peer_redundancy_templ_name=peer_redundancy_templ_name,\n\t\t\t\t\t\t  
pin_to_group_name=pin_to_group_name, \n\t\t\t\t\t\t  policy_owner=policy_owner, \n\t\t\t\t\t\t  qos_policy_name=qos_policy_name, \n\t\t\t\t\t\t  redundancy_pair_type=redundancy_pair_type, \n\t\t\t\t\t\t  stats_policy_name=stats_policy_name, \n\t\t\t\t\t\t  switch_id=switch_id, target=target, \n\t\t\t\t\t\t  templ_type=templ_type)\n\tmo.set_prop_multiple(**kwargs)\n\thandle.add_mo(mo, modify_present=True)\n\thandle.commit()\n\treturn mo\n\t\ndef vnic_lan_conn_templ_get(handle, name, obj_dn, \n\t\t\t\t\t\t\tcaller=\"vnic_lan_conn_templ_get\"):\n\t\"\"\"\n\tgets vnic lan conn templ\n\t\n\tArgs:\n\t\thandle (UcsHandle)\n\t\tname (string):\n\t\tobj_dn (string):\n\t\tcaller (string):\n\t\t\n\tReturns:\n\t\tVnicLanConnTempl: managed object\n\t\t\n\tRaises:\n\t\tUcsOperationError: if VnicLanConnTempl is not present\n\t\t\n\tExample:\n\t\t\n\t\"\"\"\n\tdn = obj_dn + \"/lan-conn-templ-\" + name \n\tmo = handle.query_dn(dn)\n\tif mo is None:\n\t\traise UcsOperationError(caller, \"VnicLanConnTempl '%s' does not exist\"\\\n\t\t\t\t\t\t\t\t% dn)\n\treturn mo\n\t\ndef vnic_lan_conn_templ_exists(handle, name, obj_dn, **kwargs):\n\t\"\"\"\n\tchecks if vnic lan conn templ exists\n\t\n\tArgs:\n\t\thandle (UcsHandle)\n\t\tname (string):\n\t\tobj_dn (string):\n\t\t**kwargs: key-value pair of managed object(MO) property and value, Use\n                  'print(ucscoreutils.get_meta_info(<classid>).config_props)'\n                  to get all configurable properties of class\n\t\t\t\t \n\tReturns:\n\t\t(True/False, VnicLanConnTempl mo/None)\n\t\t\n\tRaises:\n\t\tNone\n\t\t\n\tExample:\n\t\t\n\t\"\"\"\n\ttry:\n\t\tmo = vnic_lan_conn_templ_get(handle=handle, name=name, obj_dn=obj_dn, \n\t\t\t\t\t\t\t\t\t caller=\"vnic_lan_conn_templ_exists\")\n\texcept UcsOperationError:\n\t\treturn (False, None)\n\t\n\tmo_exists = mo.check_prop_match(**kwargs)\n\treturn (mo_exists, mo if mo_exists else None)\n\t\ndef vnic_lan_conn_templ_modify(handle, name, obj_dn, **kwargs):\n\t\"\"\"\n\tmodifies vnic lan conn templ\n\t\n\tArgs:\n\t\thandle (UcsHandle)\n\t\tname (string):\n\t\tobj_dn (string):\n\t\t**kwargs: key-value pair of managed object(MO) property and value, Use\n                  'print(ucscoreutils.get_meta_info(<classid>).config_props)'\n                  to get all configurable properties of class\n\t\t\t\t \n\tReturns:\n\t\tVnicLanConnTempl: managed object\n\t\t\n\tRaises:\n\t\tUcsOperationError: if VnicLanConnTempl is not present\n\t\t\n\tExample:\n\t\t\n\t\"\"\"\n\tmo = vnic_lan_conn_templ_get(handle=handle, name=name, obj_dn=obj_dn, \n\t\t\t\t\t\t\t\t caller=\"vnic_lan_conn_templ_modify\")\n\tmo.set_prop_multiple(**kwargs)\n\thandle.set_mo(mo)\n\thandle.commit()\n\treturn mo\n\t\ndef vnic_lan_conn_templ_delete(handle, name, obj_dn):\n\t\"\"\"\n\tdeletes vnic lan conn templ\n\t\n\tArgs:\n\t\thandle (UcsHandle)\n\t\tname (string):\n\t\tobj_dn (string):\n\t\t\n\tReturns:\n\t\tNone\n\t\n\tRaises:\n\t\tUcsOperationError: if VnicLanConnTempl is not present\n\t\t\n\tExample:\n\t\t\n\t\"\"\"\n\tmo = vnic_lan_conn_templ_get(handle=handle, name=name, obj_dn=obj_dn,\n\t\t\t\t\t\t\t\t caller=\"vnic_lan_conn_templ_delete\")\n\thandle.remove_mo(mo)\n\thandle.commit()","sub_path":"ucsm_apis/service_profile/vnic_lan_conn_templ.py","file_name":"vnic_lan_conn_templ.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"271210837","text":"import subprocess\n\nf = open(\"popular-names.txt\", \"r\")\nl_ary = f.readlines()\nf.close()\n\nprint(\"n = \", end=\"\")\nn = int(input())\ns = \"\"\nfor i in range(len(l_ary)-n, 
len(l_ary)):\n    s += l_ary[i]\n\nprint(s, end='')\n\nres = subprocess.run([\"./15.sh\", str(n)], capture_output=True, text=True)\nif s == res.stdout:\n    print(\"Correct\")\nelse:\n    print(\"Incorrect\")\n","sub_path":"nip100/ch02/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"528621970","text":"# Algorithm that finds the largest Fibonacci number smaller than 10,000\n\nfn1, fn2 = 1, 1\nfn3 = 42 # fn3 needed some initial value so that it is defined before the loop\n\nwhile(fn3<10000): # loop the operations we want to run while the value is below 10,000\n\n    fn3 = fn1 + fn2\n    fn1 = fn2\n    fn2 = fn3\n\nprint(fn1) # fn3 was shifted into fn2 and fn2 into fn1, so we print fn1\n\n","sub_path":"PYTHON/DERS2-10.py","file_name":"DERS2-10.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"495123190","text":"def pi_function(s):\n    n = len(s)\n    pi = [0] * n\n    for i in range(1, n):\n        j = pi[i-1]\n        while j > 0 and s[i] != s[j]:\n            j = pi[j - 1]\n        if s[i] == s[j]:\n            j += 1\n        pi[i] = j\n    return pi\nprint(' '.join(map(str, pi_function(input()))))","sub_path":"pi_function.py","file_name":"pi_function.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"495021575","text":"import pandas\n\ndef add_full_name(path_to_csv, path_to_new_csv):\n    #Assume you will be reading in a csv file with the same columns that the\n    #Lahman baseball data set has -- most importantly, there are columns\n    #called 'nameFirst' and 'nameLast'.\n    #1) Write a function that reads a csv\n    #located at \"path_to_csv\" into a pandas DataFrame, adds a new column\n    #called 'nameFull' with a player's full name.\n    #\n    #For example:\n    #    for Hank Aaron, nameFull would be 'Hank Aaron', \n\t#\n\t#2) Write the data in the pandas DataFrame to a new csv file located at\n\t#path_to_new_csv\n\n    #WRITE YOUR CODE HERE\n    rawFile = pandas.read_csv(path_to_csv)\n    rawFile['nameFull'] = rawFile['nameFirst'] + ' ' + rawFile['nameLast']\n    rawFile.to_csv(path_to_new_csv)","sub_path":"python/data_science/dataWrangling/csvIO.py","file_name":"csvIO.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"384230778","text":"import logging\nimport emoji\nfrom typing import Dict\n\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update, ReplyKeyboardMarkup\nfrom telegram.ext import (\n    Updater,\n    CommandHandler,\n    CallbackQueryHandler,\n    ConversationHandler,\n    CallbackContext,\n    MessageHandler,\n    Filters\n)\n\n# Enable logging\nlogging.basicConfig(\n    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO\n)\n\nlogger = logging.getLogger(__name__)\n\n# Stages\nSTART, DAYS, TIME, PAX, REMARKS = range(5)\n\ndef start(update: Update, context: CallbackContext) -> None:\n    \"\"\"Send message on `/start`.\"\"\"\n    # Get user that sent /start and log his name\n    # Build InlineKeyboard where each button has a displayed text\n    # and a string as callback_data\n    # The keyboard is a list of button rows, where each row is in turn\n    # a list (hence `[[...]]`).\n    keyboard = [\n        [\n            InlineKeyboardButton(\"✍ Create Meal Session\", callback_data=\"create\")],\n        [   InlineKeyboardButton(\"🤝Join Meal Session\", callback_data=\"join\")],\n\n    ]\n    reply_markup 
= InlineKeyboardMarkup(keyboard)\n\n try:\n user = update.message.from_user\n except AttributeError:\n update.callback_query.edit_message_text(text=\"*What do you want to do 🤔 ?*\", parse_mode='Markdown', reply_markup=reply_markup)\n return START\n\n context.user_data['username'] = user.first_name + ' ' + user.last_name\n logger.info(\"User %s started the conversation.\", user.first_name)\n # Send message with text and appended InlineKeyboard\n update.message.reply_text(text=\"*What do you want to do 🤔 ?*\", parse_mode='Markdown', reply_markup=reply_markup)\n # Tell ConversationHandler that we're in state `FIRST` now\n return START\n\ndef facts_to_str(user_data: Dict[str, str]) -> str:\n facts = list()\n\n for key, value in user_data.items():\n facts.append(f'{key} - {value}')\n\n return \"\\n\".join(facts).join(['\\n', '\\n'])\n\ndef days(update: Update, context: CallbackContext) -> int:\n query = update.callback_query\n query.answer()\n context.user_data['days'] = query.data\n keyboard = [\n [\n InlineKeyboardButton(\"📃Main Menu\", callback_data=\"main\")],\n [ InlineKeyboardButton(\"🔙Back\", callback_data=\"back\")],\n\n\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n query.edit_message_text(text=\"*Please indicate the timeframe 🕔 of the meal, following this example: 1500-1600*\",parse_mode= 'Markdown',reply_markup=reply_markup)\n\n return TIME\n\ndef time(update: Update, context: CallbackContext) -> None:\n text = update.effective_message.text\n context.user_data['time'] = text\n keyboard = [\n [ InlineKeyboardButton(\"2\", callback_data=\"2\"),\n InlineKeyboardButton(\"3\", callback_data=\"3\"),\n InlineKeyboardButton(\"4\", callback_data=\"4\"),\n InlineKeyboardButton(\"5\", callback_data=\"5\")],\n [ InlineKeyboardButton(\"🔙Back\", callback_data=\"back\")],\n [ InlineKeyboardButton(\"📃Main Menu\", callback_data=\"main\")],\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n update.effective_message.reply_text(\n text=\"*Choose the max number 🔢 of pax*\",parse_mode='Markdown', reply_markup=reply_markup\n )\n\n return PAX\n\n\n\n\n\n\ndef pax(update: Update, context: CallbackContext) -> None:\n query = update.callback_query\n query.answer()\n keyboard = [\n [\n InlineKeyboardButton(\"🔙Back\", callback_data=\"back\")],\n [ InlineKeyboardButton(\"📃Main Menu\", callback_data=\"main\")],\n\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n query.edit_message_text(text=\"*Additional remarks such as location 🧭 and other preferences?*\",parse_mode= 'Markdown',reply_markup=reply_markup)\n\n return REMARKS\n\ndef create(update: Update, context: CallbackContext) -> None:\n \"\"\"Show new choice of buttons\"\"\"\n query = update.callback_query\n query.answer()\n keyboard = [\n [ InlineKeyboardButton(\"Mon\", callback_data=\"mon\"),\n InlineKeyboardButton(\"Tue\", callback_data=\"tues\"),\n InlineKeyboardButton(\"Wed\", callback_data=\"wed\"),\n InlineKeyboardButton(\"Thurs\", callback_data=\"thurs\"),\n InlineKeyboardButton(\"Fri\", callback_data=\"fri\"),\n InlineKeyboardButton(\"Sat\", callback_data=\"sat\"),\n InlineKeyboardButton(\"Sun\", callback_data=\"sun\")],\n [ InlineKeyboardButton(\"📃Main Menu\", callback_data=\"main\")]\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n query.edit_message_text(\n text=\"*Choose a day in the current academic week 📅:*\",parse_mode= 'Markdown', reply_markup=reply_markup\n )\n return DAYS\n\ndef join(update: Update, context: CallbackContext) -> None:\n \"\"\"Show new choice of buttons\"\"\"\n query = update.callback_query\n query.answer()\n 
keyboard = [\n [\n InlineKeyboardButton(\"🧐Browse active sessions\", url='https://t.me/test_channel12333')],\n [ InlineKeyboardButton(\"📃Main Menu\", callback_data=\"main\")],\n\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n query.edit_message_text(\n text=\"*Search🔍 for available sessions!*\",parse_mode= 'Markdown', reply_markup=reply_markup\n )\n return DAYS\n\ndef end(update: Update, context: CallbackContext) -> None:\n \"\"\"Returns `ConversationHandler.END`, which tells the\n ConversationHandler that the conversation is over\"\"\"\n text = update.effective_message.text\n context.user_data['choice'] = text\n keyboard = [\n [\n InlineKeyboardButton(\"🔙Back\", callback_data=\"back\")],\n [ InlineKeyboardButton(\"📃Main Menu\", callback_data=\"main\")],\n \n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n update.effective_message.reply_text('*Thank you for hosting a session!*',parse_mode='Markdown',reply_markup=reply_markup)\n return ConversationHandler.END\n\n\ndef main():\n # Create the Updater and pass it your bot's token.\n updater = Updater(\"TOKEN\", use_context=True)\n\n # Get the dispatcher to register handlers\n dispatcher = updater.dispatcher\n\n # Setup conversation handler with the states FIRST and SECOND\n # Use the pattern parameter to pass CallbackQueries with specific\n # data pattern to the corresponding handlers.\n # ^ means \"start of line/string\"\n # $ means \"end of line/string\"\n # So ^ABC$ will only allow 'ABC'\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n states={\n START: [\n CallbackQueryHandler(create, pattern='^create$'),\n #MessageHandler(Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_time),\n CallbackQueryHandler(join, pattern='^join$')\n ],\n DAYS: [\n CallbackQueryHandler(days, pattern='^mon$'),\n CallbackQueryHandler(days, pattern='^tues$'),\n CallbackQueryHandler(days, pattern='^wed$'),\n CallbackQueryHandler(days, pattern='^thurs$'),\n CallbackQueryHandler(days, pattern='^fri$'),\n CallbackQueryHandler(days, pattern='^sat$'),\n CallbackQueryHandler(days, pattern='^sun$'),\n CallbackQueryHandler(start, pattern='^main$')\n ],\n TIME: [\n MessageHandler(Filters.text & ~Filters.command, time),\n CallbackQueryHandler(create, pattern='^back$'),\n CallbackQueryHandler(start, pattern='^main$')\n ],\n PAX: [\n CallbackQueryHandler(pax, pattern='^2$'),\n CallbackQueryHandler(pax, pattern='^3$'),\n CallbackQueryHandler(pax, pattern='^4$'),\n CallbackQueryHandler(pax, pattern='^5$'),\n CallbackQueryHandler(days, pattern='^back$'),\n CallbackQueryHandler(start, pattern='^main$'),\n ],\n REMARKS: [\n MessageHandler(Filters.text & ~(Filters.command | Filters.regex('^Done$')), end)\n ]\n },\n fallbacks=[CommandHandler('start', start)],\n )\n\n\n # Add ConversationHandler to dispatcher that will be used for handling\n # updates\n\n\n dispatcher.add_handler(conv_handler)\n\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. 
This should be used most of the time, since\n    # start_polling() is non-blocking and will stop the bot gracefully.\n    updater.idle()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"playground/bot_prototyping.py","file_name":"bot_prototyping.py","file_ext":"py","file_size_in_byte":8628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"} +{"seq_id":"442698144","text":"import discord\nfrom discord.ext import commands\nfrom datetime import datetime, timedelta\nfrom .utils import helper_functions as hf\nimport re\nimport textblob\nfrom Levenshtein import distance as LDist\nimport string\nimport asyncio\nfrom urllib.error import HTTPError\nfrom collections import Counter\nfrom inspect import cleandoc\nfrom random import choice\n\nimport os\n\ndir_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nCOLOR_CHANNEL_ID = 577382927596257280\nBLACKLIST_CHANNEL_ID = 533863928263082014\nBANS_CHANNEL_ID = 329576845949534208\nMODERATING_CHANNEL_ID = 257990571103223809\nMODCHAT_SERVER_ID = 257984339025985546\nRYRY_SPAM_CHAN = 275879535977955330\nTRACEBACKS_CHAN = 554572239836545074\nJP_SERVER_ID = 189571157446492161\nSP_SERVER_ID = 243838819743432704\nCH_SERVER_ID = 266695661670367232\nCL_SERVER_ID = 320439136236601344\nRY_SERVER_ID = 275146036178059265\n\n\nENG_ROLE = {\n    266695661670367232: 266778623631949826,  # C-E Learning English Role\n    320439136236601344: 474825178204078081  # r/CL Learning English Role\n}\n\n\ndef blacklist_check():\n    async def pred(ctx):\n        if not ctx.guild:\n            return\n        if ctx.author in ctx.bot.get_guild(MODCHAT_SERVER_ID).members:\n            if ctx.guild.id == MODCHAT_SERVER_ID or hf.admin_check(ctx):\n                return True\n\n    return commands.check(pred)\n\n\nclass General(commands.Cog):\n    \"\"\"My custom cog that does stuff!\"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n        self.ignored_characters = []\n        hf.setup(bot)\n\n    @commands.Cog.listener()\n    async def on_message(self, msg):\n        if msg.author.bot:\n            if msg.author.id != 720900750724825138:  # a window for BurdBot to post questions to AOTW\n                return\n\n        if not self.bot.is_ready():\n            return\n\n        \"\"\"BurdBot's window to open questions in #audio_of_the_week\"\"\"\n        async def burdbot_window():\n            if msg.channel.id != 620997764524015647:  # aotw_feedback\n                return\n            if msg.author.id != 720900750724825138:  # burdbot\n                return\n            if not msg.attachments:\n                return\n            ctx = await self.bot.get_context(msg)\n            if \"AOTW recording\" in msg.content:\n                await ctx.invoke(self.bot.get_command(\"question\"), args=msg.content)\n        await burdbot_window()\n\n        # \"\"\"Messages/pings to Rai\"\"\"\n        # async def message_to_bot():\n        #     if msg.content:\n        #         if msg.content[0] == ';':\n        #             return\n        #     if (msg.channel == msg.author.dm_channel and msg.author.id not in [202995638860906496, 414873201349361664]) \\\n        #             or '270366726737231884' in msg.content:\n        #         if isinstance(msg.channel, discord.DMChannel):\n        #             embed = hf.green_embed(f\"DM from {msg.author.mention} \"\n        #                                    f\"({msg.author.name}#{msg.author.discriminator}) - \"\n        #                                    f\"[Jump URL]({msg.jump_url})\")\n        #             async for message in msg.channel.history(limit=2):\n        #                 if 'report' in message.content.casefold() and message.author == self.bot.user:\n        #                     return\n        #         else:\n        #             embed = hf.green_embed(f\"Ping from {msg.author.mention} \"\n        #                                    f\"({msg.author.name}#{msg.author.discriminator}) in {msg.channel.mention} \"\n        #                                    f\"({msg.guild.name}) - [Jump URL]({msg.jump_url})\")\n        #         if msg.content:\n        #             embed.add_field(name=\"Text\", value=msg.content[:1024])\n        #             if msg.content[1024:]:\n        #                 
embed.add_field(name=\"Text pt. 2\", value=msg.content[1024:])\n # if msg.attachments:\n # for attachment in msg.attachments:\n # embed.add_field(name=\"Attachment\", value=attachment.url)\n #\n # channel_id = str(msg.channel.id)\n # length = len(channel_id)\n # i = [channel_id[round(0 * length / 3): round(1 * length / 3)],\n # channel_id[round(1 * length / 3): round(2 * length / 3)],\n # channel_id[round(2 * length / 3): round(3 * length / 3)]]\n # color = {'r': int(i[0]) % 255, 'g': int(i[1]) % 255, 'b': int(i[2]) % 255}\n # embed.color = discord.Color.from_rgb(color['r'], color['g'], color['b'])\n #\n # spam_chan = self.bot.get_channel(RYRY_SPAM_CHAN)\n # await spam_chan.send(f\"{msg.channel.id} <@202995638860906496>\", embed=embed)\n # await message_to_bot()\n\n \"\"\"Message as the bot\"\"\"\n async def message_as_bot():\n if isinstance(msg.channel, discord.DMChannel) \\\n and msg.author.id == self.bot.owner_id and msg.content[0:3] == 'msg':\n await self.bot.get_channel(int(msg.content[4:22])).send(str(msg.content[22:]))\n\n await message_as_bot()\n\n \"\"\"Replace tatsumaki/nadeko serverinfo posts\"\"\"\n async def replace_tatsumaki_posts():\n if msg.content in ['t!serverinfo', 't!server', 't!sinfo', '.serverinfo', '.sinfo']:\n if msg.guild.id in [JP_SERVER_ID, SP_SERVER_ID, RY_SERVER_ID]:\n new_ctx = await self.bot.get_context(msg)\n await new_ctx.invoke(self.serverinfo)\n await replace_tatsumaki_posts()\n\n ##########################################\n\n if not msg.guild: # all code after this has msg.guild requirement\n return\n\n ##########################################\n\n \"antispam\"\n async def antispam_check():\n if str(msg.guild.id) in self.bot.db['antispam']:\n config = self.bot.db['antispam'][str(msg.guild.id)]\n else:\n return\n if not config['enable']:\n return\n if msg.channel.id in config['ignored']:\n return\n spam_count = 1\n\n def check(m):\n return m.guild == msg.guild and m.author == msg.author and m.content == msg.content\n while spam_count < config['message_threshhold']:\n try:\n await self.bot.wait_for('message', timeout=config['time_threshhold'], check=check)\n except asyncio.TimeoutError:\n return\n else:\n spam_count += 1\n\n reason = f\"Antispam: Sent the message `{msg.content[:400]}` {config['message_threshhold']} \" \\\n f\"times in {config['time_threshhold']} seconds.\"\n if config['action'] == 'ban':\n try:\n await msg.author.ban(reason=reason)\n except (discord.Forbidden, discord.HTTPException):\n pass\n elif config['action'] == 'kick':\n try:\n await msg.author.kick(reason=reason)\n except (discord.Forbidden, discord.HTTPException):\n pass\n elif config['action'] == 'mute':\n try:\n ctx = await self.bot.get_context(msg)\n ctx.author = self.bot.user\n await ctx.invoke(self.bot.get_command('mute'), '1h', str(msg.author.id), reason)\n if str(msg.guild.id) in self.bot.db['mod_channel']:\n mod_channel = self.bot.get_channel(self.bot.db['mod_channel'][str(ctx.guild.id)])\n if mod_channel:\n await hf.safe_send(mod_channel,\n embed=hf.red_embed(f\"Muted for 1h: {str(msg.author)} for {reason}\\n\"\n f\"[Jump URL]({msg.jump_url})\"))\n except (discord.Forbidden, discord.HTTPException):\n pass\n await antispam_check()\n\n \"automatic word filter\"\n async def wordfilter():\n if not msg.guild.me.guild_permissions.ban_members:\n return\n if str(msg.guild.id) not in self.bot.db['wordfilter']:\n return\n config = self.bot.db['wordfilter'][str(msg.guild.id)]\n if not config:\n return\n\n time_ago = datetime.utcnow() - msg.author.joined_at\n\n for filter_word in 
config:\n                if msg.content:\n                    if re.search(filter_word, msg.content, flags=re.I):\n                        if time_ago < timedelta(minutes=int(config[filter_word])):\n                            reason = f\"Rai automatic word filter ban:\\n{msg.content}\"[:512]\n                            if len(reason) > 509:\n                                reason = reason[:509] + \"...\"\n                            try:\n                                await asyncio.sleep(1)\n                                await msg.delete()\n                            except (discord.Forbidden, discord.NotFound):\n                                pass\n                            try:\n                                await asyncio.sleep(3)\n                                await msg.author.ban(reason=reason)\n                            except (discord.Forbidden, discord.HTTPException):\n                                pass\n        await wordfilter()\n\n        \"\"\"Ping me if someone says my name\"\"\"\n        async def mention_ping():\n            cont = str(msg.content).casefold()\n            if msg.author.bot or msg.author.id == 202995638860906496:\n                return\n            try:\n                if not msg.channel.permissions_for(msg.guild.get_member(202995638860906496)).read_messages:\n                    return  # I ain't trying to spy on people\n            except AttributeError:\n                pass\n            for word in cont.casefold().split():\n                for ignored_word in ['http', ':']:\n                    if ignored_word in word:\n                        cont = cont.replace(word, \"\")\n\n            found_word = False\n            ignored_words = ['bryan', 'aryan', 'biryani', 'ryan gosling', 'ryan-reynold', 'ryan reynold', 'ryan_army']\n            for word in ignored_words:\n                if word in cont.casefold():  # why do people say these so often...\n                    cont = re.sub(word, '', cont, flags=re.IGNORECASE)\n            if msg.guild:\n                if msg.guild.id == SP_SERVER_ID:\n                    cont = re.sub(r'ryan', '', cont, flags=re.IGNORECASE)\n\n            to_check_words = ['ryry', 'ryan', 'らいらい', 'ライライ', '来雷', '雷来']\n            for word in to_check_words:\n                if word in cont.casefold():\n                    found_word = True\n\n            if found_word:\n                spamChan = self.bot.get_channel(RYRY_SPAM_CHAN)\n                await spamChan.send(\n                    f'**By {msg.author.name} in {msg.channel.mention}** ({msg.channel.name}): '\n                    f'\\n{msg.content}'\n                    f'\\n{msg.jump_url} <@202995638860906496>'[:2000])\n\n        await mention_ping()\n\n        \"\"\"Self mute\"\"\"\n        try:\n            if self.bot.db['selfmute'][str(msg.guild.id)][str(msg.author.id)]['enable']:\n                try:\n                    await msg.delete()\n                except (discord.Forbidden, discord.NotFound):\n                    pass\n        except KeyError:\n            pass\n\n        \"\"\"check for servers of banned IDs\"\"\"\n        async def check_guilds():\n            if msg.guild.id == MODCHAT_SERVER_ID:\n                async def check_user(content):\n                    bans_channel = msg.channel\n                    re_result = re.findall('(?:^| |\\\\n)(\\\\d{17,22})', content)\n                    users = []\n                    if re_result:\n                        for user_id in [int(user_id) for user_id in re_result]:\n                            if user_id == 270366726737231884:\n                                continue\n                            user = self.bot.get_user(user_id)\n                            if user:\n                                users.append(user)\n                    for user in users:\n                        await hf.ban_check_servers(self.bot, bans_channel, user, ping=False)\n\n                await check_user(msg.content)\n                for embed in msg.embeds:\n                    if embed.description:\n                        await check_user(embed.description)\n        await check_guilds()\n\n        \"\"\"chinese server banned words\"\"\"\n        words = ['动态网自由门', '天安門', '天安门', '法輪功', '李洪志', 'Free Tibet', 'Tiananmen Square',\n                 '反右派鬥爭', 'The Anti-Rightist Struggle', '大躍進政策', 'The Great Leap Forward', '文化大革命',\n                 '人權', 'Human Rights', '民運', 'Democratization', '自由', 'Freedom', '獨立', 'Independence']\n        if msg.guild.id in [CH_SERVER_ID, 494502230385491978, CL_SERVER_ID, RY_SERVER_ID]:\n            word_count = 0\n            for word in words:\n                if word in msg.content:\n                    word_count += 1\n                if word_count == 5:\n                    mod_channel = self.bot.get_channel(self.bot.db['mod_channel'][str(msg.guild.id)])\n                    log_channel = self.bot.get_channel(self.bot.db['bans'][str(msg.guild.id)]['channel'])\n                    if datetime.utcnow() - msg.author.joined_at < timedelta(minutes=60):\n                        try:\n                            await msg.delete()\n                        except discord.Forbidden:\n                            await hf.safe_send(mod_channel,\n                                               f\"Rai is lacking 
the permission to delete messages for the Chinese \"\n f\"spam message.\")\n except discord.NotFound:\n pass\n\n # await msg.author.send(\"That message doesn't do anything to Chinese computers. It doesn't \"\n # \"get their internet shut down or get them arrested or anything. \"\n # \"It's just annoying, so please stop trying it.\")\n try:\n await asyncio.sleep(3)\n await msg.author.ban(reason=f\"__Reason__: Automatic ban: Chinese banned words spam\\n\"\n f\"{msg.content[:100]}\")\n except discord.Forbidden:\n await hf.safe_send(mod_channel,\n f\"I tried to ban someone for the Chinese spam message, but I lack \"\n f\"the permission to ban users.\")\n\n await hf.safe_send(log_channel, f\"Banned {msg.author.name} for the banned words spam message.\"\n f\"\\nMessage was posted in {msg.channel.mention}. Message:\"\n f\"\\n```{msg.content}\"[:1850] + '```')\n\n break\n else:\n await hf.safe_send(mod_channel,\n f\"Warning: {msg.author.name} may have said the banned words spam message\"\n f\"\\nMessage was posted in {msg.channel.mention}. Message:\"\n f\"\\n```{msg.content}\"[:1995] + '```')\n break\n\n \"\"\"best sex dating\"\"\"\n async def spam_account_bans():\n words = ['amazingsexdating', 'bestdatingforall', 'nakedphotos.club', 'privatepage.vip', 'viewc.site',\n 'libra-sale.io', 'ethway.io', 'omg-airdrop', 'linkairdrop', \"Airdrop Time!\"]\n try:\n for word in words:\n if word in msg.content:\n time_ago = datetime.utcnow() - msg.author.joined_at\n msg_text = f\"Bot spam message in [{msg.guild.name}] - [{msg.channel.name}] by \" \\\n f\"{msg.author.name} (joined {time_ago.seconds//3600}h \" \\\n f\"{time_ago.seconds%3600//60}m ago [{time_ago}])```{msg.content}```\"\n await self.bot.get_user(self.bot.owner_id).send(msg_text)\n if str(msg.author.guild.id) not in self.bot.db['auto_bans']:\n return\n if self.bot.db['auto_bans'][str(msg.author.guild.id)]['enable']:\n if time_ago < timedelta(minutes=20) or \\\n (msg.channel.id == 559291089018814464 and time_ago < timedelta(hours=5)):\n if msg.author.id in [202995638860906496, 414873201349361664]:\n return\n await msg.author.ban(reason=f'For posting spam link: {msg.content}',\n delete_message_days=1)\n self.bot.db['global_blacklist']['blacklist'].append(msg.author.id)\n channel = self.bot.get_channel(BLACKLIST_CHANNEL_ID)\n emb = hf.red_embed(f\"{msg.author.id} (automatic addition)\")\n emb.add_field(name=\"Reason\", value=msg.content)\n await hf.safe_send(channel, embed=emb)\n created_ago = datetime.utcnow() - msg.author.created_at\n joined_ago = datetime.utcnow() - msg.author.joined_at\n message = f\"**Banned a user for posting a {word} link.**\" \\\n f\"\\n**ID:** {msg.author.id}\" \\\n f\"\\n**Server:** {msg.author.guild.name}\" \\\n f\"\\n**Name:** {msg.author.name} {msg.author.mention}\" \\\n f\"\\n**Account creation:** {msg.author.created_at} \" \\\n f\"({created_ago.days}d {created_ago.seconds//3600}h ago)\" \\\n f\"\\n**Server join:** {msg.author.joined_at} \" \\\n f\"({joined_ago.days}d {joined_ago.seconds//3600}h ago)\" \\\n f\"\\n**Message:** {msg.content}\"\n emb2 = hf.red_embed(message)\n emb2.color = discord.Color(int('000000', 16))\n await self.bot.get_channel(BANS_CHANNEL_ID).send(embed=emb2)\n if str(msg.guild.id) in self.bot.db['bans']:\n if self.bot.db['bans'][str(msg.guild.id)]['channel']:\n channel_id = self.bot.db['bans'][str(msg.guild.id)]['channel']\n await self.bot.get_channel(channel_id).send(embed=emb2)\n return\n\n except KeyError as e:\n print(f'>>passed for key error on amazingsexdating: {e}<<')\n pass\n except 
AttributeError as e:\n print(f'>>passed for attributeerror in amazingsexdating: {e}<<')\n pass\n\n await spam_account_bans()\n\n \"\"\"spanish server welcome channel module\"\"\"\n async def smart_welcome(msg):\n if msg.channel.id == SP_SERVER_ID:\n content = re.sub('> .*\\n', '', msg.content.casefold()) # remove quotes in case the user quotes bot\n content = content.translate(str.maketrans('', '', string.punctuation)) # remove punctuation\n for word in ['hello', 'hi', 'hola', 'thanks', 'gracias']:\n if content == word:\n return # ignore messages that are just these single words\n if msg.content == '<@270366726737231884>': # ping to Rai\n return # ignore pings to Rai\n english_role = msg.guild.get_role(243853718758359040)\n spanish_role = msg.guild.get_role(243854128424550401)\n other_role = msg.guild.get_role(247020385730691073)\n for role in [english_role, spanish_role, other_role]:\n if role in msg.author.roles:\n return # ignore messages by users with tags already\n if datetime.utcnow() - msg.author.joined_at < timedelta(seconds=3):\n return\n\n english = ['english', 'inglés', 'anglohablante', 'angloparlante']\n spanish = ['spanish', 'español', 'hispanohablante', 'hispanoparlante', 'castellano']\n other = ['other', 'neither', 'otro', 'otra', 'arabic', 'french', 'árabe', 'francés', 'portuguese',\n 'brazil', 'portuguesa', 'brazilian']\n both = ['both', 'ambos', 'los dos']\n txt1 = ''\n language_score = {'english': 0, 'spanish': 0, 'other': 0, 'both': 0} # eng, sp, other, both\n split = content.split()\n\n def check_language(language, index):\n skip_next_word = False # just defining the variable\n for language_word in language: # language = one of the four word lists above\n for content_word in split: # content_word = the words in their message\n if len(content_word) <= 3:\n continue # skip words three letters or less\n if content_word in ['there']:\n continue # this triggers the word \"other\" so I skip it\n if skip_next_word: # if i marked this true from a previous loop...\n skip_next_word = False # ...first, reset it to false...\n continue # then skip this word\n if content_word.startswith(\"learn\") or content_word.startswith('aprend') \\\n or content_word.startswith('estud') or content_word.startswith('stud') or \\\n content_word.startswith('fluent'):\n skip_next_word = True # if they say any of these words, skip the *next* word\n continue # example: \"I'm learning English, but native Spanish\", skip \"English\"\n if LDist(language_word, content_word) < 3:\n language_score[language[0]] += 1\n\n check_language(english, 0) # run the function I just defined four times, once for each of these lists\n check_language(spanish, 1)\n check_language(other, 2)\n check_language(both, 3)\n\n num_of_hits = 0\n for lang in language_score:\n if language_score[lang]: # will add 1 if there's any value in that dictionary entry\n num_of_hits += 1 # so \"english spanish\" gives 2, but \"english english\" gives 1\n\n if num_of_hits != 1: # the bot found more than one language statement in their message, so ask again\n await msg.channel.send(f\"{msg.author.mention}\\n\"\n f\"Hello! Welcome to the server! Is your **native language**: \"\n f\"__English__, __Spanish__, __both__, or __neither__?\\n\"\n f\"¡Hola! ¡Bienvenido(a) al servidor! ¿Tu **idioma materno** es: \"\n f\"__el inglés__, __el español__, __ambos__ u __otro__?\")\n return\n\n if msg.content.startswith(';') or msg.content.startswith('.'):\n return\n\n if language_score['english']:\n txt1 = \" I've given you the `English Native` role! 
¡Te he asignado el rol de `English Native`!\\n\\n\"\n try:\n await msg.author.add_roles(english_role)\n except discord.NotFound:\n return\n if language_score['spanish']:\n txt1 = \" I've given you the `Spanish Native` role! ¡Te he asignado el rol de `Spanish Native!`\\n\\n\"\n try:\n await msg.author.add_roles(spanish_role)\n except discord.NotFound:\n return\n if language_score['other']:\n txt1 = \" I've given you the `Other Native` role! ¡Te he asignado el rol de `Other Native!`\\n\\n\"\n try:\n await msg.author.add_roles(other_role)\n except discord.NotFound:\n return\n if language_score['both']:\n txt1 = \" I've given you both roles! ¡Te he asignado ambos roles! \"\n try:\n await msg.author.add_roles(english_role, spanish_role)\n except discord.NotFound:\n return\n\n txt2 = \"You can add more roles in <#703075065016877066>:\\n\" \\\n \"Puedes añadirte más en <#703075065016877066>:\\n\\n\" \\\n \"Before using the server, please read the rules in <#243859172268048385>.\\n\" \\\n \"Antes de usar el servidor, por favor lee las reglas en <#499544213466120192>.\"\n await hf.safe_send(msg.channel, msg.author.mention + txt1 + txt2)\n await smart_welcome(msg)\n\n \"\"\"mods ping on spanish server\"\"\"\n if msg.guild.id in [SP_SERVER_ID, JP_SERVER_ID]:\n if '<@&642782671109488641>' in msg.content or '<@&240647591770062848>' in msg.content:\n em = discord.Embed(title=f\"Staff Ping\",\n description=f\"From {msg.author.mention} ({msg.author.name}) \"\n f\"in {msg.channel.mention}\\n[Jump URL]({msg.jump_url})\",\n color=discord.Color(int('FFAA00', 16)),\n timestamp=datetime.utcnow())\n content = msg.content.replace('<@&642782671109488641>', '').replace('<@&240647591770062848>', '')\n if content:\n em.add_field(name=\"Content\", value=content)\n for user in self.bot.db['staff_ping'][str(msg.guild.id)]['users']:\n await hf.safe_send(self.bot.get_user(user), embed=em)\n\n if 'channel' in self.bot.db['staff_ping'][str(msg.guild.id)]:\n notif_channel = self.bot.get_channel(self.bot.db['staff_ping'][str(msg.guild.id)]['channel'])\n await hf.safe_send(notif_channel, embed=em)\n elif str(msg.guild.id) in self.bot.db['submod_channel']:\n notif_channel = self.bot.get_channel(self.bot.db['submod_channel'][str(msg.guild.id)])\n await hf.safe_send(notif_channel, embed=em)\n\n \"\"\"Replace .mute on spanish server\"\"\"\n if msg.guild.id == SP_SERVER_ID:\n if msg.content.startswith('.mute'):\n ctx = await self.bot.get_context(msg)\n if not hf.submod_check(ctx):\n return\n args = msg.content.split()[1:]\n if len(args) == 1:\n await ctx.invoke(self.bot.get_command('mute'), args[0])\n elif len(args) > 1:\n await ctx.invoke(self.bot.get_command('mute'), args[0], member=' '.join(args[1:]))\n else:\n await hf.safe_send(ctx, \"Use `;mute` instead\")\n\n \"\"\"super_watch\"\"\"\n async def super_watch():\n try:\n config = self.bot.db['super_watch'][str(msg.guild.id)]\n except KeyError:\n return\n if str(msg.author.id) in config['users']:\n desc = \"❗ \"\n which = 'sw'\n elif hf.count_messages(msg.author) < 10 and config.get('enable', None):\n minutes_ago_created = int(((datetime.utcnow() - msg.author.created_at).total_seconds()) // 60)\n if minutes_ago_created > 60 or msg.channel.id == SP_SERVER_ID:\n return\n desc = '🆕 '\n which = 'new'\n else:\n return\n\n desc += f\"**{msg.author.name}#{msg.author.discriminator}** ({msg.author.id})\"\n emb = discord.Embed(description=desc, color=0x00FFFF, timestamp=datetime.utcnow())\n emb.set_footer(text=f\"#{msg.channel.name}\")\n\n link = f\"\\n([Jump URL]({msg.jump_url})\"\n if 
which == 'sw':\n if config['users'][str(msg.author.id)]:\n link += f\" - [Entry Reason]({config['users'][str(msg.author.id)]})\"\n link += ')'\n emb.add_field(name=\"Message:\", value=msg.content[:2000-len(link)] + link)\n\n await hf.safe_send(self.bot.get_channel(config['channel']), embed=emb)\n await super_watch()\n\n \"\"\"Lang check: will check if above 3 characters + hardcore, or if above 15 characters + stats\"\"\"\n async def lang_check():\n lang = None\n hardcore = False\n if str(msg.guild.id) not in self.bot.stats:\n return None, False\n stripped_msg = hf.rem_emoji_url(msg)\n check_lang = False\n\n if msg.guild.id == SP_SERVER_ID and '*' not in msg.content and len(stripped_msg):\n if stripped_msg[0] not in '=;>' and len(stripped_msg) > 3:\n if msg.channel.id not in self.bot.db['hardcore'][str(SP_SERVER_ID)]['ignore']:\n hardcore_role = msg.guild.get_role(self.bot.db['hardcore'][str(SP_SERVER_ID)]['role'])\n if hardcore_role in msg.author.roles:\n check_lang = True\n hardcore = True\n\n if str(msg.guild.id) in self.bot.stats:\n if len(stripped_msg) > 15 and self.bot.stats[str(msg.guild.id)].get('enable', None):\n check_lang = True\n\n if check_lang:\n try:\n if msg.guild.id == SP_SERVER_ID and msg.channel.id != 817074401680818186:\n if hasattr(self.bot, 'langdetect'):\n lang = hf.detect_language(stripped_msg)\n else:\n return None, False\n else:\n lang = await hf.textblob_detect_language(stripped_msg)\n except (textblob.exceptions.TranslatorError, HTTPError, TimeoutError):\n pass\n return lang, hardcore\n lang, hardcore = await lang_check()\n\n \"\"\"Message counting\"\"\"\n # 'stats':\n # guild id: str:\n # 'enable' = True/False\n # 'messages' (for ,u):\n # {20200403:\n # {user id: str:\n # 'emoji': {emoji1: 1, emoji2: 3},\n # 'lang': {'eng': 25, 'sp': 30},\n # 'channels': {\n # channel id: str: 30,\n # channel id: str: 20}\n # user_id2:\n # emoji: {emoji1: 1, emoji2: 3},\n # lang: {'eng': 25, 'sp': 30},\n # channels: {\n # channel1: 40,\n # channel2: 10}\n # ...}\n # 20200404:\n # {user_id1:\n # emoji: {emoji1: 1, emoji2: 3},\n # lang: {'eng': 25, 'sp': 30},\n # channels: {\n # channel1: 30,\n # channel2: 20}\n # user_id2:\n # emoji: {emoji1: 1, emoji2: 3},\n # lang: {'eng': 25, 'sp': 30},\n # channels: {\n # channel1: 40,\n # channel2: 10}\n # ...}\n # ...\n async def msg_count():\n if msg.author.bot:\n return\n if str(msg.guild.id) not in self.bot.stats:\n return\n if not self.bot.stats[str(msg.guild.id)]['enable']:\n return\n\n config = self.bot.stats[str(msg.guild.id)]\n date_str = datetime.utcnow().strftime(\"%Y%m%d\")\n if date_str not in config['messages']:\n config['messages'][date_str] = {}\n today = config['messages'][date_str]\n author = str(msg.author.id)\n channel = str(msg.channel.id)\n\n # message count\n today.setdefault(author, {})\n today[author].setdefault('channels', {})\n today[author]['channels'][channel] = today[author]['channels'].get(channel, 0) + 1\n\n # emojis\n emojis = re.findall(':([A-Za-z0-9\\_]+):', msg.content)\n for character in msg.content:\n if hf.is_emoji(character):\n emojis.append(character)\n if hf.is_ignored_emoji(character) and character not in self.ignored_characters:\n self.ignored_characters.append(character)\n\n if emojis:\n today[author].setdefault('emoji', {})\n for emoji in emojis:\n if emoji in ['、']:\n continue\n today[author]['emoji'][emoji] = today[author]['emoji'].get(emoji, 0) + 1\n if lang: # language is detected in separate lang_check function\n today[author].setdefault('lang', {})\n today[author]['lang'][lang] = 
today[author]['lang'].get(lang, 0) + 1\n await msg_count()\n\n \"\"\"Ultra Hardcore\"\"\"\n await hf.uhc_check(msg)\n\n \"\"\"Chinese server hardcore mode\"\"\"\n async def cn_lang_check(check_hardcore_role=True):\n content = re.sub(\"(>>>|>) .*$\\n?\", \"\", msg.content, flags=re.M) # removes lines that start with a quote\n if len(content) > 3:\n if check_hardcore_role:\n try:\n role = msg.guild.get_role(self.bot.db['hardcore'][str(msg.guild.id)]['role'])\n except (KeyError, AttributeError):\n return\n\n if not hasattr(msg.author, 'roles'):\n return\n if role not in msg.author.roles:\n return\n\n learning_eng = msg.guild.get_role(ENG_ROLE[msg.guild.id]) # this function is only called for two guilds\n\n ratio = hf.jpenratio(content)\n if ratio is not None: # it might be \"0\" so I can't do \"if ratio\"\n if learning_eng in msg.author.roles:\n if ratio < .55:\n try:\n await msg.delete()\n except discord.errors.NotFound:\n pass\n if len(content) > 30:\n await hf.long_deleted_msg_notification(msg)\n else:\n if ratio > .45:\n try:\n await msg.delete()\n except discord.errors.NotFound:\n pass\n if len(content) > 60:\n await hf.long_deleted_msg_notification(msg)\n\n if msg.guild.id in [CH_SERVER_ID, CL_SERVER_ID]:\n try:\n if msg.channel.id in self.bot.db['forcehardcore']:\n await cn_lang_check(check_hardcore_role=False)\n\n elif msg.guild.id == CH_SERVER_ID:\n if ('*' not in msg.content\n and msg.channel.id not in self.bot.db['hardcore'][str(CH_SERVER_ID)]['ignore']):\n await cn_lang_check()\n except KeyError:\n self.bot.db['forcehardcore'] = []\n\n \"\"\"Spanish server hardcore\"\"\"\n async def spanish_server_hardcore():\n if not hardcore: # this should be set in the lang_check function\n return\n learning_eng = msg.guild.get_role(247021017740869632)\n learning_sp = msg.guild.get_role(297415063302832128)\n if learning_eng in msg.author.roles: # learning English, delete all Spanish\n if lang == 'es':\n try:\n await msg.delete()\n except discord.errors.NotFound:\n return\n if len(msg.content) > 30:\n await hf.long_deleted_msg_notification(msg)\n elif learning_sp in msg.author.roles: # learning Spanish, delete all English\n if 'holi' in msg.content.casefold():\n return\n if lang == 'en':\n try:\n await msg.delete()\n except discord.errors.NotFound:\n return\n if len(msg.content) > 30:\n await hf.long_deleted_msg_notification(msg)\n else:\n try:\n await msg.author.send(\"You have hardcore enabled but you don't have the proper \"\n \"learning role. Please attach either 'Learning Spanish' or \"\n \"'Learning English' to properly use hardcore mode, or take \"\n \"off hardcore mode using the reactions in the server rules \"\n \"page\")\n except discord.errors.Forbidden:\n pass\n await spanish_server_hardcore()\n\n \"\"\"no filter hc\"\"\"\n async def no_filter_hc():\n if msg.channel.id == 193966083886153729:\n jpRole = msg.guild.get_role(196765998706196480)\n enRole = msg.guild.get_role(197100137665921024)\n if jpRole in msg.author.roles and enRole in msg.author.roles:\n return\n ratio = hf.jpenratio(msg.content.casefold())\n nf = \"<#193966083886153729>\"\n if ratio is None:\n return\n if jpRole in msg.author.roles:\n if ratio < .55:\n try:\n await msg.delete()\n await msg.author.send(f\"I've deleted your message from {nf}. In that channel, Japanese \"\n \"people must speak English only. 
Here is the message I deleted:\")\n\n await msg.author.send(f\"```{msg.content[:1993]}```\")\n except (discord.errors.NotFound, discord.Forbidden):\n pass\n else:\n if ratio > .45:\n try:\n await msg.delete()\n await msg.author.send(f\"I've deleted your message from {nf}. In that channel, you must \"\n \"speak Japanese only. Here is the message I deleted:\")\n await msg.author.send(f\"```{msg.content[:1993]}```\")\n except (discord.errors.NotFound, discord.Forbidden):\n pass\n await no_filter_hc()\n\n @commands.command(hidden=True)\n @commands.bot_has_permissions(send_messages=True)\n async def help(self, ctx, *, arg=''):\n async def check_command(command):\n try:\n a = await command.can_run(ctx)\n except commands.BotMissingPermissions:\n a = False\n except commands.CheckFailure:\n a = False\n b = not command.hidden\n return a and b\n\n if arg: # user wants help on a specific command/cog\n requested = self.bot.get_command(arg)\n which = 'command'\n if not requested:\n requested = self.bot.get_cog(arg)\n which = 'cog'\n if not requested:\n await hf.safe_send(ctx, \"I was unable to find the command or command module you requested.\")\n return\n if which == 'command':\n message = f\"**;{requested.qualified_name}**\\n\"\n if requested.aliases:\n message += f\"Aliases: `{'`, `'.join(requested.aliases)}`\\n\"\n if isinstance(requested, commands.Group):\n usable_commands = sorted([c.name for c in requested.commands if await check_command(c)])\n if usable_commands:\n message += f\"Subcommands: `{'`, `'.join(usable_commands)}`\\n\" \\\n f\"Use subcommands by chaining with the command group name. For example, \" \\\n f\"`;{requested.name} {usable_commands[0]}`\\n\"\n\n message += '\\n'\n if requested.help:\n message += requested.help\n emb = hf.green_embed(cleandoc(message))\n await hf.safe_send(ctx, embed=emb)\n\n else: # requested a cog\n message = f\"**;{requested.qualified_name}**\\n\"\n c_list = sorted([c.name for c in requested.get_commands() if await check_command(c)])\n if c_list:\n message += f\"Commands: `{'`, `'.join(c_list)}`\\n\\n\"\n else:\n message += '\\n\\n'\n message += requested.description\n emb = hf.green_embed(cleandoc(message))\n await hf.safe_send(ctx, embed=emb)\n\n else: # user wants to see full command list\n cmd_dict = {}\n to_send = \"Type `;help ` for more info on any command or category. 
For (subcommands), chain with\" \\\n \" the parent command.\\n\\n\"\n for cog in self.bot.cogs:\n cmd_dict[cog] = []\n for command in self.bot.cogs[cog].get_commands():\n if await check_command(command):\n if isinstance(command, commands.Group):\n to_append = [command.name, [c.name for c in command.commands if await check_command(c)]]\n if to_append[1]:\n cmd_dict[cog].append(f\"`{to_append[0]}` (`{'`, `'.join(sorted(to_append[1]))}`)\")\n else:\n cmd_dict[cog].append(f\"`{to_append[0]}`\")\n else:\n cmd_dict[cog].append(f\"`{command.name}`\")\n\n for cog in sorted([name for name in cmd_dict]):\n if cmd_dict[cog]:\n to_send += f\"__**{cog}**__ {', '.join(sorted(cmd_dict[cog]))}\\n\"\n await hf.safe_send(ctx, to_send)\n\n @commands.command()\n @commands.check(lambda ctx: ctx.guild.id == 759132637414817822 if ctx.guild else False)\n async def risk(self, ctx):\n \"\"\"Typing this command will sub you to pings for when it's your turn.\"\"\"\n config = self.bot.db['risk']['sub']\n if str(ctx.author.id) in config:\n config[str(ctx.author.id)] = not config[str(ctx.author.id)]\n else:\n config[str(ctx.author.id)] = True\n if config[str(ctx.author.id)]:\n await hf.safe_send(ctx, \"You will now receive pings when it's your turn.\")\n else:\n await hf.safe_send(ctx, \"You will no longer receive pings when it's your turn.\")\n\n @commands.command()\n async def topic(self, ctx):\n \"\"\"Provides a random conversation topic.\n Hint: make sure you also answer \"why\". Challenge your friends on their answers.\n If you disagree with their answer, talk it out.\"\"\"\n topics = [line.rstrip('\\n') for line in open(f\"{dir_path}/cogs/utils/conversation_topics.txt\", 'r',\n encoding='utf8')]\n topic = choice(topics)\n while topic.startswith('#'):\n topic = choice(topics)\n try:\n await hf.safe_send(ctx, topic)\n except discord.Forbidden:\n pass\n\n @commands.command()\n @commands.is_owner()\n async def get_emojis(self, ctx):\n emojis = ctx.guild.emojis\n index = 1\n for emoji in emojis:\n if emoji.animated:\n continue\n with open(f\"{dir_path}\\emojis\\{emoji.name}.png\", 'wb') as im:\n await emoji.url.save(im)\n index += 1\n\n @commands.command()\n @commands.guild_only()\n async def inrole(self, ctx, *, role_name):\n \"\"\"Type `;inrole ` to see a list of users in a role.\"\"\"\n role_name = role_name.casefold()\n role = discord.utils.find(lambda i: i.name.casefold() == role_name, ctx.guild.roles)\n if not role:\n for i in ctx.guild.roles:\n if i.name.casefold().startswith(role_name):\n role = i\n break\n if not role:\n await hf.safe_send(ctx, \"I couldn't find the role you specified.\")\n return\n emb = discord.Embed(title=f\"**List of members in {role.name} role - {len(role.members)}**\",\n description=\"\",\n color=0x00FF00)\n members = sorted(role.members, key=lambda m: m.name.casefold())\n for member in members:\n new_desc = emb.description + f\"{member.name}#{member.discriminator}\\n\"\n if len(new_desc) < 2045:\n emb.description = new_desc\n else:\n emb.description += \"...\"\n break\n await hf.safe_send(ctx, embed=emb)\n\n @commands.group(aliases=['hc'], invoke_without_command=True)\n @commands.guild_only()\n @commands.check(lambda ctx: ctx.guild.id in [SP_SERVER_ID, CH_SERVER_ID] if ctx.guild else False)\n async def hardcore(self, ctx):\n \"\"\"Adds/removes the hardcore role from you.\"\"\"\n role = ctx.guild.get_role(self.bot.db['hardcore'][str(ctx.guild.id)]['role'])\n if role in ctx.author.roles:\n await ctx.author.remove_roles(role)\n try:\n await hf.safe_send(ctx, \"I've removed hardcore from 
you.\")\n except discord.Forbidden:\n pass\n else:\n await ctx.author.add_roles(role)\n await hf.safe_send(ctx, \"I've added hardcore to you. You can only speak in the language you're learning.\")\n\n @commands.command(aliases=['forcehardcore', 'forcedhardcore'])\n @commands.guild_only()\n @commands.check(lambda ctx: ctx.guild.id in [CH_SERVER_ID, CL_SERVER_ID] if ctx.guild else False)\n @commands.bot_has_permissions(manage_messages=True)\n @hf.is_admin()\n async def force_hardcore(self, ctx):\n try:\n if ctx.channel.id in self.bot.db['forcehardcore']:\n self.bot.db['forcehardcore'].remove(ctx.channel.id)\n await hf.safe_send(ctx, f\"Removed {ctx.channel.name} from list of channels for forced hardcore mode\")\n else:\n self.bot.db['forcehardcore'].append(ctx.channel.id)\n await hf.safe_send(ctx, f\"Added {ctx.channel.name} to list of channels for forced hardcore mode\")\n except KeyError:\n self.bot.db['forcehardcore'] = [ctx.channel.id]\n await hf.safe_send(ctx, f\"Created forced hardcore mode config; \"\n f\"added {ctx.channel.name} to list of channels for forced hardcore mode\")\n\n @hardcore.command()\n async def ignore(self, ctx):\n \"\"\"Ignores a channel for hardcore mode.\"\"\"\n if str(ctx.guild.id) in self.bot.db['hardcore']:\n config = self.bot.db['hardcore'][str(ctx.guild.id)]\n else:\n return\n try:\n if ctx.channel.id not in config['ignore']:\n config['ignore'].append(ctx.channel.id)\n await hf.safe_send(ctx, f\"Added {ctx.channel.name} to list of ignored channels for hardcore mode\")\n else:\n config['ignore'].remove(ctx.channel.id)\n await hf.safe_send(ctx, f\"Removed {ctx.channel.name} from list of ignored channels for hardcore mode\")\n except KeyError:\n config['ignore'] = [ctx.channel.id]\n await hf.safe_send(ctx, f\"Added {ctx.channel.name} to list of ignored channels for hardcore mode\")\n\n @hardcore.command()\n async def list(self, ctx):\n \"\"\"Lists the channels in hardcore mode.\"\"\"\n channels = []\n try:\n for channel_id in self.bot.db['hardcore'][str(ctx.guild.id)]['ignore']:\n channel = self.bot.get_channel(int(channel_id))\n if channel:\n channels.append(channel)\n else:\n self.bot.db['hardcore'][str(ctx.guild.id)]['ignore'].remove(channel_id)\n await hf.safe_send(ctx, f\"Removed {channel_id} from list of excepted channels (couldn't find it).\")\n except KeyError:\n return\n if channels:\n string = \"__List of channels excepted from hardcore__:\\n#\" + '\\n#'.join([c.name for c in channels])\n await hf.safe_send(ctx, string)\n\n @commands.group(hidden=True, aliases=['lh'], invoke_without_command=True)\n async def lovehug(self, ctx, url=None):\n \"\"\"A command group for subscribing to lovehug mangas.\"\"\"\n await ctx.invoke(self.lovehug_add, url)\n\n @lovehug.command(name='add')\n async def lovehug_add(self, ctx, url):\n \"\"\"Adds a URL to your subscriptions.\"\"\"\n search = await ctx.invoke(self.bot.get_command('lovehug_get_chapter'), url)\n if isinstance(search, str):\n if search.startswith('html_error'):\n await hf.safe_send(ctx, search)\n return\n if search.startswith('invalid_url'):\n await hf.safe_send(ctx, search)\n return\n if not search:\n await hf.safe_send(ctx, \"The search failed to find a chapter\")\n return\n if url not in self.bot.db['lovehug']:\n self.bot.db['lovehug'][url] = {'last': f\"{url}{search['href']}\",\n 'subscribers': [ctx.author.id]}\n else:\n if ctx.author.id not in self.bot.db['lovehug'][url]['subscribers']:\n self.bot.db['lovehug'][url]['subscribers'].append(ctx.author.id)\n else:\n await hf.safe_send(ctx, \"You're already 
        if url not in self.bot.db['lovehug']:\n            self.bot.db['lovehug'][url] = {'last': f\"{url}{search['href']}\",\n                                           'subscribers': [ctx.author.id]}\n        else:\n            if ctx.author.id not in self.bot.db['lovehug'][url]['subscribers']:\n                self.bot.db['lovehug'][url]['subscribers'].append(ctx.author.id)\n            else:\n                await hf.safe_send(ctx, \"You're already subscribed to this manga.\")\n                return\n        await hf.safe_send(ctx, f\"The latest chapter is: {url}{search['href']}\\n\\n\"\n                                f\"I'll tell you next time a chapter is uploaded.\")\n\n    @lovehug.command(name='remove')\n    async def lovehug_remove(self, ctx, url):\n        \"\"\"Unsubscribes you from a manga. Input the URL: `;lh remove <url>`.\"\"\"\n        if url not in self.bot.db['lovehug']:\n            await hf.safe_send(ctx, \"No one is subscribed to that manga. Check your URL.\")\n            return\n        else:\n            if ctx.author.id in self.bot.db['lovehug'][url]['subscribers']:\n                self.bot.db['lovehug'][url]['subscribers'].remove(ctx.author.id)\n                await hf.safe_send(ctx, \"You've been unsubscribed from that manga.\")\n                if len(self.bot.db['lovehug'][url]['subscribers']) == 0:\n                    del self.bot.db['lovehug'][url]\n            else:\n                await hf.safe_send(ctx, \"You're not subscribed to that manga.\")\n                return\n\n    @lovehug.command(name='list')\n    async def lovehug_list(self, ctx):\n        \"\"\"Lists the manga you subscribed to.\"\"\"\n        subscriptions = []\n        for url in self.bot.db['lovehug']:\n            if ctx.author.id in self.bot.db['lovehug'][url]['subscribers']:\n                subscriptions.append(f\"<{url}>\")\n        subs_list = '\\n'.join(subscriptions)\n        if subscriptions:\n            await hf.safe_send(ctx, f\"The list of mangas you're subscribed to:\\n{subs_list}\")\n        else:\n            await hf.safe_send(ctx, \"You're not subscribed to any mangas.\")\n\n    @commands.command(aliases=['git'])\n    @commands.bot_has_permissions(send_messages=True)\n    async def github(self, ctx):\n        \"\"\"Gives my github page\"\"\"\n        await hf.safe_send(ctx, 'https://github.com/ryry013/Rai')\n\n    @commands.command()\n    @commands.bot_has_permissions(send_messages=True)\n    async def punch(self, ctx, user: discord.Member = None):\n        \"\"\"A punch command I made as a test\"\"\"\n        if not user:\n            user = ctx.author\n        await hf.safe_send(ctx, \"ONE PUNCH! And \" + user.mention + \" is out! ლ(ಠ益ಠლ)\")\n\n    @commands.command()\n    @commands.bot_has_permissions(send_messages=True)\n    async def ping(self, ctx, x=4):\n        \"\"\"Shows the bot's latency in seconds, rounded to x digits (default 4)\"\"\"\n        await hf.safe_send(ctx, str(round(self.bot.latency, x)) + 's')\n\n    @commands.command()\n    @commands.bot_has_permissions(send_messages=True)\n    async def invite(self, ctx):\n        \"\"\"Gives an invite to bring this bot to your server\"\"\"\n        modchat = self.bot.get_guild(MODCHAT_SERVER_ID)\n        if modchat:\n            members = modchat.members\n        else:\n            members = []\n        if ctx.author in members or ctx.author.id == self.bot.owner_id:\n            await hf.safe_send(ctx, discord.utils.oauth_url(self.bot.user.id,\n                                                            permissions=discord.Permissions(permissions=27776)))\n        else:\n            await hf.safe_send(ctx, \"Sorry, the bot is currently not public. \"\n                                    \"The bot owner can send you an invite link.\")\n\n    @commands.Cog.listener()\n    async def on_reaction_add(self, reaction, user: discord.Member):\n        \"\"\"removes people from the waiting list for ;report if they react with '🚫' to a certain message\"\"\"\n        async def remove_from_waiting_list():\n            if reaction.emoji == '🚫':\n                if user == self.bot.user:\n                    return\n                if reaction.message.channel == user.dm_channel:\n                    config = self.bot.db['report']\n                    for guild_id in config:\n                        if user.id in config[guild_id]['waiting_list']:\n                            config[guild_id]['waiting_list'].remove(user.id)\n                            await user.send(\"Understood. You've been removed from the waiting list.
Have a nice day.\")\n\n mod_channel = self.bot.get_channel(self.bot.db[\"mod_channel\"][guild_id])\n msg_to_mod_channel = f\"The user {user.name} was previously on the wait list for the \" \\\n f\"report room but just removed themselves.\"\n await hf.safe_send(mod_channel, msg_to_mod_channel)\n return\n await user.send(\"You aren't on the waiting list.\")\n await remove_from_waiting_list()\n\n \"I or people with manage messages permission can delete bot messages by attaching X or trash can\"\n async def delete_rai_message():\n if str(reaction.emoji) in '🗑❌':\n if reaction.message.author == self.bot.user and \\\n (user.id == self.bot.owner_id or\n reaction.message.channel.permissions_for(user).manage_messages):\n await reaction.message.delete()\n await delete_rai_message()\n\n \"Count emojis for stats\"\n def count_emojis_for_stats():\n if user.bot:\n return # ignore reactions from bots\n if not hasattr(user, 'guild'):\n return # if not in a guild\n if str(user.guild.id) not in self.bot.stats:\n return # if guild not in stats counting module\n if self.bot.stats[str(user.guild.id)]['enable']:\n try:\n emoji = reaction.emoji.name\n except AttributeError:\n emoji = reaction.emoji\n config = self.bot.stats[str(user.guild.id)]\n date_str = datetime.utcnow().strftime(\"%Y%m%d\")\n if date_str not in config['messages']:\n config['messages'][date_str] = {}\n today = config['messages'][date_str]\n today.setdefault(str(user.id), {})\n today[str(user.id)].setdefault('emoji', {})\n today[str(user.id)]['emoji'][emoji] = today[str(user.id)]['emoji'].get(emoji, 0) + 1\n count_emojis_for_stats()\n\n \"Remove reactions for if you're self muted\"\n async def remove_selfmute_reactions():\n if not reaction.message.guild:\n return\n try:\n if self.bot.db['selfmute'][str(reaction.message.guild.id)][str(user.id)]['enable']:\n try:\n await reaction.remove(user)\n except (discord.Forbidden, discord.NotFound):\n pass\n except KeyError:\n pass\n await remove_selfmute_reactions()\n\n def reactionroles_get_role(self, payload, guild):\n guild_id = str(payload.guild_id)\n message_id = str(payload.message_id)\n emoji = payload.emoji\n if guild_id in self.bot.db['reactionroles']:\n if message_id in self.bot.db['reactionroles'][guild_id]:\n if emoji.id:\n emoji_str = str(emoji.id)\n else:\n emoji_str = emoji.name\n\n if emoji_str not in self.bot.db['reactionroles'][guild_id][message_id]:\n return\n role_id = self.bot.db['reactionroles'][guild_id][message_id][emoji_str]\n role = guild.get_role(role_id)\n if not role:\n del(self.bot.db['reactionroles'][guild_id][message_id][emoji_str])\n return\n return role\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload):\n if payload.user_id == self.bot.user.id:\n return\n\n if payload.emoji.name == '⬆':\n if payload.channel_id == BLACKLIST_CHANNEL_ID: # votes on blacklist\n channel = self.bot.get_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n ctx = await self.bot.get_context(message)\n ctx.author = self.bot.get_user(payload.user_id)\n ctx.reacted_user_id = payload.user_id\n user_id = message.embeds[0].title.split(' ')[0]\n config = self.bot.db['global_blacklist']\n if user_id not in config['votes2']:\n return\n if str(payload.user_id) in config['residency']:\n voting_guild_id = config['residency'][str(payload.user_id)]\n if voting_guild_id not in config['votes2'][user_id]['votes']:\n if message.embeds[0].color != discord.Color(int('ff0000', 16)):\n await ctx.invoke(self.blacklist_add, args=user_id)\n else:\n try:\n 
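                    # the reacting mod hasn't claimed residency yet; prompt them by DM, or in-channel if their DMs are closed\n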
await hf.safe_send(ctx.author, \"Please claim residency on a server first with \"\n \"`;global_blacklist residency`\")\n except discord.Forbidden:\n await hf.safe_send(ctx, \"Please claim residency on a server first with `;global_blacklist \"\n \"residency`.\")\n return\n\n elif payload.channel_id == BANS_CHANNEL_ID:\n channel = self.bot.get_channel(BANS_CHANNEL_ID)\n message = await channel.fetch_message(payload.message_id)\n ctx = await self.bot.get_context(message)\n ctx.author = self.bot.get_user(payload.user_id)\n ctx.reacted_user_id = payload.user_id\n user_id = re.search('^.*\\n\\((\\d{17,22})\\)', message.embeds[0].description).group(1)\n try:\n reason = re.search('__Reason__: (.*)$', message.embeds[0].description, flags=re.S).group(1)\n except AttributeError as e:\n await hf.safe_send(channel, \"I couldn't find the reason attached to the ban log for addition to \"\n \"the GBL.\")\n return\n config = self.bot.db['global_blacklist']\n if str(payload.user_id) in config['residency']:\n if user_id not in config['blacklist'] and str(user_id) not in config['votes2']:\n await ctx.invoke(self.blacklist_add,\n args=f\"{user_id} {reason}\\n[Ban Entry]({message.jump_url})\")\n else:\n await hf.safe_send(ctx.author, \"Please claim residency on a server first with `;gbl residency`\")\n return\n\n if payload.emoji.name == '✅': # captcha\n if str(payload.guild_id) in self.bot.db['captcha']:\n config = self.bot.db['captcha'][str(payload.guild_id)]\n if config['enable']:\n guild = self.bot.get_guild(payload.guild_id)\n role = guild.get_role(config['role'])\n if not 'message' in config:\n return\n if payload.message_id == config['message']:\n try:\n await guild.get_member(payload.user_id).add_roles(role)\n return\n except discord.errors.Forbidden:\n await self.bot.get_user(202995638860906496).send(\n 'on_raw_reaction_add: Lacking `Manage Roles` permission'\n f' <#{payload.guild_id}>')\n\n if payload.guild_id == CH_SERVER_ID: # chinese\n if payload.emoji.name in '🔥📝🖋🗣🎙📖':\n roles = {'🔥': 496659040177487872,\n '📝': 509446402016018454,\n '🗣': 266713757030285313,\n '🖋': 344126772138475540,\n '🎙': 454893059080060930,\n '📖': 655082146494545924}\n server = 1\n else:\n return\n elif payload.guild_id == SP_SERVER_ID: # spanish/english\n if payload.emoji.name in '🎨🐱🐶🎮table👪🎥❗👚💻📔✏🔥📆':\n roles = {'🎨': 401930364316024852,\n '🐱': 254791516659122176,\n '🐶': 349800774886359040,\n '🎮': 343617472743604235,\n '👪': 402148856629821460,\n '🎥': 354480160986103808,\n '👚': 376200559063072769,\n '💻': 401930404908630038,\n '❗': 243859335892041728,\n '📔': 286000427512758272,\n '✏': 382752872095285248,\n '🔥': 526089127611990046,\n 'table': 396080550802096128,\n '📆': 555478189363822600}\n server = 2\n else:\n server = None\n else:\n server = None\n\n if not payload.guild_id:\n return\n\n guild = self.bot.get_guild(payload.guild_id)\n user = guild.get_member(payload.user_id)\n\n assignable_role = self.reactionroles_get_role(payload, guild)\n if assignable_role:\n try:\n await user.add_roles(assignable_role)\n except discord.Forbidden:\n pass\n\n if not user:\n return\n\n if not user.bot and server:\n try:\n config = self.bot.db['roles'][str(payload.guild_id)]\n except KeyError:\n return\n if server == 1:\n if payload.message_id != config['message']:\n return\n elif server == 2:\n if payload.message_id != config['message1'] and payload.message_id != config['message2']:\n return\n role = guild.get_role(roles[payload.emoji.name])\n try:\n await user.add_roles(role)\n except discord.errors.Forbidden:\n 
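                # can't assign the role: DM the hard-coded account (presumably the bot owner) instead of failing silently\n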
                await self.bot.get_user(202995638860906496).send(\n                    'on_raw_reaction_add: Lacking `Manage Roles` permission'\n                    f'<#{payload.guild_id}>')\n            except AttributeError:\n                return\n\n    @commands.Cog.listener()\n    async def on_raw_reaction_remove(self, payload):\n        if not payload.guild_id:\n            return\n        if payload.guild_id == CH_SERVER_ID:  # chinese\n            if not payload.emoji.name:\n                return\n            if payload.emoji.name in '🔥📝🖋🗣🎙📖':\n                roles = {'🔥': 496659040177487872,\n                         '📝': 509446402016018454,\n                         '🗣': 266713757030285313,\n                         '🖋': 344126772138475540,\n                         '🎙': 454893059080060930,\n                         '📖': 655082146494545924}\n                server = 1\n            else:\n                server = 0\n        elif payload.guild_id == SP_SERVER_ID:  # spanish/english\n            if payload.emoji.name in '🎨🐱🐶🎮table👪🎥❗👚💻📔✏🔥📆':\n                roles = {'🎨': 401930364316024852,\n                         '🐱': 254791516659122176,\n                         '🐶': 349800774886359040,\n                         '🎮': 343617472743604235,\n                         '👪': 402148856629821460,\n                         '🎥': 354480160986103808,\n                         '👚': 376200559063072769,\n                         '💻': 401930404908630038,\n                         '❗': 243859335892041728,\n                         '📔': 286000427512758272,\n                         '✏': 382752872095285248,\n                         '🔥': 526089127611990046,\n                         'table': 396080550802096128,\n                         '📆': 555478189363822600}\n                server = 2\n            else:\n                server = 0\n        else:\n            server = 0\n\n        if not payload.guild_id:\n            return\n\n        guild = self.bot.get_guild(payload.guild_id)\n        user = guild.get_member(payload.user_id)\n\n        assignable_role = self.reactionroles_get_role(payload, guild)\n        if assignable_role:\n            try:\n                await user.remove_roles(assignable_role)\n            except discord.Forbidden:\n                pass\n\n        if server:\n            if user.bot:\n                return\n            try:\n                config = self.bot.db['roles'][str(payload.guild_id)]\n            except KeyError:\n                return\n            if server == 1:\n                if payload.message_id != config['message']:\n                    return\n            elif server == 2:\n                if payload.message_id != config['message1'] and payload.message_id != config['message2']:\n                    return\n            role = guild.get_role(roles[payload.emoji.name])\n            try:\n                await user.remove_roles(role)\n            except discord.errors.Forbidden:\n                await self.bot.get_user(202995638860906496).send(\n                    'on_raw_reaction_remove: Lacking `Manage Roles` permission'\n                    f'<#{payload.guild_id}>')\n            except AttributeError:\n                return\n\n    @commands.command()\n    @commands.guild_only()\n    @commands.bot_has_permissions(send_messages=True)\n    async def pencil(self, ctx):\n        \"\"\"Adds a pencil to your name. Rai cannot edit the nickname of someone above it on the role list\"\"\"\n        try:\n            await ctx.message.delete()\n        except (discord.Forbidden, discord.NotFound):\n            pass\n        try:\n            await ctx.author.edit(nick=ctx.author.display_name + '📝')\n            msg = await hf.safe_send(ctx,\n                                     \"I've added 📝 to your name. This means you wish to be corrected in your sentences\")\n            await asyncio.sleep(7)\n            await msg.delete()\n        except discord.errors.Forbidden:\n            msg = await hf.safe_send(ctx, \"I lack the permissions to change your nickname\")\n            await asyncio.sleep(7)\n            await msg.delete()\n        except discord.errors.HTTPException:\n            try:\n                await ctx.message.add_reaction('💢')\n            except discord.NotFound:\n                pass\n\n
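    # ;eraser simply strips the last character of the nickname, assuming it is the 📝 added by ;pencil\n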
    @commands.command()\n    @commands.guild_only()\n    @commands.bot_has_permissions(send_messages=True)\n    async def eraser(self, ctx):\n        \"\"\"Erases the pencil from `;pencil`. Rai cannot edit the nicknames of users above it on the role list.\"\"\"\n        try:\n            await ctx.author.edit(nick=ctx.author.display_name[:-1])\n            await ctx.message.add_reaction('◀')\n        except discord.errors.Forbidden:\n            await hf.safe_send(ctx, \"I lack the permissions to change your nickname\")\n\n    @commands.command(aliases=['ryry'])\n    @commands.bot_has_permissions(send_messages=True)\n    async def ryan(self, ctx):\n        \"\"\"Posts a link to the help docs server for my bot\"\"\"\n        await hf.safe_send(ctx, \"You can find some shitty docs for how to use my bot here: \"\n                                \"https://github.com/ryry013/Rai/blob/master/README.md \\n\"\n                                \"You can ask questions and find some further details here: https://discord.gg/7k5MMpr\")\n\n    @commands.command(aliases=[';p', ';s', ';play', ';skip', '_;', '-;', ')', '__;', '___;', ';leave', ';join',\n                               ';l', ';q', ';queue', ';pause', ';volume', ';1', ';vol', ';np', ';list'], hidden=True)\n    async def ignore_commands_list(self, ctx):\n        pass\n\n    @commands.command(aliases=['cl', 'checklanguage'])\n    @commands.bot_has_permissions(send_messages=True)\n    @commands.cooldown(1, 15, type=commands.BucketType.user)\n    async def check_language(self, ctx, *, msg: str):\n        \"\"\"Shows what's happening behind the scenes for hardcore mode. Will try to detect the language that your\\\n        message was typed in, and display the results. Note that this is non-deterministic code, which means\\\n        running the same exact message repeatedly might give different results each time.\n\n        Usage: `;cl <message>`\"\"\"\n        stripped_msg = hf.rem_emoji_url(msg)\n        if len(msg) > 900:\n            await hf.safe_send(ctx, \"Please pick a shorter test message\")\n            return\n        if not stripped_msg:\n            stripped_msg = ' '\n        if ctx.guild.id in [SP_SERVER_ID, 759132637414817822]:\n            probs = self.bot.langdetect.predict_proba([stripped_msg])[0]\n            lang_result = f\"English: {round(probs[0], 3)}\\nSpanish: {round(probs[1], 3)}\"\n            ctx.command.reset_cooldown(ctx)\n        else:\n            lang_result = await hf.textblob_detect_language(stripped_msg)\n        output = f\"Your message:```{msg}```\" \\\n                 f\"The message I see (no emojis or urls): ```{stripped_msg}```\" \\\n                 f\"The language I detect: ```{lang_result}```\"\n        await hf.safe_send(ctx, output)\n\n    @commands.command(aliases=['server', 'info', 'sinfo'])\n    @commands.cooldown(1, 30, type=commands.BucketType.channel)\n    @commands.bot_has_permissions(send_messages=True, embed_links=True)\n    async def serverinfo(self, ctx):\n        \"\"\"Shows info about this server\"\"\"\n        guild = ctx.guild\n        if not guild:\n            await hf.safe_send(ctx,\n                               f\"{ctx.channel}. Is that what you were looking for?
(Why are you trying to call info \"\n f\"on 'this server' in a DM?)\")\n return\n em = discord.Embed(title=f\"**{guild.name}**\",\n description=f\"**ID:** {guild.id}\",\n timestamp=guild.created_at,\n colour=discord.Colour(0x877AD6))\n em.set_thumbnail(url=guild.icon_url)\n em.add_field(name=\"Region\", value=guild.region)\n em.add_field(name=\"Channels\", value=f\"{len(guild.text_channels)} text / {len(guild.voice_channels)} voice\")\n em.add_field(name=\"Verification Level\", value=guild.verification_level)\n em.add_field(name=\"Guild created on (UTC)\", value=guild.created_at.strftime(\"%Y/%m/%d %H:%M:%S\"))\n em.add_field(name=\"Number of members\", value=ctx.guild.member_count)\n\n if guild.afk_channel:\n em.add_field(name=\"Voice AFK Timeout\",\n value=f\"{guild.afk_timeout//60} mins → {guild.afk_channel.mention}\")\n\n if guild.explicit_content_filter != \"disabled\":\n em.add_field(name=\"Explicit Content Filter\", value=guild.explicit_content_filter)\n\n if guild.id not in [JP_SERVER_ID, SP_SERVER_ID]:\n em.add_field(name=\"Server owner\", value=f\"{guild.owner.name}#{guild.owner.discriminator}\")\n\n # count top 6 member roles\n if len(guild.members) < 30000:\n role_count = Counter(role.name for member in guild.members\n for role in member.roles if not role.is_default())\n\n top_six_roles = '\\n'.join(f\"{role}: {count}\" for role, count in role_count.most_common(6))\n em.add_field(name=f\"Top 6 roles (out of {len(guild.roles)})\", value=top_six_roles)\n else:\n em.add_field(name=\"Roles\", value=str(len(guild.roles)))\n\n how_long_ago = datetime.utcnow() - guild.created_at\n days = how_long_ago.days\n years = days // 365\n bef_str = ''\n if years:\n bef_str = f\"{years} years, \"\n months = (days - 365 * years) // 30.416666666666668\n if months:\n bef_str += f\"{int(months)} months, \"\n days = days - 365 * years - round(30.416666666666668 * months)\n bef_str += f\"{days} days\"\n em.set_footer(text=f\"Guild created {bef_str} ago on:\")\n if len(em.fields) % 2 == 0:\n two = em.fields[-2]\n em.add_field(name=two.name, value=two.value)\n em.remove_field(-3)\n try:\n await hf.safe_send(ctx, embed=em)\n except discord.Forbidden:\n pass\n\n @commands.group(invoke_without_command=True, aliases=['gb', 'gbl', 'blacklist'], hidden=True)\n @blacklist_check()\n async def global_blacklist(self, ctx):\n \"\"\"A global blacklist for banning spammers, requires three votes from mods from three different servers\"\"\"\n config = hf.database_toggle(ctx, self.bot.db['global_blacklist'])\n if config['enable']:\n if not ctx.me.guild_permissions.ban_members:\n await hf.safe_send(ctx,\n 'I lack the permission to ban members. Please fix that before enabling this module')\n hf.database_toggle(ctx, self.bot.db['global_blacklist'])\n return\n await hf.safe_send(ctx,\n \"Enabled the global blacklist on this server. Anyone voted into the blacklist by three \"\n \"mods and joining your server will be automatically banned. \"\n \"Type `;global_blacklist residency` to claim your residency on a server.\")\n else:\n await hf.safe_send(ctx, \"Disabled the global blacklist. 
\"\n \"Anyone on the blacklist will be able to join your server.\")\n\n @global_blacklist.command(name='reason', aliases=['edit'])\n @blacklist_check()\n async def blacklist_reason(self, ctx, entry_message_id, *, reason):\n \"\"\"Add a reason to a blacklist entry: `;gbl reason `\"\"\"\n blacklist_channel = self.bot.get_channel(BLACKLIST_CHANNEL_ID)\n try:\n entry_message = await blacklist_channel.fetch_message(int(entry_message_id))\n except discord.NotFound:\n await hf.safe_send(ctx, \"I couldn't find the message you were trying to edit. Make sure you link to \"\n f\"the message ID in the {blacklist_channel.mention}.\")\n return\n emb = entry_message.embeds[0]\n old_reason = emb.fields[1].value\n emb.set_field_at(1, name=emb.fields[1].name, value=reason)\n await entry_message.edit(embed=emb)\n await hf.safe_send(ctx, f\"Changed reason of {entry_message.jump_url}\\nOld reason: ```{old_reason}```\")\n\n @global_blacklist.command(name='remove', alias=['delete'])\n @blacklist_check()\n async def blacklist_remove(self, ctx, entry_message_id):\n \"\"\"Removes a voting entry from the blacklist channel.\"\"\"\n blacklist_channel = self.bot.get_channel(BLACKLIST_CHANNEL_ID)\n try:\n entry_message = await blacklist_channel.fetch_message(int(entry_message_id))\n except discord.NotFound:\n await hf.safe_send(ctx,\n f\"Message not found. If you inputted the ID of a user, please input the message ID of \"\n f\"the entry in the blacklist instead.\")\n return\n emb = entry_message.embeds[0]\n target_id = emb.title.split(' ')[0]\n\n try:\n self.bot.db['global_blacklist']['blacklist'].remove(str(target_id))\n except ValueError:\n pass\n except KeyError:\n await hf.safe_send(ctx, \"This user is not currently in the GBL.\")\n\n try:\n del self.bot.db['global_blacklist']['votes2'][str(target_id)]\n except ValueError:\n pass\n except KeyError:\n await hf.safe_send(ctx, \"This user is not currently under consideration for votes.\")\n\n await entry_message.delete()\n\n emb.color = discord.Color(int('ff00', 16))\n emb.set_field_at(0, name=\"Entry removed by\", value=f\"{str(ctx.author)}\")\n await blacklist_channel.send(embed=emb)\n\n await ctx.message.add_reaction('✅')\n\n @global_blacklist.command()\n @blacklist_check()\n async def residency(self, ctx):\n \"\"\"Claims your residency on a server\"\"\"\n config = self.bot.db['global_blacklist']['residency']\n if ctx.guild.id == MODCHAT_SERVER_ID:\n await hf.safe_send(ctx, \"You can't claim residency here. Please do this command on the server you mod.\")\n return\n\n if str(ctx.author.id) in config:\n server = self.bot.get_guild(config[str(ctx.author.id)])\n await hf.safe_send(ctx,\n f\"You've already claimed residency on {server.name}. You can not change this without \"\n f\"talking to Ryan.\")\n return\n\n await hf.safe_send(ctx,\n \"For the purpose of maintaining fairness in a ban, you're about to claim your mod residency to \"\n f\"`{ctx.guild.name}`. This can not be changed without talking to Ryan. \"\n f\"Do you wish to continue?\\n\\nType `yes` or `no` (case insensitive).\")\n msg = await self.bot.wait_for('message',\n timeout=25.0,\n check=lambda m: m.author == ctx.author and m.channel == ctx.channel)\n\n if msg.content.casefold() == 'yes': # register\n config[str(ctx.author.id)] = ctx.guild.id\n await hf.safe_send(ctx,\n f\"Registered your residency to `{ctx.guild.name}`. Type `;global_blacklist add ` to \"\n f\"vote on a user for the blacklist\")\n\n elif msg.content.casefold() == 'no': # cancel\n await hf.safe_send(ctx, \"Understood. 
Exiting module.\")\n\n else: # invalid response\n await hf.safe_send(ctx, \"Invalid response\")\n\n @blacklist_check()\n @global_blacklist.command(aliases=['vote'], name=\"add\")\n async def blacklist_add(self, ctx, *, args):\n \"\"\"Add people to the blacklist\"\"\"\n args = args.replace('\\n', ' ').split()\n list_of_ids = []\n reason = \"None\"\n for arg_index in range(len(args)):\n if re.search('\\d{17,22}', args[arg_index]):\n list_of_ids.append(str(args[arg_index]))\n else:\n reason = ' '.join(args[arg_index:])\n break\n channel = self.bot.get_channel(BLACKLIST_CHANNEL_ID)\n config = self.bot.db['global_blacklist']\n if not list_of_ids:\n await hf.safe_send(ctx.author, f\"No valid ID found in command\")\n return\n for user in list_of_ids:\n user_obj = self.bot.get_user(user)\n if not user_obj:\n try:\n user = user.replace('<@!', '').replace('<@', '').replace('>', '')\n user_obj = await self.bot.fetch_user(user)\n except (discord.NotFound, discord.HTTPException):\n user_obj = None\n\n async def post_vote_notification(target_user, reason):\n try:\n await ctx.message.add_reaction('✅')\n except discord.Forbidden:\n await ctx.send(\"User added to blacklist ✅\")\n if not target_user:\n target_user = ''\n emb = discord.Embed(title=f\"{user} {target_user} (1 vote)\", color=discord.Color(int('ffff00', 16)))\n emb.add_field(name='Voters', value=ctx.author.name)\n emb.add_field(name='Reason', value=reason)\n msg = await hf.safe_send(channel, embed=emb)\n await msg.add_reaction('⬆')\n return msg\n\n try: # the guild ID that the person trying to add a vote belongs to\n user_residency = config['residency'][str(ctx.author.id)] # a guild id\n except KeyError:\n await hf.safe_send(ctx.author,\n \"Please claim residency on a server first with `;global_blacklist residency`\")\n return\n\n if user in config['blacklist']: # already blacklisted\n await hf.safe_send(ctx, f\"{user} is already on the blacklist\")\n continue\n\n if user not in config['votes2']: # 0 votes\n config['votes2'][user] = {'votes': [user_residency], 'message': 0}\n msg = await post_vote_notification(user_obj, reason)\n config['votes2'][user]['message'] = msg.id\n continue\n\n if user in config['votes2']: # 1, 2, or 3 votes\n list_of_votes = config['votes2'][user]['votes']\n if user_residency in list_of_votes:\n try:\n await hf.safe_send(ctx.author, f\"{user} - Someone from your server already voted\")\n except discord.Forbidden:\n await hf.safe_send(ctx, f\"{user} - Someone from your server already voted\")\n continue\n\n message = await channel.fetch_message(config['votes2'][user]['message'])\n emb = message.embeds[0]\n title_str = emb.title\n result = re.search('(\\((.*)\\))? \\((.) votes?\\)', title_str)\n # target_username = result.group(2)\n num_of_votes = result.group(3)\n emb.title = re.sub('(.) 
vote', f'{int(num_of_votes)+1} vote', emb.title)\n                if num_of_votes == '1':  # 1-->2: pluralize 'vote'\n                    emb.title = emb.title.replace('vote', 'votes')\n                if num_of_votes in ('1', '2'):  # 1-->2 or 2-->3: record this server's vote\n                    config['votes2'][user]['votes'].append(user_residency)\n                if num_of_votes == '3':  # enough votes: move the user onto the blacklist\n                    emb.color = discord.Color(int('ff0000', 16))\n                    del config['votes2'][user]\n                    config['blacklist'].append(int(user))\n                emb.set_field_at(0, name=emb.fields[0].name, value=emb.fields[0].value + f', {ctx.author.name}')\n                await message.edit(embed=emb)\n\n    @global_blacklist.command(name='list')\n    @blacklist_check()\n    async def blacklist_list(self, ctx):\n        \"\"\"Lists the users with residencies on each server\"\"\"\n        users_str = ''\n        users_dict = {}\n        config = self.bot.db['global_blacklist']['residency']\n        for user_id in config:\n            user = self.bot.get_user(int(user_id))\n            guild = self.bot.get_guild(config[user_id])\n            if guild in users_dict:\n                users_dict[guild].append(user)\n            else:\n                users_dict[guild] = [user]\n        for guild in users_dict:\n            try:\n                users_str += f\"**{guild.name}:** {', '.join([user.name for user in users_dict[guild]])}\\n\"\n            except AttributeError:\n                pass\n        emb = discord.Embed(title=\"Global blacklist residencies\", description=\"Listed below is a breakdown of who \"\n                                                                             \"holds residencies in which servers.\\n\\n\")\n        emb.description += users_str\n        await hf.safe_send(ctx, embed=emb)\n\n    @global_blacklist.command(name=\"sub\")\n    @blacklist_check()\n    async def blacklist_bansub(self, ctx):\n        \"\"\"Subscribes yourself to pings for your server\"\"\"\n        # a list of which server IDs a user is subscribed to\n        guild = self.bot.get_guild(MODCHAT_SERVER_ID)\n        subbed_roles: list = self.bot.db['bansub']['user_to_role'].setdefault(str(ctx.author.id), [])\n        user_role_ids = [role.id for role in ctx.author.roles if str(role.color) == \"#3498db\"]  # only want blue roles\n        selection_dictionary = {}  # for later when the user selects a role to toggle\n        guild_id_to_role: dict = self.bot.db['bansub']['guild_to_role']  # links a guild ID to the corresponding role\n        role_to_guild_id = {guild_id_to_role[a]: a for a in guild_id_to_role}  # reverses the dictionary\n\n        # ########################## DISPLAYING CURRENT SUBSCRIPTIONS ###########################\n\n        counter = 1\n        if not subbed_roles:\n            msg = \"You are currently not subscribed to pings for any servers.\\n\"\n        else:\n            msg = \"You are currently subscribed to pings for the following servers: \\n\"\n            for role_id in subbed_roles:  # a list of role IDs corresponding to server roles\n                if role_id in user_role_ids:\n                    user_role_ids.remove(role_id)\n                role: discord.Role = guild.get_role(role_id)\n                msg += f\" {counter}) {role.name}\\n\"\n                selection_dictionary[counter] = role.id\n                counter += 1\n\n        msg += \"\\nHere are the roles to which you're not subscribed:\\n\"\n        for role_id in user_role_ids:  # remaining here should only be the unsubscribed roles on the user's profile\n            role: discord.Role = guild.get_role(role_id)\n            msg += f\" {counter}) {role.name}\\n\"\n            selection_dictionary[counter] = role.id\n            counter += 1\n\n        # ########################## ASK FOR WHICH ROLE TO TOGGLE ########################\n\n        msg += \"\\nTo toggle the subscription for a role, please now input the number for that role.\"\n        await hf.safe_send(ctx, msg)\n        try:\n            resp = await self.bot.wait_for(\"message\", timeout=20.0,\n                                           check=lambda m: m.author == ctx.author and m.channel == ctx.channel)\n        except asyncio.TimeoutError:\n            await hf.safe_send(ctx, \"Module timed out.\")\n            return\n        try:\n            resp = int(resp.content)\n        except ValueError:\n            await hf.safe_send(ctx, \"Sorry, I didn't understand your response. Please input only a single number.\")\n            return\n        if resp not in selection_dictionary:\n            await hf.safe_send(ctx, \"Sorry, I didn't understand your response. Please input only a single number.\")\n            return\n
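        # selection_dictionary maps each number shown in the menu to the role ID it stands for\n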
await hf.safe_send(ctx, \"Sorry, I didn't understand your response. Please input only a single number.\")\n return\n if resp not in selection_dictionary:\n await hf.safe_send(ctx, \"Sorry, I didn't understand your response. Please input only a single number.\")\n return\n\n # ################################ TOGGLE THE ROLE #################################\n\n role_selection: int = selection_dictionary[resp]\n if role_selection in subbed_roles:\n subbed_roles.remove(role_selection)\n await hf.safe_send(ctx, \"I've unsubcribed you from that role.\")\n else:\n # ####### Possibly match a role to a guild ########\n if role_selection not in role_to_guild_id:\n await hf.safe_send(ctx, \"Before we continue, you need to tell me which server corresponds to that role.\"\n \" We'll only need to do this once for your server. Please tell me either the \"\n \"server ID of that server, or the exact name of it.\")\n try:\n resp = await self.bot.wait_for(\"message\", timeout=20.0,\n check=lambda m: m.author == ctx.author and m.channel == ctx.channel)\n resp = resp.content\n except asyncio.TimeoutError:\n await hf.safe_send(ctx, \"Module timed out.\")\n return\n\n if re.search('^\\d{17,22}$', resp): # user specifies a guild ID\n guild = self.bot.get_guild(int(resp))\n if not guild:\n await hf.safe_send(ctx, \"I couldn't find the guild corresponding to that ID. \"\n \"Please start over.\")\n return\n else: # user probably specified a guild name\n guild = discord.utils.find(lambda g: g.name == resp, self.bot.guilds)\n if not guild:\n await hf.safe_send(ctx, \"I couldn't find the guild corresponding to that guild name. \"\n \"Please start over.\")\n return\n guild_id_to_role[str(guild.id)] = role_selection\n\n # ####### Add the role #######\n subbed_roles.append(role_selection)\n await hf.safe_send(ctx, \"I've added you to the subscriptions for that role. 
    @global_blacklist.command(name=\"ignore\")\n    @blacklist_check()\n    async def blacklist_ignore(self, ctx, user_id):\n        \"\"\"Type `;gbl ignore <user id>` to remove a user from (or add back to) all future logging in the bans channel.\n        Use this for test accounts, alt accounts, etc.\"\"\"\n        try:\n            user_id = int(user_id)\n            if not (17 <= len(str(user_id)) <= 22):  # Discord IDs are 17-22 digits long\n                raise ValueError\n        except ValueError:\n            await hf.safe_send(ctx, \"Please input a valid ID.\")\n            return\n        if user_id in self.bot.db['bansub']['ignore']:\n            self.bot.db['bansub']['ignore'].remove(user_id)\n            await hf.safe_send(ctx, embed=hf.red_embed(\"I've removed that user from the ignore list.\"))\n        else:\n            self.bot.db['bansub']['ignore'].append(user_id)\n            await hf.safe_send(ctx, embed=hf.green_embed(\"I've added that user to the ignore list for ban logging.\"))\n\n    @commands.command()\n    @commands.guild_only()\n    async def lsar(self, ctx, page_num=1):\n        \"\"\"Lists self-assignable roles (type `;lsar <page>` to view other pages, example: `;lsar 2`).\"\"\"\n        if not ctx.guild:\n            return\n        roles_list = []\n        config = self.bot.db['SAR'].setdefault(str(ctx.guild.id), {'0': []})\n        for group in config.copy():\n            if len(config[group]) == 0:\n                del config[group]\n        groups = sorted([int(key) for key in config])\n        groups = [str(i) for i in groups]\n        for group in groups:\n            for role in config[group]:\n                roles_list.append((group, role))\n        role_list_str = f\"**There are {len(roles_list)} self-assignable roles**\\n\"\n        if len(roles_list) == 1:\n            role_list_str = role_list_str.replace('roles', 'role').replace('are', 'is')\n        current_group = ''\n        try:\n            current_group = roles_list[20 * (page_num - 1)][0]\n            role_list_str += f\"⟪Group {current_group}⟫\\n\"\n        except IndexError:\n            pass\n\n        for role_tuple in roles_list[20 * (page_num - 1):20 * page_num]:\n            if current_group != role_tuple[0]:\n                current_group = groups[groups.index(current_group) + 1]\n                role_list_str += f\"\\n⟪Group {current_group}⟫\\n\"\n\n            role = ctx.guild.get_role(role_tuple[1])\n            if not role:\n                await ctx.send(f\"Couldn't find role with ID {role_tuple[1]}. Removing from self-assignable roles.\")\n                config[current_group].remove(role_tuple[1])\n                continue\n            role_list_str += f\"⠀{role.name}\\n\"\n\n        emb = discord.Embed(description=role_list_str, color=discord.Color(int('00ff00', 16)))\n        num_of_pages = (len(roles_list)//20)+1\n        footer_text = f\"{page_num} / {num_of_pages}\"\n        if page_num < num_of_pages:  # only advertise a next page if one actually exists\n            footer_text += f\" ・ (view the next page: ;lsar {page_num + 1})\"\n        emb.set_footer(text=footer_text)\n        await hf.safe_send(ctx, embed=emb)\n\n
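    # ';i am X' is sugar: it reroutes to ';iam X' by stripping the leading 'am '\n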
    @commands.command(hidden=True)\n    @commands.bot_has_permissions(send_messages=True, embed_links=True, manage_roles=True)\n    async def i(self, ctx, *, role_name):\n        if role_name[:2] == 'am':\n            await ctx.invoke(self.iam, role_name=role_name[3:])\n\n    @staticmethod\n    def iam_find_role(ctx, r_name):\n        r_name = r_name.casefold()\n        found_role = discord.utils.find(lambda r: r.name.casefold() == r_name, ctx.guild.roles)\n        if not found_role:\n            if 3 <= len(r_name):\n                found_role = discord.utils.find(lambda r: r.name.casefold().startswith(r_name), ctx.guild.roles)\n        if not found_role:\n            if 3 <= len(r_name) <= 6:\n                found_role = discord.utils.find(lambda r: LDist(r.name.casefold()[:len(r_name)], r_name) <= 1,\n                                                ctx.guild.roles)\n            elif 6 < len(r_name):\n                found_role = discord.utils.find(lambda r: LDist(r.name.casefold()[:len(r_name)], r_name) <= 3,\n                                                ctx.guild.roles)\n        return found_role\n\n    @commands.command(aliases=['im'])\n    @commands.bot_has_permissions(send_messages=True, embed_links=True, manage_roles=True)\n    @commands.guild_only()\n    async def iam(self, ctx, *, role_name):\n        \"\"\"Command used to self-assign a role. Type `;iam <role name>`. Type `;lsar` to see the list of roles.\n\n        You can also just type the beginning of a role name and it will find it. You can also slightly misspell it.\n\n        Example: `;iam English`\"\"\"\n        if not ctx.guild:\n            return\n        if str(ctx.guild.id) not in self.bot.db['SAR']:\n            return\n        config = self.bot.db['SAR'][str(ctx.guild.id)]\n        role_name = role_name.casefold()\n        found_role = self.iam_find_role(ctx, role_name)\n        if not found_role:\n            await hf.safe_send(ctx,\n                               embed=hf.red_embed(f\"**{str(ctx.author)}** No role found\"))\n            return\n\n        if found_role in ctx.author.roles:\n            await hf.safe_send(ctx, embed=hf.red_embed(f\"**{str(ctx.author)}** \"\n                                                       f\"You already have that role\"))\n            return\n\n        for group in config:\n            for role_id in config[group]:\n                if found_role.id == role_id:\n                    await ctx.author.add_roles(found_role)\n                    await hf.safe_send(ctx, embed=hf.green_embed(\n                        f\"**{str(ctx.author)}** You now have\"\n                        f\" the **{found_role.name}** role.\"))\n                    return\n\n    @commands.command(aliases=['iamn', '!iam'])\n    @commands.guild_only()\n    @commands.bot_has_permissions(send_messages=True, embed_links=True, manage_roles=True)\n    async def iamnot(self, ctx, *, role_name):\n        \"\"\"Command used to remove a self-assigned role\"\"\"\n        if str(ctx.guild.id) not in self.bot.db['SAR']:\n            return\n        config = self.bot.db['SAR'][str(ctx.guild.id)]\n\n        found_role = self.iam_find_role(ctx, role_name)\n        if not found_role:\n            await hf.safe_send(ctx,\n                               embed=hf.red_embed(f\"**{str(ctx.author)}** No role found\"))\n            return\n\n        if found_role not in ctx.author.roles:\n            await hf.safe_send(ctx, embed=hf.red_embed(f\"**{str(ctx.author)}** \"\n                                                       f\"You don't have that role\"))\n            return\n\n        for group in config:\n            for role_id in config[group]:\n                if found_role.id == role_id:\n                    await ctx.author.remove_roles(found_role)\n                    await hf.safe_send(ctx,\n                                       embed=hf.green_embed(\n                                           f\"**{str(ctx.author)}** You no longer have \"\n                                           f\"the **{found_role.name}**
role.\"))\n return\n\n await hf.safe_send(ctx, embed=hf.red_embed(f\"**{str(ctx.author)}** That role is not \"\n f\"self-assignable.\"))\n\n @commands.command(aliases=['vmute', 'vm'])\n @hf.is_voicemod()\n @commands.bot_has_permissions(manage_roles=True, embed_links=True)\n async def voicemute(self, ctx, time, member=None, *, reason=None):\n \"\"\"Mutes a user. Syntax: `;voicemute
\n \n\"\"\"\n def item(target, title, highlight):\n nonlocal r\n if menu_hl is not None and menu_hl == highlight:\n r += \"↘ \" + title + \"\\n\"\n else:\n r += \"↘ \" + title + \"\\n\"\n\n def subitem(target, title):\n nonlocal r\n r += \"\" + title + \"\\n\"\n \n item(\"artiq/index.html\", \"ARTIQ\", \"artiq\")\n subitem(\"artiq/index.html\", \"Overview\")\n subitem(\"artiq/sinara.html\", \"Sinara hardware\")\n subitem(\"artiq/resources.html\", \"Resources\")\n item(\"migen/index.html\", \"Migen\", \"migen\")\n item(\"smoltcp.html\", \"smoltcp\", \"smoltcp\")\n item(\"solvespace/index.html\", \"SolveSpace\", \"solvespace\")\n item(\"ionpak.html\", \"ionpak\", \"ionpak\")\n item(\"about.html\", \"About\", \"about\")\n subitem(\"about.html\", \"Company\")\n subitem(\"office.html\", \"Office\")\n r += \"\"\"