diff --git "a/781.jsonl" "b/781.jsonl"
new file mode 100644
--- /dev/null
+++ "b/781.jsonl"
@@ -0,0 +1,639 @@
+{"seq_id":"263291268","text":"from flask import json\nfrom dicttoxml import dicttoxml\nfrom xml.dom.minidom import parseString\nfrom xml.etree import ElementTree\nfrom collections import OrderedDict\nfrom workalendar.africa import SouthAfrica\nfrom datetime import datetime, timedelta\nfrom zeep import Client\nfrom passlib.hash import des_crypt\nimport hashlib\nimport configparser\nimport re\nimport pymysql\nimport threading\nimport emailclient\n\n# RUN SERVER\nhost_path = '/var/www/franc/franc/'\nmode = 'francdb'\n\n# RUN LOCALHOST\n# host_path = '/Users/tpb/dev/franc/server/'\n# mode = 'franctest'\n\n# CONFIG\nconfigParser = configparser.RawConfigParser()\nconfigParser.read(host_path+'config.ini')\n\ndbhost = configParser.get(mode,'host');\ndbuser = configParser.get(mode,'user');\ndbpswd = configParser.get(mode,'password');\ndbname = configParser.get(mode,'db');\n\nmode = 'email'\nemail_adm = configParser.get(mode,'email_adm')\nemail_err = configParser.get(mode,'email_err')\nemail_hlp = configParser.get(mode,'email_hlp')\nemail_usr = configParser.get(mode,'email_usr')\nemail_pwd = configParser.get(mode,'email_pwd')\nemail_host = configParser.get(mode,'email_host')\nemail_port = configParser.get(mode,'email_port')\n\nmode = 'debit'\ndebit_api = configParser.get(mode,'debit_api')\ndebit_mno = configParser.get(mode,'debit_mno')\ndebit_aid = configParser.get(mode,'debit_aid')\n\nmode = 'enum'\nfund_enum = json.loads(configParser.get(mode,'fund_enum'))\ntranstype_enum = json.loads(configParser.get(mode,'transtype_enum'))\ntransstatus_enum = json.loads(configParser.get(mode,'transstatus_enum'))\n\n# EMAIL CLIENT\neclient = emailclient.EmailClient(email_host, email_port, email_usr, email_pwd)\n\nif __name__ == '__main__':\n\n # INIT\n db = 0\n tid = 0\n todaydate = datetime.today()\n nowstr = (datetime.today()).strftime('%Y-%m-%d %H:%M:%S')\n todaystr = (datetime.today()).strftime('%Y-%m-%d')\n # actiondate = datetime.today()+timedelta(days=7)\n actiondate = datetime.today()+timedelta(days=7)\n cal = SouthAfrica()\n\n try: \n debitday = str(todaydate.day)\n\n db = pymysql.connect(dbhost,dbuser,dbpswd,dbname)\n cursor = db.cursor()\n query = \"SELECT u.id uid, u.firstname, u.lastname, d.amount, d.debitday, d.split, b.id bid, b.bank, b.accno, b.branch, c.balance cash, e.balance equity FROM (SELECT * FROM (SELECT uid, MAX(reg_date) AS reg_date FROM debitorders WHERE debitday=5 AND active=TRUE GROUP BY uid) as p JOIN debitorders USING (uid, reg_date)) AS d, (SELECT * FROM (SELECT uid, MAX(reg_date) AS reg_date FROM bankacc GROUP BY uid) as x LEFT JOIN bankacc USING (uid, reg_date)) AS b, (SELECT uid, balance FROM portfolio WHERE fid=1) AS c, (SELECT uid, balance FROM portfolio WHERE fid=2) AS e, users u WHERE d.uid=u.id AND d.uid=b.uid AND d.uid=c.uid AND d.uid=e.uid\".format(actiondate,fund_enum.index('cash')+1,fund_enum.index('equity')+1)\n print(query)\n cursor.execute(query)\n fields = [descr[0] for descr in cursor.description]\n results = [dict(zip(fields,row)) for row in cursor.fetchall()]\n \n print('{0}: {1} debitorders'.format(todaystr,len(results)))\n\n if len(results)>0:\n \n # ACTIONDATE\n while not cal.is_working_day(actiondate):\n actiondate += timedelta(days=1) \n \n client = Client(debit_api)\n\n header = OrderedDict([('merchantno', debit_mno), \\\n ('applicationid', debit_aid), \\\n ('servicetype', 3), \\\n ('totaltransactions', len(results)), \\\n ('firstactiondate', actiondate.strftime('%y%m%d')), \\\n ('lastactiondate', actiondate.strftime('%y%m%d')), \\\n ('merchantcellnotify', 
''), \\\n ('merchantemailnotify',email_adm)])\n\n debitorder = OrderedDict([('header', header)])\n\n debittotal = 0\n seqno = 0\n ntrans = len(results)\n for t in results:\n\n trans='{0}-{1}-{2}-{3}'.format(t['uid'],t['amount'],t['bid'],todaystr)\n print(trans)\n # hash_object = hashlib.md5(trans.encode())\n # detail=hash_object.hexdigest()\n detail = des_crypt.hash(trans)\n print(detail)\n\n cash=round(t['amount']*(100-t['split'])/100)\n print('Cash deposit: R{0}'.format(cash))\n transaction=(t['uid'],t['bid'],fund_enum.index('cash')+1,transtype_enum.index('Debitorder')+1,cash,t['cash'],transstatus_enum.index('Submitted')+1,detail,nowstr)\n query=\"INSERT INTO transactions (uid, bid, fid, type, amount, balance, status, detail, reg_date) VALUES ({0},{1},{2},{3},{4},{5},{6},'{7}','{8}');\".format(*transaction)\n print(query)\n cursor.execute(query)\n db.commit()\n\n equity=round(t['amount']*t['split']/100)\n print('Equity deposit: R{0}'.format(equity))\n transaction=(t['uid'],t['bid'],fund_enum.index('equity')+1,transtype_enum.index('Debitorder')+1,equity,t['equity'],transstatus_enum.index('Submitted')+1,detail,nowstr)\n query=\"INSERT INTO transactions (uid, bid, fid, type, amount, balance, status, detail, reg_date) VALUES ({0},{1},{2},{3},{4},{5},{6},'{7}','{8}');\".format(*transaction)\n print(query)\n cursor.execute(query)\n db.commit()\n\n debittotal += t['amount']\n accountholder = t['firstname'] + ' ' + t['lastname']\n transaction = OrderedDict([('sequenceno', seqno+1), \\\n ('branchcode', t['branch']), \\\n ('accounttype', 1), \\\n ('accountno', t['accno']), \\\n ('debitamount', t['amount']),\\\n ('debitdate', actiondate.strftime('%y%m%d')),\\\n ('debitreference',detail),\\\n ('accountholder', accountholder),\\\n ('debitcellnotify', ''), \\\n ('debitemailnotify', ''), \\\n ('transactionrefno', detail)])\n debitorder.update(OrderedDict([('transaction%d'%(seqno), transaction)]))\n seqno+=1\n\n footer = OrderedDict([('totaltransactions', ntrans), \\\n ('firstactiondate', actiondate.strftime('%y%m%d')), \\\n ('lastactiondate', actiondate.strftime('%y%m%d')), \\\n ('debittotal', debittotal)])\n\n debitorder.update(OrderedDict({'footer':footer}))\n\n xml = dicttoxml(debitorder, custom_root='debitorder', attr_type=False)\n dom = parseString(xml)\n xml = dom.toxml()\n xml = re.sub(r\"transaction\\d+\", \"transaction\", xml)\n print(xml)\n\n filename = \"debit-%s.xml\"%(actiondate.strftime('%y%m%d'))\n print('Archving: '+filename) \n f = open(host_path+'assets/archive/debitorders/'+filename,\"w+\")\n f.write(xml)\n f.close()\n\n result = client.service.uploadDebitFile(xml)\n\n el = ElementTree.XML(result).find(\"transactionreference\")\n\n if el is not None: \n\n tref = el.text\n print('[INFO] Debit upload ref {0}'.format(tref))\n release = OrderedDict([('merchantno', debit_mno), \\\n ('applicationid', debit_aid), \\\n ('referenceno', tref)])\n xml = dicttoxml(release, custom_root='dorelease', attr_type=False)\n print(xml)\n result = client.service.releaseDebitFile(xml)\n success = ElementTree.XML(result).find(\"result\").text\n\n if success=='1':\n\n query = \"UPDATE transactions SET ref='{0}', status={1} WHERE type={2} and reg_date=STR_TO_DATE('{3}','%Y-%m-%d %H:%i:%s');\".format(tref,transstatus_enum.index('Processing')+1,transtype_enum.index('Debitorder')+1,nowstr)\n print(query)\n cursor.execute(query)\n tid = str(cursor.lastrowid)\n db.commit()\n\n subject = '[SUCCESS] Debit release'\n print(subject)\n body = 'Actiondate: {0}\\nNo transaction {1}\\nTotal: 
{2}'.format(actiondate.strftime('%y%m%d'),ntrans,debittotal)\n print(body)\n t = threading.Thread(target=eclient.sendemail, args=(subject, body, email_adm))\n t.daemon = True\n t.start() \n\n else:\n subject = '[ERROR] Debit release'\n print(subject)\n body = ElementTree.XML(result).find('description').text\n print(body)\n t = threading.Thread(target=eclient.sendemail, args=(subject, body, email_adm))\n t.daemon = True\n t.start() \n\n else:\n subject = '[ERROR] Debit upload'\n print(subject)\n body = ElementTree.XML(result).find('debituploaderror').find('description').text\n t = threading.Thread(target=eclient.sendemail, args=(subject, body, email_adm))\n t.daemon = True\n t.start() \n\n except Exception as ex:\n query = \"DELETE FROM transactions WHERE type={0} and reg_date=STR_TO_DATE('{1}','%Y-%m-%d %H:%i:%s');\".format(transtype_enum.index('Debitorder')+1,nowstr)\n print(query)\n cursor.execute(query)\n db.commit()\n subject = '[ERROR] Debitorder'\n print(subject)\n body = '{0}'.format(ex)\n print(body)\n # t = threading.Thread(target=eclient.sendemail, args=(subject, body, email_err))\n # t.daemon = True\n # t.start() \n\n finally:\n if (db):\n db.close()\n\n\n","sub_path":"server/clearance.py","file_name":"clearance.py","file_ext":"py","file_size_in_byte":10042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"338676179","text":"#!/usr/bin/python\n# Filename: pr15.py\n\ngridSize = 20;\n\ngrid = list();\nfor i in range(gridSize + 1):\n grid.append(1);\n\nfor i in range(1, gridSize):\n for j in range(0, gridSize):\n grid[j] = sum(grid[j:]);\n\nprint(sum(grid));\n","sub_path":"pr15.py","file_name":"pr15.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"472281760","text":"import os\nimport urllib.request\nimport urllib.parse\nfrom bs4 import BeautifulSoup\n\nQUERY = str(input(\"SEARCH : \"))\n\nfile = open('./Crawler_KISTI_'+QUERY+'.txt','w', encoding='utf-8')\n\nArr_URL = ['Default', 'Key', 'Target', 'Search', 'Count', 'Start', 'Sort', 'Type', 'Res', 'Query']\n\nArr_Value = ['http://openapi.ndsl.kr/itemsearch.do', '?keyValue=03323430', '&target=ARTI', '&searchField=BI', '&displayCount=100', '&startPosition=1', '&sortby=', '&returnType=xml', '&responseGroup=advance', '&query='+QUERY]\n\nURL=''\n\nfor arr in range(len(Arr_URL)):\n URL += Arr_Value[arr]\nprint(URL)\nheaders = {\n'Host': 'openapi.ndsl.kr',\n'Connection': 'keep-alive',\n'Upgrade-Insecure-Requests': 1,\n'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36',\n'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n'Accept-Encoding': 'gzip, deflate',\n'Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',\n'Cookie': '_ga=GA1.2.886947753.1522289626; WMONID=sRG64EHzxAF; JSESSIONID=aeEYqiOaIN5u1jVvsSp3Ru9n0imSEBf7AkwuFBwWXNlzDQrZM1lI2itgzZmg6Nfs.ar228_servlet_engine12'\n}\nreq=urllib.request.Request(URL, headers=headers)\nf = urllib.request.urlopen(req)\nresultXML = f.read()\nxmlsoup = BeautifulSoup(resultXML, 'html.parser')\nitems = xmlsoup.find_all('dissertation')\nfor item in items:\n print(item.dissertationtitle)\n file.write('-------------------------------\\n')\n file.write(' 제목 : '+ item.dissertationtitle.get_text(strip=1)+'\\n')\n file.write(' 내용 : '+ item.abstract.get_text(strip=1)+'\\n')\n file.write(' 링크 : '+ item.deeplink.get_text(strip=1)+'\\n')#본문의 링크를 가져오기 위해\n file.write('-------------------------------\\n')\nfile.close()\n'''\nDefault_URL = 'http://openapi.ndsl.kr/itemsearch.do'\nKey_URL = '?keyValue=03323430'\nTarget_URL = '&target=ARTI'#ARTI=all/NART=학위제외/JAKO=국내/JAFO=해외/CFKO=국내회의/CFFO=해외회의/DIKO=국내학위\nSearch_URL = '&searchField=BI'#BI=all/TI=제목/SO=저널명/KW=키워드\nCount_URL = '&displayCount=100'\nStart_URL = '&startPosition=1'\nSort_URL = '&sortby='#default=정확도/title=논문명/jtitle=저널명/pubYear=발행일\nType_URL = '&returnType=xml'#xml/json\nRes_URL = '&responseGroup=advance'#simple=URLx/advance=URL\nQuery_URL = '&query='+QUERY#검색질의어(UTF-8)\n'''\n","sub_path":"Crawler_KISTI.py","file_name":"Crawler_KISTI.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"512248827","text":"# 1. Ref: https://www.jiuzhang.com/solution/two-sum-iv-input-is-a-bst/#tag-highlight-lang-python\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def findTarget(self, root: TreeNode, n: int) -> bool:\n if not root:\n return\n self.res = None\n node_set = set()\n self.inorder(root, n, node_set)\n return bool(self.res)\n \n def inorder(self, root, n, node_set):\n if not root:\n return\n self.inorder(root.left, n, node_set)\n if root.val in node_set:\n self.res = [n-root.val, root.val]\n else:\n node_set.add(n-root.val)\n self.inorder(root.right, n, node_set)","sub_path":"653_two_sum_iv.py","file_name":"653_two_sum_iv.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"424498824","text":"\n# -*- coding: utf-8 -*- \n\nimport sys\nimport __builtin__\nimport os\n\n\n# Globals\n#\n__builtin__.__dict__['cout'] = sys.stdout.write\n__builtin__.__dict__['flushout'] = sys.stdout.flush\n__builtin__.__dict__['cerr'] = sys.stderr.write\n__builtin__.__dict__['dbg'] = id\n\n\n# Platform adaptations\n#\nimport time\nimport platform\nsystem = platform.system()\nif system == \"Linux\":\n# __builtin__.__dict__['_'] = lambda x: x.encode('utf8')\n __builtin__.__dict__['_'] = lambda x: x\n __builtin__.__dict__['timer'] = time.time\nelif system == \"Windows\":\n __builtin__.__dict__['_'] = lambda x: x.encode('cp850')\n __builtin__.__dict__['timer'] = time.clock\nelse:\n# print \"Unknown platform %s\\n\" % system\n __builtin__.__dict__['_'] = lambda x: x\n __builtin__.__dict__['timer'] = time.clock\n\n\n# Trace function\n#\ntracetime = timer()\n\n# Disable trace\n#\nx_trace = False\n\ndef enable_trace():\n global x_trace\n x_trace = True\n\ndef disable_trace():\n global x_trace\n x_trace = False\n\ndef trace(arg=\"\"):\n global x_trace\n if not x_trace:\n return\n import inspect\n caller = inspect.stack()[1]\n global tracetime\n t = timer()\n dt = max(0, round((t-tracetime)*1000))\n tracetime = t\n cout(\"%08dms %s() %s:%d: %s\\n\" % \\\n (dt, caller[3], os.path.basename(caller[1]),\n caller[2], arg))\n__builtin__.__dict__['trace'] = trace\n__builtin__.__dict__['enable_trace'] = enable_trace\n__builtin__.__dict__['disable_trace'] = disable_trace\n\n\n# Print error message and exit with error code 1\n#\ndef die(s):\n sys.stderr.write(\"\\nFatal: \"+s+'\\n')\n sys.exit(1)\n\n__builtin__.__dict__['die'] = die\n","sub_path":"python/premain.py","file_name":"premain.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"486395530","text":"import discord\nfrom redbot.core import commands\nimport random\nimport re\nfrom functools import reduce\n\nBaseCog = getattr(commands, \"Cog\", object)\n\n\nclass Zalgo(BaseCog):\n def __init__(self, bot):\n self.bot = bot\n\n async def april_fools(message):\n # Prevent acting on DM's\n if (\n random.random() <= 0.999\n or (message.guild is None)\n or message.guild.name.lower() != \"cortex\"\n ):\n return\n\n clean_message = message.clean_content.lower()\n # MM: Added so list instead of string\n message_split = clean_message.split(\" \")\n # BLACKLIST CHANNELS\n blacklist = [\n \"news\",\n \"rpg\",\n \"events\",\n \"recommends\",\n \"politisophy\",\n \"eyebleach\",\n \"weeb-lyfe\",\n \"out-of-context\",\n \"jokes\",\n \"anime-club\",\n ]\n\n message_channel = message.channel.name.lower()\n\n if (\n # DO NOT RESPOND TO SELF MESSAGES\n (bot.user.id == message.author.id or message.content.startswith(\".\"))\n or (message.channel.name is None)\n or (\n reduce(\n lambda acc, n: acc or (n == message_channel), blacklist, False\n )\n )\n or (\"thank\" in clean_message)\n or (\"http\" in clean_message)\n ):\n return\n\n ctx = await bot.get_context(message)\n\n new_msg = self.uwuify(message.content)\n\n await ctx.message.delete()\n await ctx.send(new_msg)\n\n self.bot.add_listener(april_fools, \"on_message\")\n\n @commands.command()\n async def zalgo(self, ctx):\n \"\"\"Zalgo the text\"\"\"\n # first pull out the .zalgo part of the message\n raw_msg = \" \".join(ctx.message.clean_content.split(\" \")[1:])\n if raw_msg == \"\":\n raw_msg = \"HE COMES\"\n\n # random intensity\n intensity = random.randint(50, 150)\n\n # zalgo characters to fuck with\n zalgo_chrs = [chr(x) for x in range(0x0300, 0x036F + 1)]\n zalgo_chrs += [\"\\u0488\", \"\\u0489\"]\n\n msg_array = list(raw_msg)\n for i in range(intensity):\n index = random.randint(0, len(msg_array) - 1)\n msg_array.insert(index, random.choice(zalgo_chrs))\n\n zalgo_msg = \"\".join(msg_array)\n\n await ctx.message.delete()\n await ctx.send(zalgo_msg)\n\n def uwuify(self, msg):\n replacements = {\n \"r\": \"w\",\n \"R\": \"W\",\n \"l\": \"w\",\n \"L\": \"W\",\n \"this\": \"dis\",\n \"This\": \"Dis\",\n \"they\": \"dey\",\n \"They\": \"Dey\",\n \"there\": \"dere\",\n \"There\": \"Dere\",\n \"the\": \"da\",\n \"The\": \"Da\",\n }\n\n new_msg = msg\n for regex, replacement in replacements.items():\n new_msg, _ = re.subn(regex, replacement, new_msg)\n\n new_msg += \" *uwu*\"\n\n return new_msg\n\n @commands.command()\n async def uwu(self, ctx):\n \"\"\"uwu the text\"\"\"\n # first pull out the .zalgo part of the message\n raw_msg = \" \".join(ctx.message.content.split(\" \")[1:])\n if raw_msg == \"\":\n raw_msg = \"uwu\"\n\n new_msg = self.uwuify(raw_msg)\n\n await ctx.message.delete()\n await ctx.send(new_msg)\n\n @commands.command()\n async def spoilerify(self, ctx, *msg):\n new_msg = []\n do_it = False\n for word in msg:\n if do_it:\n new_msg.append(f\"||{word}||\")\n else:\n new_msg.append(word)\n do_it = not do_it\n new_msg = \" \".join(new_msg)\n\n await ctx.message.delete()\n await ctx.send(new_msg)\n","sub_path":"zalgo/zalgo.py","file_name":"zalgo.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"650451830","text":"\"\"\"\nA script having a set of simple functions to check graph properties.\n\n\"\"\"\n\n\nclass UndirectedGraph:\n def __init__(self, V, E):\n self.V = V\n self.E = E\n\n def __str__(self):\n graph_str = ''\n for start in self.E.keys():\n for stop in self.E[start]:\n graph_str += start + ' ' + stop + '\\n'\n return graph_str\n \n \ndef check_vertex_cover(G, S):\n \"\"\"\n Returns True if set S is a vertex cover of the edge set E,\n False otherwise.\n S is implemented as a set\n E is implemented as an adjacency list mapping a vertex to a list\n of edges\n \"\"\"\n\n for start in G.E.keys():\n if start in S:\n # vertex start is already in the cover\n # so we needn't check its corresponding edges\n continue\n for end in G.E[start]:\n if not (end in S):\n return False\n return True\n","sub_path":"GraphUtil.py","file_name":"GraphUtil.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"455997934","text":"import logging\n\n\nclass Browser:\n \"A class to fetch reports and dispatch to actors.\"\n def __init__(self, actors, subreddit, db, cursor):\n self.actors = actors\n self.subreddit = subreddit\n self.db = db\n self.cursor = cursor\n\n def check_command(self, command, mod, post):\n \"Check if any actor matches this report.\"\n for actor in self.actors:\n actor.parse(command, mod, post)\n\n def reports(self):\n \"\"\"Generator for mod reports in a subreddit.\n\n Yields tuple of report, mod name, and target.\n\n \"\"\"\n try:\n for post in self.subreddit.mod.reports(limit=None):\n for mod_report in post.mod_reports:\n yield (str(mod_report[0]), mod_report[1], post)\n except Exception as e:\n logging.error(\"Error fetching reports: {err}\".format(err=e))\n\n def run(self):\n \"Fetch reports and dispatch to actors.\"\n for command, mod, post in self.reports():\n self.check_command(command, mod, post)\n for actor in self.actors:\n actor.after()\n","sub_path":"bernard/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"488297317","text":"from dao import log_dao\nfrom handlers import installationHandler, logHandler\nfrom datetime import datetime\nimport time, config\n\n\ndef set_status_for_customers(customers):\n for customer in customers:\n set_status_for_customer(customer)\n\n\ndef set_status_for_customer(customer):\n # assume false\n customer['connected'] = False\n installations = installationHandler.get_by_customer(customer.get('id'))\n if installations:\n one_connected = set_status_for_installations(installations)\n customer['connected'] = one_connected\n customer['warnings'] = logHandler.get_warnings_customer(customer)\n customer['alarms'] = logHandler.get_alarms_customer(customer)\n\n\ndef set_status_for_installations(installations):\n one_connected = False\n for installation in installations:\n if set_status_for_installation(installation):\n one_connected = True\n return one_connected\n\n\ndef set_status_for_installation(installation):\n # assume false\n connected = installationHandler.get_connection_status_for(installation.get('serial_number'))\n installation['connected'] = connected\n installation['warnings'] = logHandler.get_warnings_installation(installation)\n installation['alarms'] = logHandler.get_alarms_installation(installation)\n return connected\n\n\ndef set_status_for_controllers(controllers):\n d2 = datetime.now()\n d2_ts = time.mktime(d2.timetuple())\n for controller in controllers:\n set_status_for_controller(controller, d2_ts)\n\n\ndef set_status_for_controller(controller, d2_ts=None):\n set_status_for_entity(controller, d2_ts)\n controller['warnings'] = logHandler.get_warnings_controller(controller)\n controller['alarms'] = logHandler.get_alarms_controller(controller)\n\n\ndef set_status_for_tags(tag_list):\n d2 = datetime.now()\n d2_ts = time.mktime(d2.timetuple())\n for tag in tag_list:\n set_status_for_entity(tag, d2_ts)\n return tag_list\n\n\n\ndef set_status_for_entity(entity, d2_ts=None):\n if not d2_ts:\n d2 = datetime.now()\n d2_ts = time.mktime(d2.timetuple())\n # assume false\n entity['connected'] = False\n d1 = entity.get('time')\n if d1:\n\n # convert to unix timestamp\n d1_ts = time.mktime(d1.timetuple())\n\n # they are now in seconds\n diff = int(d2_ts-d1_ts)\n\n # Two times the actual synch intervall\n if diff < config.RUN_SYNC_FREQUENCY * 3:\n entity['connected'] = True\n","sub_path":"portal/backend/handlers/statusHandler.py","file_name":"statusHandler.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"598483232","text":"from django.shortcuts import render, redirect, get_object_or_404, HttpResponse\nfrom django.contrib.auth.views import login\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.hashers import check_password\nfrom django.utils.text import slugify\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.views.decorators.cache import cache_control\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.utils import timezone\n\nfrom .models import Kategori, Kegiatan, Kasus, Anggota\nfrom perek.models import PersonilPerek\nfrom perek_tugas.models import BuktiLaporan\nfrom .forms import KategoriForm, KegiatanForm, KasusForm, SandiForm, ProfilForm, AnggotaForm, PersonilForm\n\n# import datetime\nimport json\n\n# data_waktu = datetime.datetime.now()\ndata_waktu = timezone.now()\n\n\ndef bantu_kegiatan(request):\n data_raw = [{'title': o.kegiatan_nama, 'description': o.kegiatan_deskripsi[:20]} for o in Kegiatan.objects.all()]\n data_json = json.dumps(data_raw)\n\n return HttpResponse(data_json, content_type='application/json')\n\n\ndef bantu_username(request):\n data_raw = [{'title': o.username} for o in User.objects.all()]\n data_json = json.dumps(data_raw)\n\n return HttpResponse(data_json, content_type='application/json')\n\n\ndef cari_kegiatan(request, slug):\n unslug = slug.replace('-', ' ')\n\n cari = Kegiatan.objects.select_related().filter(kegiatan_nama__icontains=unslug)\n data_kategori = Kategori.objects.all()\n return render(request, 'logapp/kegiatan_cari.html', {'waktu': data_waktu, 'kegiatan': cari,\n 'kategori': data_kategori})\n\n\ndef custom_login(request):\n if request.user.is_authenticated():\n return redirect('halaman_utama')\n else:\n return login(request)\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef kegiatan(request, slug=None):\n data_kategori = Kategori.objects.all()\n\n if slug is None:\n data_kegiatan = Kegiatan.objects.select_related().order_by('-kegiatan_waktu_aktif')\n else:\n data_kegiatan = Kegiatan.objects.select_related().filter(kegiatan_kategori__kategori_slug__contains=slug) \\\n .order_by('-kegiatan_waktu_aktif')\n\n paginator = Paginator(data_kegiatan, 14, 1)\n page = request.GET.get('halaman')\n\n try:\n keg = paginator.page(page)\n\n except PageNotAnInteger:\n keg = paginator.page(1)\n\n except EmptyPage:\n keg = paginator.page(paginator.num_pages)\n\n maks = len(paginator.page_range)\n\n start_number = keg.number - 3 if keg.number >= 4 else 0\n end_number = keg.number + 2 if keg.number <= maks else maks\n page_range = paginator.page_range[start_number:end_number]\n\n return render(request, 'logapp/kegiatan.html', {'kategori': data_kategori, 'kegiatan': keg,\n 'waktu': data_waktu, 'page_range': page_range})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required\ndef kegiatan_tambah(request):\n if request.method == 'POST':\n # Slug operation\n max_length = Kegiatan._meta.get_field('kegiatan_slug').max_length\n slug_ori = slugify(request.POST['kegiatan_nama'])[:max_length]\n\n kgtn = Kegiatan(kegiatan_slug=slug_ori, kegiatan_waktu=data_waktu)\n\n form_kegiatan = KegiatanForm(request.POST, instance=kgtn)\n if form_kegiatan.is_valid():\n form_kegiatan.save()\n return redirect('halaman_kegiatan')\n\n else:\n form_kegiatan = KegiatanForm()\n form_kegiatan.fields['kegiatan_owner'].queryset = User.objects.filter(username=request.user.username)\n\n return render(request, 
'logapp/kegiatan_tambah.html', {'FormKegiatan': form_kegiatan, 'waktu': data_waktu})\n\n\n@login_required\ndef kegiatan_edit(request, slug):\n kgtn_edit = get_object_or_404(Kegiatan, kegiatan_slug=slug)\n\n if request.method == 'POST':\n # Slug operation\n max_length = Kegiatan._meta.get_field('kegiatan_slug').max_length\n slug_ori = slugify(request.POST['kegiatan_nama'])[:max_length]\n\n kgtn_edit.kegiatan_slug = slug_ori\n kgtn_edit.kegiatan_waktu_aktif = data_waktu\n\n form_kegiatan = KegiatanForm(request.POST, instance=kgtn_edit)\n if form_kegiatan.is_valid():\n if request.POST.get('kegiatan_ada_stkk') != 'on':\n kgtn_edit.kegiatan_kode_stkk = ''\n form_kegiatan.save()\n return redirect('halaman_kegiatan')\n\n else:\n form_kegiatan = KegiatanForm(instance=kgtn_edit)\n\n return render(request, 'logapp/kegiatan_tambah.html', {'FormKegiatan': form_kegiatan, 'waktu': data_waktu})\n\n\n@login_required\ndef kegiatan_hapus(request, slug):\n hapus_kegiatan = get_object_or_404(Kegiatan, kegiatan_slug=slug)\n kegiatan_punya = hapus_kegiatan.kegiatan_owner\n kategori_bersangkutan = hapus_kegiatan.kegiatan_kategori.kategori_slug\n\n if request.user == kegiatan_punya or request.user.is_superuser is True:\n hapus_kegiatan.delete()\n\n return redirect('saring_kegiatan', slug=kategori_bersangkutan)\n\n else:\n return redirect('halaman_logout')\n\n\n@login_required\ndef kategori_pindah(request, nama_kegiatan, slug):\n kegiatan_pindah = get_object_or_404(Kegiatan, kegiatan_slug=nama_kegiatan)\n kegiatan_punya = kegiatan_pindah.kegiatan_owner\n kategori_tujuan = get_object_or_404(Kategori, kategori_slug=slug)\n\n if request.user == kegiatan_punya or request.user.is_superuser is True:\n kegiatan_pindah.kegiatan_kategori = kategori_tujuan\n kegiatan_pindah.save()\n return redirect('saring_kegiatan', slug=kategori_tujuan.kategori_slug)\n\n else:\n return redirect('halaman_logout')\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef kategori_lihat(request):\n pilih_kategori = Kategori.objects.all().order_by('kategori_nama')\n pilih_kategori_kegiatan = Kegiatan.objects.select_related()\n\n return render(request, 'logapp/kategori.html', {'kegiatan': pilih_kategori_kegiatan, 'kategori': pilih_kategori,\n 'waktu': data_waktu})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required\ndef kategori_tambah(request):\n if request.method == 'POST':\n # Slug operation\n max_length = Kategori._meta.get_field('kategori_slug').max_length\n slug_ori = slugify(request.POST['kategori_nama'])[:max_length]\n\n slug = Kategori(kategori_slug=slug_ori)\n form_kategori = KategoriForm(request.POST, instance=slug)\n\n if form_kategori.is_valid():\n form_kategori.save()\n return redirect('lihat_kategori')\n else:\n form_kategori = KategoriForm()\n\n return render(request, 'logapp/kategori_tambah.html', {'FormKategori': form_kategori, 'waktu': data_waktu})\n\n\n@login_required\ndef kategori_edit(request, slug):\n kategori = get_object_or_404(Kategori, kategori_slug=slug)\n\n if request.method == 'POST':\n # Slug operation\n max_length = Kategori._meta.get_field('kategori_slug').max_length\n slug_ori = slugify(request.POST['kategori_nama'])[:max_length]\n\n kategori.kategori_slug = slug_ori\n form_kategori = KategoriForm(request.POST, instance=kategori)\n\n if form_kategori.is_valid():\n form_kategori.save()\n return redirect('lihat_kategori')\n\n else:\n form_kategori = KategoriForm(instance=kategori)\n\n return render(request, 'logapp/kategori_tambah.html', {'FormKategori': 
form_kategori, 'waktu': data_waktu})\n\n\n@login_required\ndef kategori_hapus(request, slug):\n kategori = get_object_or_404(Kategori, kategori_slug=slug)\n kategori.delete()\n\n return redirect('lihat_kategori')\n\n\ndef kasus_lihat(request, slug):\n data_kasus = Kasus.objects.select_related().filter(kasus_kegiatan__kegiatan_slug__contains=slug) \\\n .order_by('-kasus_dibuat')\n\n data_kegiatan = get_object_or_404(Kegiatan, kegiatan_slug=slug)\n member = User.objects.filter(anggota__nama_kegiatan__id=data_kegiatan.pk)\n\n kasus_total = data_kasus.count()\n if kasus_total == 0:\n kasus_total = 1\n kasus_selesai = data_kasus.filter(kasus_status=1).count()\n persentase = (kasus_selesai / kasus_total) * 100\n\n paginator = Paginator(data_kasus, 15, 1)\n page = request.GET.get('halaman')\n\n try:\n kas = paginator.page(page)\n\n except PageNotAnInteger:\n kas = paginator.page(1)\n\n except EmptyPage:\n kas = paginator.page(paginator.num_pages)\n\n maks = len(paginator.page_range)\n\n start_number = kas.number - 3 if kas.number >= 4 else 0\n end_number = kas.number + 2 if kas.number <= maks else maks\n page_range = paginator.page_range[start_number:end_number]\n\n return render(request, 'logapp/kasus.html', {'waktu': data_waktu, 'kasus': kas, 'kegiatan': data_kegiatan,\n 'page_range': page_range, 'persentase': persentase, 'member': member})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required\ndef kasus_tambah(request, slug):\n keg = get_object_or_404(Kegiatan, kegiatan_slug=slug)\n member = User.objects.filter(anggota__nama_kegiatan__id=keg.pk)\n\n if request.method == 'POST':\n form_kasus = KasusForm(request.POST)\n\n if form_kasus.is_valid():\n tambahan = form_kasus.save(commit=False)\n tambahan.kasus_kegiatan = keg\n tambahan.kasus_anggota = request.user\n keg.kegiatan_waktu_aktif = timezone.now()\n\n if keg.kegiatan_owner == request.user or request.user in member:\n keg.save()\n tambahan.save()\n else:\n return redirect('halaman_logout')\n\n return redirect('halaman_kasus', slug=slug)\n else:\n form_kasus = KasusForm(instance=keg)\n\n if request.user == keg.kegiatan_owner or request.user in member:\n return render(request, 'logapp/kasus_tambah.html', {'waktu': data_waktu, 'FormKasus': form_kasus, 'keg': keg})\n else:\n return render(request, 'logapp/halaman_error.html', {'waktu': data_waktu})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required\ndef kasus_ubah(request, pk):\n kas = get_object_or_404(Kasus, pk=pk)\n keg = kas.kasus_kegiatan\n slug = kas.kasus_kegiatan.kegiatan_slug\n member = User.objects.filter(anggota__nama_kegiatan__id=keg.pk)\n\n if request.method == 'POST':\n form_kasus = KasusForm(request.POST, instance=kas)\n\n if form_kasus.is_valid():\n tambahan = form_kasus.save(commit=False)\n tambahan.kasus_kegiatan = keg\n keg.kegiatan_waktu_aktif = timezone.now()\n\n if keg.kegiatan_owner == request.user or request.user in member:\n keg.save()\n tambahan.save()\n\n return redirect('halaman_kasus', slug=slug)\n else:\n form_kasus = KasusForm(instance=kas)\n\n if request.user == keg.kegiatan_owner or request.user in member:\n return render(request, 'logapp/kasus_tambah.html', {'waktu': data_waktu, 'FormKasus': form_kasus, 'keg': keg})\n else:\n return render(request, 'logapp/halaman_error.html', {'waktu': data_waktu})\n\n\n@login_required\ndef kasus_hapus(request, pk):\n kas = get_object_or_404(Kasus, pk=pk)\n slug = kas.kasus_kegiatan.kegiatan_slug\n keg = kas.kasus_kegiatan\n member = 
User.objects.filter(anggota__nama_kegiatan__id=keg.pk)\n\n if request.user == keg.kegiatan_owner or request.user == kas.kasus_anggota and request.user in member:\n kas.delete()\n else:\n return redirect('halaman_logout')\n\n return redirect('halaman_kasus', slug=slug)\n\n\n@login_required\ndef lihat_setel(request):\n kegiatan_owner = Kegiatan.objects.filter(kegiatan_owner=request.user)\n data_bukti_laporan = BuktiLaporan.objects.select_related().order_by('-laporan_tanggal').filter(\n laporan_jenis__in=['IJZ', 'SRF', 'MP', 'BUK', 'MKL', 'DKM', 'STG', 'PAK', 'TJ', 'PGN'],\n laporan_kepemilikan=request.user)\n\n paginator_bukti = Paginator(data_bukti_laporan, 5, 2)\n\n halaman_bukti = request.GET.get('halaman_bukti')\n\n paginator = Paginator(kegiatan_owner, 15, 1)\n page = request.GET.get('halaman')\n\n try:\n keg = paginator.page(page)\n\n except PageNotAnInteger:\n keg = paginator.page(1)\n\n except EmptyPage:\n keg = paginator.page(paginator.num_pages)\n\n try:\n bukti_laporan = paginator_bukti.page(halaman_bukti)\n\n except PageNotAnInteger:\n bukti_laporan = paginator_bukti.page(1)\n\n except EmptyPage:\n bukti_laporan = paginator_bukti.page(paginator_bukti.num_pages)\n\n maks = len(paginator.page_range)\n\n start_number = keg.number - 3 if keg.number >= 4 else 0\n end_number = keg.number + 2 if keg.number <= maks else maks\n page_range = paginator.page_range[start_number:end_number]\n\n data = {\n 'waktu': data_waktu,\n 'kegiatan': keg,\n 'page_range': page_range,\n 'bukti_laporan': bukti_laporan\n }\n\n return render(request, 'logapp/setel.html', data)\n\n\n@login_required\ndef ganti_sandi(request):\n if request.method == 'POST':\n form = SandiForm(request.POST)\n\n if form.is_valid():\n u = User.objects.get(username=request.user.username)\n p = u.password\n\n if check_password(request.POST['SandiLama'], p):\n sandi_baru = request.POST['SandiBaru']\n sandi_cek = request.POST['CekSandiBaru']\n\n if sandi_baru == sandi_cek:\n u.set_password(sandi_baru)\n u.save()\n\n messages.success(request, 'Selamat, sandi yang baru berhasil disimpan. 
Silahkan login kembali '\n 'dengan sandi baru')\n else:\n messages.info(request, 'Konfirmasi sandi baru gagal!')\n else:\n messages.info(request, 'Sandi lama salah!')\n\n return redirect('ganti_sandi')\n\n else:\n form = SandiForm()\n\n return render(request, 'registration/ganti_sandi.html', {'waktu': data_waktu, 'form': form})\n\n\n@login_required\ndef ganti_profil(request):\n data_profil = User.objects.get(username=request.user.username)\n cek = PersonilPerek.objects.filter(nama=request.user).count()\n\n if cek != 0:\n data_personil_perek = PersonilPerek.objects.get(nama=request.user)\n else:\n data_personil_perek = PersonilPerek(nama=request.user)\n\n if request.method == 'POST':\n form = ProfilForm(request.POST)\n\n # data_personil_perek.nama = request.user\n form_personil = PersonilForm(request.POST, instance=data_personil_perek)\n\n if form.is_valid() and form_personil.is_valid():\n data_profil.first_name = request.POST['NamaDepan']\n data_profil.last_name = request.POST['NamaBelakang']\n data_profil.email = request.POST['Email']\n data_profil.save()\n form_personil.save()\n\n '''if cek == 0:\n if form_personil.is_valid():\n form_personil.save()'''\n\n messages.success(request, 'Data profil berhasil diubah.')\n\n return redirect('halaman_setel')\n else:\n data = {\n 'NamaDepan': data_profil.first_name,\n 'NamaBelakang': data_profil.last_name,\n 'Email': data_profil.email,\n }\n form = ProfilForm(initial=data)\n\n if cek != 0:\n form_personil = PersonilForm(instance=data_personil_perek)\n else:\n form_personil = PersonilForm()\n\n return render(request, 'registration/ganti_profil.html', {'waktu': data_waktu, 'form': form,\n 'formPersonil': form_personil})\n\n\n@login_required\ndef anggota_tambah(request, slug):\n data_kegiatan = Kegiatan.objects.get(kegiatan_slug=slug)\n data_kegiatan_anggota = data_kegiatan.anggota_set.all().count()\n member = User.objects.filter(anggota__nama_kegiatan__id=data_kegiatan.pk)\n\n if data_kegiatan_anggota > 0:\n data_anggota = Anggota.objects.get(nama_kegiatan=data_kegiatan)\n else:\n data_anggota = Anggota(nama_kegiatan=data_kegiatan)\n\n if request.method == 'POST':\n form = AnggotaForm(request.POST, instance=data_anggota)\n if request.POST.get('anggota_kegiatan') is None:\n data_mau_hapus = Anggota.objects.filter(nama_kegiatan__pk=data_kegiatan.pk)\n\n if data_mau_hapus.count() != 0:\n data_mau_hapus.delete()\n return redirect('halaman_setel')\n else:\n messages.info(request, 'Isikan nama anggota terlebih dahulu.')\n else:\n if form.is_valid():\n form.save()\n\n return redirect('halaman_setel')\n\n else:\n form = AnggotaForm(instance=data_anggota)\n form.fields['anggota_kegiatan'].queryset = User.objects.exclude(username=request.user.username)\n\n if request.user == data_kegiatan.kegiatan_owner or request.user in member:\n return render(request, 'logapp/anggota_tambah.html', {'waktu': data_waktu, 'form': form})\n else:\n return render(request, 'logapp/halaman_error.html', {'waktu': data_waktu})\n","sub_path":"logapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"538162879","text":"# -*- coding: utf-8 -*-\nfrom alipay import AliPay\n\n\nclass MyAlipay(AliPay):\n def api_alipay_trade_wap_pay(\n self, subject, out_trade_no, total_amount,\n return_url=None, notify_url=None, **kwargs\n ):\n biz_content = {\n \"subject\": subject,\n \"out_trade_no\": out_trade_no,\n \"total_amount\": total_amount,\n \"product_code\": \"QUICK_WAP_PAY\"\n }\n biz_content.update(kwargs)\n data = self.build_body(\n \"alipay.trade.wap.pay\",\n biz_content,\n return_url=return_url,\n notify_url=notify_url\n )\n\n return self.sign_data(data)\n\n def api_alipay_trade_page_pay(self, subject, out_trade_no, total_amount,\n return_url=None, notify_url=None, **kwargs):\n biz_content = {\n \"subject\": subject,\n \"out_trade_no\": out_trade_no,\n \"total_amount\": total_amount,\n \"product_code\": \"FAST_INSTANT_TRADE_PAY\"\n }\n\n biz_content.update(kwargs)\n data = self.build_body(\n \"alipay.trade.page.pay\",\n biz_content,\n return_url=return_url,\n notify_url=notify_url\n )\n return self.sign_data(data)\n\n def sign_data(self, data):\n data.pop(\"sign\", None)\n # 排序后的字符串\n unsigned_items = self._ordered_data(data)\n unsigned_string = \"&\".join(\"{}={}\".format(k, v) for k, v in unsigned_items if v)\n # unsigned_string = \"&\".join(\"{}={}\".format(k, v) for k, v in unsigned_items)\n sign = self._sign(unsigned_string)\n data['sign'] = sign\n\n return data\n # ordered_items = self._ordered_data(data)\n # quoted_string = \"&\".join(\"{}={}\".format(k, quote_plus(v)) for k, v in ordered_items)\n #\n # # 获得最终的订单信息字符串\n # signed_string = quoted_string + \"&sign=\" + quote_plus(sign)\n # return signed_string\n\n def build_body(\n self, method, biz_content, return_url=None, notify_url=None, append_auth_token=False\n ):\n data = super(MyAlipay, self).build_body(method, biz_content, return_url, notify_url, append_auth_token)\n if not return_url:\n data.pop('return_url', None)\n\n return data\n\n\n # def _my_verify(self, raw_content, signature, charset):\n # # 开始计算签名\n # key = self.alipay_public_key\n # signer = PKCS1_v1_5.new(key)\n # if self._sign_type == \"RSA\":\n # digest = SHA.new()\n # else:\n # digest = SHA256.new()\n # digest.update(raw_content.encode(charset))\n # if signer.verify(digest, decodebytes(signature.encode(charset))):\n # return True\n # return False\n #\n # def my_verify(self, data, signature, charset):\n # if \"sign_type\" in data:\n # sign_type = data.pop(\"sign_type\")\n # if sign_type != self._sign_type:\n # raise AliPayException(None, \"Unknown sign type: {}\".format(sign_type))\n # # 排序后的字符串\n # unsigned_items = self._ordered_data(data)\n # message = u\"&\".join(u\"{}={}\".format(k, v) for k, v in unsigned_items)\n # return self._my_verify(message, signature, charset)\n\n\n","sub_path":"my_addons/anxe_customer/controllers/my_alipay.py","file_name":"my_alipay.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"303486253","text":"\"\"\" Read and preprocess video data.\n Video processing occurs on a single video at a time. Video are read and\n preprocessed in parallel across multiple threads. The resulting videos\n are concatenated together to form a single batch for training or \n evaluation.\n -- Provide processed video data for a network:\n inputs: Construct batches of evaluation examples of videos.\n distorted_inputs: Construct batches of training examples of videos.\n batch_inputs: Construct batches of training or evaluation examples of \n videos.\n -- Data processing:\n parse_example_proto: Parses an Example proto containing a training example\n of a video.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef decode_jpeg(image_buffer, scope=None):\n \"\"\"Decode a JPEG string into one 3-D float image Tensor.\n Args:\n image_buffer: scalar string Tensor.\n scope: Optional scope for op_scope.\n Returns:\n 3-D float Tensor with values ranging from [0, 1).\n \"\"\"\n with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):\n # Decode the string as an RGB JPEG.\n # Note that the resulting image contains an unknown height and width\n # that is set dynamically by decode_jpeg. In other words, the height\n # and width of image is unknown at compile-time.\n image = tf.image.decode_jpeg(image_buffer, channels=3)\n\n # After this point, all image pixels reside in [0,1)\n # until the very end, when they're rescaled to (-1, 1). The various\n # adjust_* ops all require this range for dtype float.\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n # Crop the central region of the image with an area containing 87.5% of\n # the original image.\n image = tf.image.central_crop(image, central_fraction=0.875)\n\n # Resize the image to the original height and width.\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [FLAGS.image_height, FLAGS.image_width],\n align_corners=False)\n image = tf.squeeze(image, [0])\n return image\n\n\ndef decode_video(video_buffer):\n \"\"\"Decode list of string Tensor into list of 3-D float image Tensor.\n Args:\n video_buffer: tensor, shape [num_steps].\n Returns:\n list of 3-D float Tensor with values ranging from [0, 1).\n \"\"\"\n # Decode the images of one video\n return tf.map_fn(decode_jpeg, video_buffer, dtype=tf.float32)\n\n\ndef inputs(dataset, config, num_preprocess_threads=4):\n \"\"\" Generate batches of videos for evaluation.\n Use this function as the inputs for evaluating a network.\n Note that some (minimal) video preprocessing occurs during evaluation\n including central cropping and resizing of the video to fit the network.\n Args:\n dataset: instance of Dataset class specifying the dataset.\n config: class, the configuration setting\n num_preprocess_threads: integer, total number of preprocessing threads\n defaults to 4.\n Returns:\n videos: 2-D string Tensor of [batch_size, num_steps] a batch of \n video, each video is a dictionary containing strings providing \n JPEG encoding of all the images of a video clip\n labels: 1-D integer Tensor of [batch_size].\n filenames: 1-D integer Tensor of [batch_size].\n \"\"\"\n # Force all input processing onto CPU in order to reserve the GPU for\n # the forward inference and back-propagation.\n with tf.device('/cpu:0'):\n videos, labels, filenames = batch_inputs(\n dataset, config, train=False,\n 
num_preprocess_threads=num_preprocess_threads)\n return videos, labels, filenames\n\n\ndef distorted_inputs(dataset, config, num_preprocess_threads=4):\n \"\"\" Generate batches of distorted versions of videos.\n Use this function as the inputs for training a network.\n Distorting videos provides a useful technique for augmenting the data\n set during training in order to make the network invariant to aspects\n of the video that do not effect the label.\n Args:\n dataset: instance of Dataset class specifying the dataset.\n batch_size: integer, number of examples in batch\n num_preprocess_threads: integer, total number of preprocessing threads\n defaults to 4.\n Returns:\n videos: 2-D string Tensor of [batch_size, num_steps] a batch of \n video, each video is a dictionary containing strings providing \n JPEG encoding of all the images of a video clip\n labels: 1-D integer one host Tensor of [batch_size].\n filenames: 1-D integer Tensor of [batch_size].\n \"\"\"\n # Force all input processing onto CPU in order to reserve the GPU for\n # the forward inference and back-propagation.\n videos, labels_one_hot, filenames = batch_inputs(\n dataset, config, train=True,\n num_preprocess_threads=num_preprocess_threads)\n return videos, labels_one_hot, filenames\n\n\ndef video_preprocessing(image_features):\n \"\"\" Transfer dictionary to tensor type\n Args:\n image_features: dictionary contains, Tensor tf.string containing the \n contents of all the JPEG file of a video.\n Returns:\n resutl: 4-D float Tensor containing an appropriately list of scaled image\n [num_steps, encoded JPEG string]\n Raises:\n ValueError: if user does not provide bounding box\n \"\"\"\n # convert the image_features dictionary to array\n images = []\n tmp_dict = {}\n for key, value in image_features.items():\n tmp_dict[int(key[-3:])] = image_features[key]\n image_features.clear()\n for index in range(len(tmp_dict)):\n images.append(tmp_dict[index])\n\n # transfer the images list into a tensor\n for idx, image in enumerate(images):\n images[idx] = tf.expand_dims(image, 0)\n result = tf.concat(images, 0)\n return result\n\n\ndef parse_example_proto(example_serialized, num_steps):\n \"\"\" Parses an Example proto containing a training example of a video clip.\n The output of the convert_to_records.py video preprocessing script is a \n dataset containing serialized Example protocol buffers. 
Each Example proto \n contains the following fields:\n image/height: 200\n image/width: 100\n image/colorspace: 'RGB'\n image/channels: 3\n image/class/label: 2\n image/class/text: 'walking'\n image/format: 'JPEG'\n image/filename: '00001.JPEG'\n raw/image/001: \n ...\n raw/image/n: \n Args:\n example_serialized: scalar Tensor tf.string containing a serialized\n Example protocol buffer.\n Returns:\n image_features: A dictionary containing strings providing JPEG\n encoding of all the images of a video clip.\n label: Tensor tf.int32 containing the label.\n text: Tensor tf.string containing the human-readable label.\n filename: the filename of the image\n \"\"\"\n # Dense features in Example proto.\n feature_map = {\n 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,\n default_value=-1),\n 'image/class/text': tf.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n 'image/filename': tf.FixedLenFeature([], dtype=tf.string,\n default_value='')\n }\n features = tf.parse_single_example(example_serialized, feature_map)\n label = tf.cast(features['image/class/label'], dtype=tf.int32)\n # subtract the label value by 1, becuae the previous label value range\n # from(1..n)\n label = tf.subtract(label, tf.constant(1))\n\n # images data in the Example proto\n image_map = {}\n for index in range(num_steps):\n image_map['raw/image/%03d' % index] = tf.FixedLenFeature(\n [],\n dtype=tf.string,\n default_value='')\n image_features = tf.parse_single_example(example_serialized, image_map)\n\n return (image_features,\n label,\n features['image/class/text'],\n features['image/filename'])\n\n\ndef batch_inputs(dataset, config, train, num_preprocess_threads=4):\n \"\"\"Contruct batches of training or evaluation examples from the video\n dataset.\n Args:\n dataset: instance of Dataset class specifying the dataset. 
See\n dataset.py for details.\n config: class, configuration\n train: boolean, shuffle the dataset or not\n num_preprocess_threads: integer, total number of preprocessing threads\n Returns:\n videos: 2-D string Tensor of [batch_size, num_steps] a batch of\n video, each video is a dictionary containing strings providing\n JPEG encoding of all the images of a video clip\n labels: 1-D integer Tensor of [batch_size].\n filenames: an array contains all the filenames\n Raises:\n ValueError: if data is not found\n \"\"\"\n with tf.name_scope('batch_processing'):\n batch_size = config['batch_size']\n data_files = dataset.data_files()\n if data_files is None:\n raise ValueError('No data files found for this dataset')\n\n # Create filename_queue\n if train:\n filename_queue = tf.train.string_input_producer(data_files,\n shuffle=True,\n capacity=16)\n else:\n filename_queue = tf.train.string_input_producer(data_files,\n shuffle=False,\n capacity=1)\n\n if num_preprocess_threads % 4:\n raise ValueError('Please make num_preprocess_threads a multiple '\n 'of 4 (%d % 4 != 0).', num_preprocess_threads)\n\n reader = dataset.reader()\n _, example_serialized = reader.read(filename_queue)\n\n videos_and_labels_and_filenames = []\n\n # Parse a serialized Example proto to extract the video and metadata.\n image_features, label_index, text, filename = parse_example_proto(\n example_serialized, config['num_steps'])\n video = video_preprocessing(image_features)\n videos_and_labels_and_filenames.append([video,\n label_index,\n filename])\n\n videos, label_index_batch, filename_batch = tf.train.batch_join(\n videos_and_labels_and_filenames,\n batch_size=batch_size,\n capacity=2 * num_preprocess_threads * batch_size)\n\n # Convert the label to one hot vector\n labels = tf.reshape(label_index_batch, [batch_size])\n labels_one_hot = tf.one_hot(labels, dataset.num_classes(), 1, 0)\n\n return (videos,\n labels_one_hot,\n tf.reshape(filename_batch, [batch_size]))","sub_path":"data/video_processing.py","file_name":"video_processing.py","file_ext":"py","file_size_in_byte":11007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"346058118","text":"import torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nfrom trainers import ImageClassifierTrainer\nfrom utils import join_path\nfrom models import SmallClassifier\nfrom datasets import CIFAR10\n\n\ndef main():\n #########################\n # (0) hard code configs #\n #########################\n DATA_BASE_DIR = join_path('datasets', 'cifar10')\n DATA_SET_NAME = 'CIFAR10'\n BATCH_SIZE = 64\n NUM_WORKERS = 8\n TRAIN_STEPS = 1000\n VAL_EVERY = 100\n LOG_EVERY = 50\n NAME = 'SmallCNN'\n CUDA = 3\n RUN_ID = 'example'\n PLOT_EVERY = 500\n NUM_CLASSES = 10\n DEBUG = False\n\n #######################\n # (1) Define datasets #\n #######################\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize(\n mean=(0.4914, 0.4822, 0.4465),\n std=(0.2470, 0.2435, 0.2616)\n )])\n\n train_set = CIFAR10(root=DATA_BASE_DIR, train=True,\n download=True, transform=transform)\n train_loader = DataLoader(train_set, batch_size=BATCH_SIZE,\n shuffle=True, num_workers=NUM_WORKERS)\n\n test_set = CIFAR10(root=DATA_BASE_DIR, train=False,\n download=True, transform=transform)\n test_loader = DataLoader(test_set, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=NUM_WORKERS)\n\n ####################\n # (2) Define model #\n ####################\n model = SmallClassifier(num_classes=NUM_CLASSES)\n\n ###############################################################\n # (3) Define loss function inside the Trainer's step function #\n ###############################################################\n\n ########################\n # (4) Define optimizer #\n ########################\n optimizer = torch.optim.SGD(params=model.parameters(), lr=0.001)\n\n ####################\n # (5) Init trainer #\n ####################\n trainer = ImageClassifierTrainer(model=model,\n name=NAME,\n optimizer=optimizer,\n dataset_name=DATA_SET_NAME,\n train_loader=train_loader,\n train_steps=TRAIN_STEPS,\n val_every=VAL_EVERY,\n val_loader=test_loader, # use test set for validation\n log_every=LOG_EVERY,\n run_id=RUN_ID,\n cuda=CUDA,\n plot_every=PLOT_EVERY,\n debug=DEBUG)\n\n ######################\n # (6) Begin Training #\n ######################\n trainer.train()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"exp1_CNNImageClassifier.py","file_name":"exp1_CNNImageClassifier.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"205854565","text":"# 정은\n\nimport sys\nsys.stdin = open('input.txt','r')\n\nkeys = ['0001101', '0011001', '0010011', '0111101', '0100011',\n '0110001', '0101111', '0111011', '0110111', '0001011']\n\nT = int(input())\nfor t in range(1, T + 1):\n N, M = map(int, input().split())\n G = []\n for i in range(N):\n G.append(input())\n\n r = []\n for i in G:\n j = len(i)\n while j > 0:\n if int(i) == 0: break\n if i[j - 7:j] in keys:\n r.append(keys.index(i[j - 7:j]))\n j -= 7\n continue\n j -= 1\n else:\n break\n\n sum1 = result = ans = 0\n\n for i in range(len(r)):\n if i % 2 == 1:\n sum1 += r[i]\n else:\n result += r[i]\n result += sum1 * 3\n\n if result % 10 == 0:\n for i in r:\n ans += i\n else:\n ans = 0\n print(f\"#{t} {ans}\")","sub_path":"Algorithm/19.03/20/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"244576197","text":"import random\nimport string\nfrom random import randbytes\n\nimport base91\n\nTEXT = \"The quick brown\\r\\nfox\\tjumps\\nover\\rthe lazy\\n\\rdog!\"\nPANGRAM = \"Thequickbrownfoxjumpsoverthelazydog!\"\nDATA = [88, 136, 162, 112, 31, 156, 195, 75, 208, 5, 61, 106, 20, 163, 227, 172, 240, 150, 163, 100, 63, 170, 82,\n 175, 58, 17, 203, 5, 3]\n\n\ndef test_static_alphabet():\n assert len(base91.BASE91_ALPHABET) == base91.BASE91_LEN\n\n\ndef test_static_decode():\n data = base91.decode(TEXT)\n assert list(data) == DATA\n\n\ndef test_static_encode():\n text = base91.encode(bytes(DATA))\n assert text == PANGRAM\n\n\ndef test_refurbish_small():\n for n in range(33):\n original_data = randbytes(n)\n text = base91.encode(original_data)\n refurbish_data = base91.decode(text)\n assert len(original_data) == len(refurbish_data)\n assert original_data == refurbish_data, text\n\n\ndef test_refurbish_large():\n original_data = randbytes(65536)\n text = base91.encode(original_data)\n refurbish_data = base91.decode(text)\n assert len(original_data) == len(refurbish_data)\n assert original_data == refurbish_data, text\n\n\ndef test_stress_full_decode():\n text = \"\"\n text_size = random.randint(0, 65536)\n while text_size > len(text):\n text += chr(random.randint(0, 0x10FFFF))\n assert len(text) == text_size\n data = base91.decode(text)\n assert len(data) <= text_size\n\n\ndef test_stress_ascii_decode():\n text = \"\"\n text_size = random.randint(0, 65536)\n while text_size > len(text):\n text += random.choice(string.printable)\n assert len(text) == text_size\n data = base91.decode(text)\n assert len(data) <= text_size\n","sub_path":"test_base91.py","file_name":"test_base91.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"223423827","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api\n\n\nclass EvaluationLineBtp(models.Model):\n _name = 'btp_price_survey.abstract_evaluation.line'\n _inherit = 'btp_price_survey.weekly_planning'\n\n stage_id = fields.Many2one('btp_price_survey.stage', string='Étape')\n execution_id = fields.Many2one('btp_price_survey.execution', string='Suivi de chantier', ondelete=\"cascade\")\n forecast_id = fields.Many2one('btp_price_survey.forecast', string='Prévision', ondelete=\"cascade\")\n analytical_account = fields.Many2one(related='stage_id.analytical_account', string='N° compte analytique')\n unit = fields.Many2one(related='stage_id.unit', string='Unité')\n unit_price = fields.Float('Prix unitaire')\n weekly_quantity = fields.Float('Qté hebdo', compute='_compute_weekly_quantity', store=True)\n weekly_amount = fields.Float('Mnt hebdo', compute='_compute_weekly_amount', store=True)\n global_quantity = fields.Float('Qté globale', compute='_compute_global_quantity', store=True)\n global_amount = fields.Float('Mnt global', compute='_compute_global_amount', store=True)\n\n @api.one\n @api.depends('monday_quantity', 'tuesday_quantity', 'wednesday_quantity', 'thursday_quantity', 'friday_quantity',\n 'saturday_quantity', 'sunday_quantity')\n def _compute_weekly_quantity(self):\n self.weekly_quantity = self.monday_quantity + self.tuesday_quantity + self.wednesday_quantity + self.thursday_quantity + self.friday_quantity + self.saturday_quantity + self.sunday_quantity\n\n @api.one\n @api.depends('weekly_quantity', 'unit_price')\n def _compute_weekly_amount(self):\n self.weekly_amount = self.unit_price * self.weekly_quantity\n\n @api.one\n @api.depends('stage_id', 'weekly_quantity')\n def _compute_global_quantity(self):\n evaluation_obj = None\n if self.forecast_id:\n evaluation_obj = self.env['btp_price_survey.forecast'].search(\n [('construction_site_id', '=', self.forecast_id.construction_site_id.id), ('state', '=', 'close')],\n limit=1,\n order='id desc')\n if self.execution_id:\n evaluation_obj = self.env['btp_price_survey.execution'].search(\n [('construction_site_id', '=', self.execution_id.construction_site_id.id), ('state', '=', 'close')],\n limit=1,\n order='id desc')\n if not evaluation_obj:\n self.global_quantity = self.weekly_quantity\n else:\n for line in evaluation_obj.line_ids:\n if line.stage_id == self.stage_id:\n self.global_quantity = line.global_quantity + self.weekly_quantity\n\n @api.onchange('stage_id')\n def _onchange_unit_price(self):\n if self.stage_id:\n self.unit_price = self.stage_id.price_total\n\n @api.one\n @api.depends('global_quantity', 'unit_price')\n def _compute_global_amount(self):\n self.global_amount = self.unit_price * self.global_quantity\n","sub_path":"Btp/addons/btp_price_survey/models/evaluation_line.py","file_name":"evaluation_line.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"460125137","text":"import pandas as pd\n# import geopandas as gpd\n\n\nclass TrackConverter():\n\n \"\"\"Handles the envirocar Tracks\"\"\"\n\n def __init__(self):\n print(\"Initializing TrackConverter class\")\n # self.track = track\n # self.crs = track.crs\n\n \"\"\" Returns a geoDataFrame object with the movingpandas plain format\"\"\"\n\n def to_movingpandas(self, track):\n\n # gdf = self.track.copy()\n gdf = track\n gdf = gdf.reindex(columns=(['geometry'] + list([a for a in sorted(\n gdf.columns) if a != 'geometry'])), copy=True)\n gdf['time'] = gdf['time'].astype('datetime64[ns]')\n gdf.set_index('time', inplace=True)\n gdf.index.rename('t', inplace=True)\n return (gdf)\n\n \"\"\" Returns a dataFrame object with the scikitmobility plain format\"\"\"\n\n def to_scikitmobility(self):\n gdf = self.track.copy()\n gdf['lat'] = gdf.geometry.x\n gdf['lng'] = gdf.geometry.y\n gdf.rename(columns=({\"time\": \"datetime\", 'sensor.id': 'uid',\n 'track.id': 'tid'}), inplace=True)\n gdf['datetime'] = gdf['datetime'].astype('datetime64[ns]')\n gdf['tid'] = gdf['tid'].astype(str)\n gdf['uid'] = gdf['uid'].astype(str)\n columns = ['uid', 'tid', 'lat', 'lng', 'datetime']\n gdf = gdf.reindex(columns=(columns + list([a for a in sorted(\n gdf.columns) if a not in columns])), copy=True)\n df = pd.DataFrame(gdf)\n return(df)\n","sub_path":"envirocar/trajectories/track_converter.py","file_name":"track_converter.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"130575822","text":"import conf\r\nimport json,time\r\nimport requests\r\nfrom boltiot import Sms,Bolt\r\nselling_price = 600000.00\r\napi_key = \"3504af71-041a-45c4-b79f-27aa81c2e572\"\r\ndevice_id = \"BOLT3851580\"\r\nsms = Sms(conf.SID, conf.AUTH_TOKEN, conf.TO_NUMBER, conf.FROM_NUMBER)\r\nmybolt = Bolt(api_key, device_id)\r\n\r\ndef price_check():\r\n url = \"https://min-api.cryptocompare.com/data/price\"\r\n querystring = {\"fsym\":\"BTC\",\"tsyms\":\"INR\"}\r\n response = requests.request(\"GET\", url, params=querystring)\r\n response = json.loads(response.text)\r\n current_price = response['INR']\r\n return current_price\r\n\r\nwhile True:\r\n market_price = price_check()\r\n print ('market price is: ',market_price)\r\n print ('Selling price is: ',selling_price)\r\n time.sleep(10)\r\n if market_price > selling_price:\r\n print(\"Making request to Twilio to send a SMS\")\r\n response = sms.send_sms(\"The Current market value of bitcoin is\"+str(market_price)+ \" it is higher than fixed Selling price so time to Sell now\")\r\n print(\"Response received from Twilio is: \" + str(response))\r\n print(\"Status of SMS at Twilio is :\" + str(response.status))\r\n mybolt.digitalWrite(\"0\",\"HIGH\")\r\n time.sleep(5)\r\n mybolt.digitalWrite(\"0\",\"LOW\")\r\n print(\"Process completed.\")\r\n ","sub_path":"Bitcoin price alert using python/cryptoAlert.py","file_name":"cryptoAlert.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"54989957","text":"from create_singly_list_node import ListNode, listNodeToString, stringToListNode\n\n\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"\n https://leetcode.com/problems/remove-nth-node-from-end-of-list/\n // Time Complexity : O(n)\n // Space Complexity : O(1)\n // Did this code successfully run on Leetcode : Yes\n // Three line explanation of solution in plain english :\n - Make dummy node for edge case, when we want to delete\n - Make the gap required unless count is not greater than\n - Move both slow and fast pointer together\n \"\"\"\n # edge case\n if not head:\n return head\n\n # fore edge case when n = 1\n dummy = ListNode(-1)\n dummy.next = head\n\n slow = fast = dummy\n # traverse until count (the gap)\n # is <= the required n\n count = 0\n while count <= n:\n fast = fast.next\n count += 1\n # move both slow and fast simultaneously\n while fast:\n slow = slow.next\n fast = fast.next\n slow.next = slow.next.next\n return dummy.next\n\n def removeNthFromEndBruteForce(self, head: ListNode, n: int) -> ListNode:\n # edge case\n if not head:\n return head\n\n dummy = ListNode(-1)\n dummy.next = head\n length = 1\n cur = dummy\n while cur and cur.next:\n length += 1\n cur = cur.next\n cur = dummy\n count = 1\n while count < length - n:\n cur = cur.next\n count += 1\n cur.next = cur.next.next\n return dummy.next\n\n\nif __name__ == '__main__':\n h = Solution()\n head = stringToListNode([3, 2, 0, -4])\n new_head = removeNthFromEnd(head)\n print(listNodeToString(new_head))\n","sub_path":"19_remove_nth_node_from_end_of_list.py","file_name":"19_remove_nth_node_from_end_of_list.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"76445390","text":"import tensorflow as tf\r\nimport numpy as np\r\nfrom random import sample\r\ndef split_data_set(x,y,ratio=[0.7,0.3]):\r\n data_len=len(x)\r\n lens=[int(data_len*rati) for rati in ratio]\r\n trainx,trainy=x[:lens[0]],y[:lens[0]]\r\n testx,testy=x[lens[0]:],y[lens[0]:]\r\n return (trainx,trainy), (testx,testy)\r\n\r\ndef batch_gen(x, y, batch_size):\r\n while True:\r\n for i in range(0, len(x), batch_size):\r\n if (i+1)*batch_size < len(x):\r\n yield x[i : (i+1)*batch_size ].T, y[i : (i+1)*batch_size ].T\r\n\r\ndef rand_batch_gen(x, y, batch_size):\r\n while True:\r\n sample_idx = sample(list(np.arange(len(x))), 7)\r\n yield x[sample_idx].T, y[sample_idx].T\r\n\r\ndef decode(sequence, lookup, separator=''):\r\n return separator.join([ lookup[element] for element in sequence if element ])\r\n","sub_path":"seq-research/data_utills.py","file_name":"data_utills.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"498262920","text":"from datetime import datetime\nfrom flask import Flask, request, redirect, render_template, session, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:build-a-blog@localhost:8889/build-a-blog'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\n\n\n\n# create db\nclass Blog(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120), unique=True)\n body = db.Column(db.Text)\n date = db.Column(db.DateTime)\n\n def __init__(self, title):\n self.title = title\n self.date = datetime.utcnow()\n\n def __repr__(self):\n return '' % self.title\n\n\n@app.route('/')\ndef index():\n return redirect('/blog')\n\n\n@app.route('/posts', methods=['GET'])\ndef get_posts():\n return Blog.query.all()\n\n\n# order post\n@app.route('/get_ordered_posts', methods=['GET'])\ndef get_ordered_posts():\n return Blog.query.order_by(\"date desc\").all()\n\n\n# homepage\n@app.route('/blog', methods=['GET'])\ndef blog():\n id = request.args.get('id', None)\n\n if id:\n post = Blog.query.filter_by(id=id).first()\n return render_template('posts.html', post=post)\n\n posts = get_ordered_posts()\n return render_template('blog.html', posts=posts)\n\n\n# Add new post\n@app.route('/newpost', methods=['GET'])\ndef newpost():\n title = request.args.get('title', '')\n body = request.args.get('body', '')\n title_error = request.args.get('title_error', '')\n body_error = request.args.get('body_error', '')\n\n return render_template('newpost.html', title=title, body=body, title_error=title_error, body_error=body_error)\n\n\n# Route after adding new post\n@app.route('/post', methods=['POST'])\ndef post():\n title = request.form.get('title', '')\n body = request.form.get('body', '')\n title_error = request.form.get('title_error', '')\n body_error = request.form.get('body_error', '')\n\n # validation for empty values\n if not title or not body:\n if title == '':\n title_error = \"Please provide a title\"\n if body == '':\n body_error = \"Please write your blog\"\n return redirect(f'/newpost?title={title}&body={body}&title_error={title_error}&body_error={body_error}')\n\n # Add new post\n new_post = Blog(title)\n new_post.body = body\n db.session.add(new_post)\n db.session.commit()\n\n # Load the new post in an individual page\n id = new_post.id\n posts = Blog.query.filter_by(id=id).all()\n return render_template('blog.html', posts=posts)\n\n\nif __name__ == '__main__':\n app.run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"17316570","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'member'\n\nurlpatterns = [\n url(regex=r'^login$', view=views.login, name='login'),\n url(regex=r'^logout$', view=views.logout, name='logout'),\n url(regex=r'^registration$', view=views.registration, name='registration'),\n url(regex=r'^model/registration$', view=views.model_registration, name='model_registration'),\n url(regex=r'^company/registration$', view=views.company_registration, name='company_registration'),\n url(regex=r'^model/mypage$', view=views.model_mypage, name='model_mypage'),\n # url(regex=r'^company/mypage$', view=views.company_mypage, name='company_mypage'),\n]\n","sub_path":"ssProject/member/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"164356278","text":"from tkinter import *\r\nfrom tkinter.ttk import *\r\nfrom tkinter.messagebox import *\r\nfrom tkinter.filedialog import (askopenfilename,\r\n askopenfilenames,\r\n askdirectory,\r\n asksaveasfilename)\r\n# from WebFlash import *\r\nfrom WebFlash.WebBase import *\r\n\r\nclass MainPage(object):\r\n def __init__(self,master,driver):\r\n self.root = master\r\n self.root.geometry('%dx%d' % (400, 180)) # 设置窗口大小\r\n self.driver = driver\r\n self.updatetype = StringVar()\r\n self.filepath = StringVar()\r\n self.createPage()\r\n self.flag = False\r\n\r\n def createPage(self):\r\n self.page = Frame(self.root)\r\n self.page.pack()\r\n Label(self.page).grid(row=0, stick=W)\r\n Label(self.page,text=\"Update Type:\").grid(row=1,stick=W,pady=10)\r\n self.selecttype = Combobox(self.page,values=[\"BIOS\",\"BMC\"])\r\n self.selecttype.grid(row=1,column=1,stick=E)\r\n Button(self.page,text=\"select image\",command=self.selectimage).grid(row=2,stick=W,pady=10)\r\n Entry(self.page,textvariable=self.filepath,width=25).grid(row=2,column=1,stick=E)\r\n Button(self.page,text=\"Start update\",command=self.startupdate).grid(row=3,stick=W,pady=10)\r\n Button(self.page,text=\"Sign out\",command=self.signout).grid(row=3,column=1,stick=E)\r\n\r\n def signout(self):\r\n self.page.quit()\r\n self.driver.close()\r\n #self.page.destroy()\r\n # LoginPage(self.root,self.driver)\r\n\r\n def selectimage(self):\r\n if self.selecttype.get() == \"BIOS\":\r\n filetype=[(\"BIOS\",\"*.BIN\"),(\"BIOS\",\"*.ROM\")]\r\n if self.selecttype.get() == \"BMC\":\r\n filetype = [(\"BMC\",\"*.ima\")]\r\n if len(self.selecttype.get()) == 0:\r\n showinfo(\"Warning\",\"Please select update type\")\r\n self.filepath.set(askopenfilename(title=\"选择文件\", filetypes=filetype))\r\n self.flag = True\r\n\r\n def veiw(self):\r\n top = Toplevel(self.root)\r\n top.geometry(\"300x100+520+260\")\r\n self.canvas = Canvas(top, width=170, height=26, bg=\"white\")\r\n # 创建一个矩形外边框(距离左边,距离顶部,矩形宽度,矩形高度),线型宽度,颜色\r\n self.out_line = self.canvas.create_rectangle(2, 2, 180, 27, width=1, outline=\"black\")\r\n self.canvas.grid(row=0, column=1, ipadx=5)\r\n\r\n def startupdate(self):\r\n if self.flag:\r\n if WebUpdate(self.driver).ready(self.selecttype.get(),self.filepath.get()):\r\n self.veiw()\r\n else:\r\n showinfo(\"Warning\",\"Please try again\")\r\n else:\r\n showwarning(message=\"Please select image file\")\r\n\r\n\r\n# root = Tk()\r\n# MainPage(root)\r\n# root.mainloop()\r\n\r\n\r\n","sub_path":"WebFlash/MainPage.py","file_name":"MainPage.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"315751258","text":"\"\"\"Base grammar, Ref, Anything and Nothing.\"\"\"\n\nimport copy\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, List, Optional, Union, Type, Tuple, Any\n\nfrom sqlfluff.core.errors import SQLParseError\nfrom sqlfluff.core.string_helpers import curtail_string\n\nfrom sqlfluff.core.parser.segments import BaseSegment, BracketedSegment, allow_ephemeral\nfrom sqlfluff.core.parser.helpers import trim_non_code_segments, iter_indices\nfrom sqlfluff.core.parser.match_result import MatchResult\nfrom sqlfluff.core.parser.match_logging import (\n parse_match_logging,\n LateBoundJoinSegmentsCurtailed,\n)\nfrom sqlfluff.core.parser.match_wrapper import match_wrapper\nfrom sqlfluff.core.parser.matchable import Matchable\nfrom sqlfluff.core.parser.context import ParseContext\nfrom sqlfluff.core.parser.parsers import BaseParser\n\n# Either a Matchable (a grammar or parser) or a Segment CLASS\nMatchableType = Union[Matchable, Type[BaseSegment]]\n\nif TYPE_CHECKING:\n from sqlfluff.core.dialects.base import Dialect # pragma: no cover\n\n\n@dataclass\nclass BracketInfo:\n \"\"\"BracketInfo tuple for keeping track of brackets during matching.\n\n This is used in BaseGrammar._bracket_sensitive_look_ahead_match but\n defined here for type checking.\n \"\"\"\n\n bracket: BaseSegment\n segments: Tuple[BaseSegment, ...]\n\n def to_segment(self, end_bracket):\n \"\"\"Turn the contained segments into a bracketed segment.\"\"\"\n return BracketedSegment(\n segments=self.segments,\n start_bracket=(self.bracket,),\n end_bracket=end_bracket,\n )\n\n\ndef cached_method_for_parse_context(func):\n \"\"\"A decorator to cache the output of this method for a given parse context.\n\n This cache automatically invalidates if the uuid\n of the parse context changes. The value is store\n in the __dict__ attribute of the class against a\n key unique to that function.\n \"\"\"\n cache_key = \"__cache_\" + func.__name__\n\n def wrapped_method(self, parse_context: ParseContext, **kwargs):\n \"\"\"Cache the output of the method against a given parse context.\n\n Note: kwargs are not taken into account in the caching, but\n for the current use case of dependency loop debugging that's\n ok.\n \"\"\"\n cache_tuple: Tuple = self.__dict__.get(cache_key, (None, None))\n # Do we currently have a cached value?\n if cache_tuple[0] == parse_context.uuid:\n return cache_tuple[1]\n # Generate a new value, cache it and return\n result = func(self, parse_context=parse_context, **kwargs)\n self.__dict__[cache_key] = (parse_context.uuid, result)\n return result\n\n return wrapped_method\n\n\nclass BaseGrammar(Matchable):\n \"\"\"Grammars are a way of composing match statements.\n\n Any grammar must implement the `match` function. Segments can also be\n passed to most grammars. Segments implement `match` as a classmethod. Grammars\n implement it as an instance method.\n\n \"\"\"\n\n is_meta = False\n # Are we allowed to refer to keywords as strings instead of only passing\n # grammars or segments?\n allow_keyword_string_refs = True\n equality_kwargs: Tuple[str, ...] 
= (\"optional\", \"allow_gaps\")\n\n @staticmethod\n def _resolve_ref(elem):\n \"\"\"Resolve potential string references to things we can match against.\"\"\"\n initialisers = [\n # t: instance / f: class, ref, func\n (True, str, Ref.keyword),\n (True, BaseGrammar, lambda x: x),\n (True, BaseParser, lambda x: x),\n (False, BaseSegment, lambda x: x),\n ]\n # Get-out clause for None\n if elem is None:\n return None\n\n for instance, init_type, init_func in initialisers:\n if (instance and isinstance(elem, init_type)) or (\n not instance and issubclass(elem, init_type)\n ):\n return init_func(elem)\n raise TypeError(\n \"Grammar element [{!r}] was found of unexpected type [{}] was \"\n \"found.\".format(elem, type(elem)) # pragma: no cover\n )\n\n def __init__(\n self,\n *args: Union[MatchableType, str],\n allow_gaps=True,\n optional=False,\n ephemeral_name=None,\n ):\n \"\"\"Deal with kwargs common to all grammars.\n\n Args:\n *args: Any number of elements which because the subjects\n of this grammar. Optionally these elements may also be\n string references to elements rather than the Matchable\n elements themselves.\n allow_gaps (:obj:`bool`, optional): Does this instance of the\n grammar allow gaps between the elements it matches? This\n may be exhibited slightly differently in each grammar. See\n that grammar for details. Defaults `True`.\n optional (:obj:`bool`, optional): In the context of a sequence,\n is this grammar *optional*, i.e. can it be skipped if no\n match is found. Outside of a Sequence, this option does nothing.\n Defaults `False`.\n ephemeral_name (:obj:`str`, optional): If specified this allows\n the grammar to match anything, and create an EphemeralSegment\n with the given name in its place. The content of this grammar\n is passed to the segment, and will become the parse grammar\n for it. If used widely this is an excellent way of breaking\n up the parse process and also signposting the name of a given\n chunk of code that might be parsed separately.\n \"\"\"\n # We provide a common interface for any grammar that allows positional elements.\n # If *any* for the elements are a string and not a grammar, then this is a\n # shortcut to the Ref.keyword grammar by default.\n if self.allow_keyword_string_refs:\n self._elements = []\n for elem in args:\n self._elements.append(self._resolve_ref(elem))\n else:\n self._elements = list(args)\n\n # Now we deal with the standard kwargs\n self.allow_gaps = allow_gaps\n self.optional = optional\n # ephemeral_name is a flag to indicate whether we need to make an\n # EphemeralSegment class. 
This is effectively syntactic sugar\n        # to allow us to avoid specifying an EphemeralSegment directly in a dialect.\n        # If this is the case, the actual segment construction happens in the\n        # match_wrapper.\n        self.ephemeral_name = ephemeral_name\n\n    def is_optional(self):\n        \"\"\"Return whether this segment is optional.\n\n        The optional attribute is set in the __init__ method.\n        \"\"\"\n        return self.optional\n\n    @match_wrapper()\n    @allow_ephemeral\n    def match(self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext):\n        \"\"\"Match a list of segments against this segment.\n\n        Matching can be done from either the raw or the segments.\n        This raw function can be overridden, or a grammar defined\n        on the underlying class.\n        \"\"\"\n        raise NotImplementedError(\n            f\"{self.__class__.__name__} has no match function implemented\"\n        ) # pragma: no cover\n\n    @cached_method_for_parse_context\n    def simple(self, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:\n        \"\"\"Does this matcher support an uppercase hash matching route?\"\"\"\n        return None\n\n    @classmethod\n    def _longest_trimmed_match(\n        cls,\n        segments: Tuple[BaseSegment, ...],\n        matchers: List[MatchableType],\n        parse_context: ParseContext,\n        trim_noncode=True,\n        terminators: List[MatchableType] = None,\n    ) -> Tuple[MatchResult, Optional[MatchableType]]:\n        \"\"\"Return longest match from a selection of matchers.\n\n        Prioritise the first match, and if multiple match at the same point the longest.\n        If two matches of the same length match at the same time, then it's the first in\n        the iterable of matchers.\n\n        Returns:\n            `tuple` of (match_object, matcher).\n\n        \"\"\"\n        terminated = False\n\n        # Have we been passed an empty list?\n        if len(segments) == 0: # pragma: no cover\n            return MatchResult.from_empty(), None\n\n        # If gaps are allowed, trim the ends.\n        if trim_noncode:\n            pre_nc, segments, post_nc = trim_non_code_segments(segments)\n\n        best_match_length = 0\n        # iterate at this position across all the matchers\n        for matcher in matchers:\n            # MyPy seems to require a type hint here. Not quite sure why.\n            res_match: MatchResult = matcher.match(\n                segments, parse_context=parse_context\n            )\n            if res_match.is_complete():\n                # Just return it! (WITH THE RIGHT OTHER STUFF)\n                if trim_noncode:\n                    return (\n                        MatchResult.from_matched(\n                            pre_nc + res_match.matched_segments + post_nc\n                        ),\n                        matcher,\n                    )\n                else:\n                    return res_match, matcher\n            elif res_match:\n                # We've got an incomplete match, if it's the best so far keep it.\n                if res_match.trimmed_matched_length > best_match_length:\n                    best_match = res_match, matcher\n                    best_match_length = res_match.trimmed_matched_length\n\n            if terminators:\n\n                _, segs, _ = trim_non_code_segments(\n                    best_match[0].unmatched_segments\n                )\n                for terminator in terminators:\n                    terminator_match: MatchResult = terminator.match(\n                        segs, parse_context=parse_context\n                    )\n\n                    if terminator_match.matched_segments:\n                        terminated = True\n                        break\n\n            if terminated:\n                break\n\n        # We could stash segments here, but given we might have some successful\n        # matches here, we shouldn't, because they'll be mutated in the wrong way.\n        # Eventually there might be a performance gain from doing that sensibly\n        # here.\n\n        # If we get here, then there wasn't a complete match. 
If we\n        # have a best_match, return that.\n        if best_match_length > 0:\n            if trim_noncode:\n                return (\n                    MatchResult(\n                        pre_nc + best_match[0].matched_segments,\n                        best_match[0].unmatched_segments + post_nc,\n                    ),\n                    best_match[1],\n                )\n            else:\n                return best_match\n        # If no match at all, return nothing\n        return MatchResult.from_unmatched(segments), None\n\n    @classmethod\n    def _look_ahead_match(\n        cls,\n        segments: Tuple[BaseSegment, ...],\n        matchers: List[MatchableType],\n        parse_context: ParseContext,\n    ) -> Tuple[Tuple[BaseSegment, ...], MatchResult, Optional[MatchableType]]:\n        \"\"\"Look ahead for matches beyond the first element of the segments list.\n\n        This function also contains the performance improved hash-matching approach to\n        searching for matches, which should significantly improve performance.\n\n        Prioritise the first match, and if multiple match at the same point the longest.\n        If two matches of the same length match at the same time, then it's the first in\n        the iterable of matchers.\n\n        Returns:\n            `tuple` of (unmatched_segments, match_object, matcher).\n\n        \"\"\"\n        parse_match_logging(\n            cls.__name__,\n            \"_look_ahead_match\",\n            \"IN\",\n            parse_context=parse_context,\n            v_level=4,\n            ls=len(segments),\n            seg=LateBoundJoinSegmentsCurtailed(segments),\n        )\n\n        # Have we been passed an empty tuple?\n        if not segments: # pragma: no cover TODO?\n            return ((), MatchResult.from_empty(), None)\n\n        # Here we enable a performance optimisation. Most of the time in this cycle\n        # happens in loops looking for simple matchers which we should\n        # be able to find a shortcut for.\n        # First: Assess the matchers passed in, if any are\n        # \"simple\", then we effectively use a hash lookup across the\n        # content of segments to quickly evaluate if the segment is present.\n        # Matchers which aren't \"simple\" still take a slower route.\n        _matchers = [\n            (matcher, matcher.simple(parse_context=parse_context))\n            for matcher in matchers\n        ]\n        simple_matchers = [matcher for matcher in _matchers if matcher[1]]\n        non_simple_matchers = [matcher[0] for matcher in _matchers if not matcher[1]]\n        best_simple_match = None\n        if simple_matchers:\n            # If they're all simple we can use a hash match to identify the first one.\n            # Build a buffer of all the upper case raw segments ahead of us.\n            str_buff = []\n            # For existing compound segments, we should assume that within\n            # that segment, things are internally consistent, that means\n            # rather than enumerating all the individual segments of a longer\n            # one we just dump out the whole segment, but splitting off the\n            # first element separated by whitespace. This is a) faster and\n            # also b) prevents some really horrible bugs with bracket matching.\n            # See https://github.com/sqlfluff/sqlfluff/issues/433\n\n            def _trim_elem(seg):\n                s = seg.raw_upper.split(maxsplit=1)\n                return s[0] if s else \"\"\n\n            str_buff = [_trim_elem(seg) for seg in segments]\n            match_queue = []\n\n            for matcher, simple in simple_matchers:\n                # Simple will be a tuple of options\n                assert simple\n                for simple_option in simple:\n                    # NOTE: We use iter_indices to make sure we capture\n                    # all instances of potential matches if there are many.\n                    # This is important for bracket counting.\n                    for buff_pos in iter_indices(str_buff, simple_option):\n                        match_queue.append((matcher, buff_pos, simple_option))\n\n            # Sort the match queue. 
First to process AT THE END.\n # That means we pop from the end.\n match_queue = sorted(match_queue, key=lambda x: x[1])\n\n parse_match_logging(\n cls.__name__,\n \"_look_ahead_match\",\n \"SI\",\n parse_context=parse_context,\n v_level=4,\n mq=match_queue,\n sb=str_buff,\n )\n\n while match_queue:\n # We've managed to match. We can shortcut home.\n # NB: We may still need to deal with whitespace.\n queued_matcher, queued_buff_pos, queued_option = match_queue.pop()\n # Here we do the actual transform to the new segment.\n match = queued_matcher.match(segments[queued_buff_pos:], parse_context)\n if not match:\n # We've had something match in simple matching, but then later\n # excluded. Log but then move on to the next item on the list.\n parse_match_logging(\n cls.__name__,\n \"_look_ahead_match\",\n \"NM\",\n parse_context=parse_context,\n v_level=4,\n _so=queued_option,\n )\n continue\n # Ok we have a match. Because we sorted the list, we'll take it!\n best_simple_match = (segments[:queued_buff_pos], match, queued_matcher)\n\n if not non_simple_matchers:\n # There are no other matchers, we can just shortcut now.\n\n parse_match_logging(\n cls.__name__,\n \"_look_ahead_match\",\n \"SC\",\n parse_context=parse_context,\n v_level=4,\n bsm=None\n if not best_simple_match\n else (\n len(best_simple_match[0]),\n len(best_simple_match[1]),\n best_simple_match[2],\n ),\n )\n\n if best_simple_match:\n return best_simple_match\n else:\n return ((), MatchResult.from_unmatched(segments), None)\n\n # Make some buffers\n seg_buff = segments\n pre_seg_buff: Tuple[BaseSegment, ...] = ()\n\n # Loop\n while True:\n # Do we have anything left to match on?\n if seg_buff:\n # Great, carry on.\n pass\n else:\n # We've got to the end without a match, return empty\n return ((), MatchResult.from_unmatched(segments), None)\n\n # We only check the NON-simple ones here for brevity.\n mat, m = cls._longest_trimmed_match(\n seg_buff,\n non_simple_matchers,\n parse_context=parse_context,\n trim_noncode=False,\n )\n\n if mat and not best_simple_match:\n return (pre_seg_buff, mat, m)\n elif mat:\n # Given we have mat - we should always have these two.\n assert m\n assert best_simple_match\n # It will be earlier than the simple one if we've even checked,\n # but there's a chance that this might be *longer*, or just FIRST.\n pre_lengths = (len(pre_seg_buff), len(best_simple_match[0]))\n mat_lengths = (len(mat), len(best_simple_match[1]))\n mat_indexes = (matchers.index(m), matchers.index(best_simple_match[2]))\n if (\n (pre_lengths[0] < pre_lengths[1])\n or (\n pre_lengths[0] == pre_lengths[1]\n and mat_lengths[0] > mat_lengths[1]\n )\n or (\n pre_lengths[0] == pre_lengths[1]\n and mat_lengths[0] == mat_lengths[1]\n and mat_indexes[0] < mat_indexes[1]\n )\n ):\n return (pre_seg_buff, mat, m)\n else:\n # TODO: Make a test case to cover this.\n return best_simple_match # pragma: no cover\n else:\n # If there aren't any matches, then advance the buffer and try again.\n # Two improvements:\n # 1) if we get as far as the first simple match, then return that.\n # 2) be eager in consuming non-code segments if allowed\n if best_simple_match and len(pre_seg_buff) >= len(best_simple_match[0]):\n return best_simple_match\n\n pre_seg_buff += (seg_buff[0],)\n seg_buff = seg_buff[1:]\n\n @classmethod\n def _bracket_sensitive_look_ahead_match(\n cls,\n segments: Tuple[BaseSegment, ...],\n matchers: List[MatchableType],\n parse_context: ParseContext,\n start_bracket: Optional[Matchable] = None,\n end_bracket: Optional[Matchable] = 
None,\n bracket_pairs_set: str = \"bracket_pairs\",\n ) -> Tuple[Tuple[BaseSegment, ...], MatchResult, Optional[MatchableType]]:\n \"\"\"Same as `_look_ahead_match` but with bracket counting.\n\n NB: Given we depend on `_look_ahead_match` we can also utilise\n the same performance optimisations which are implemented there.\n\n bracket_pairs_set: Allows specific segments to override the available\n bracket pairs. See the definition of \"angle_bracket_pairs\" in the\n BigQuery dialect for additional context on why this exists.\n\n Returns:\n `tuple` of (unmatched_segments, match_object, matcher).\n\n \"\"\"\n # Have we been passed an empty tuple?\n if not segments:\n return ((), MatchResult.from_unmatched(segments), None)\n\n # Get hold of the bracket matchers from the dialect, and append them\n # to the list of matchers. We get them from the relevant set on the\n # dialect. We use zip twice to \"unzip\" them. We ignore the first\n # argument because that's just the name.\n _, start_bracket_refs, end_bracket_refs, persists = zip(\n *parse_context.dialect.sets(bracket_pairs_set)\n )\n # These are matchables, probably StringParsers.\n start_brackets = [\n parse_context.dialect.ref(seg_ref) for seg_ref in start_bracket_refs\n ]\n end_brackets = [\n parse_context.dialect.ref(seg_ref) for seg_ref in end_bracket_refs\n ]\n # Add any bracket-like things passed as arguments\n if start_bracket:\n start_brackets += [start_bracket]\n if end_bracket:\n end_brackets += [end_bracket]\n bracket_matchers = start_brackets + end_brackets\n\n # Make some buffers\n seg_buff: Tuple[BaseSegment, ...] = segments\n pre_seg_buff: Tuple[BaseSegment, ...] = ()\n bracket_stack: List[BracketInfo] = []\n\n # Iterate\n while True:\n # Do we have anything left to match on?\n if seg_buff:\n # Yes we have buffer left to work with.\n # Are we already in a bracket stack?\n if bracket_stack:\n # Yes, we're just looking for the closing bracket, or\n # another opening bracket.\n pre, match, matcher = cls._look_ahead_match(\n seg_buff,\n bracket_matchers,\n parse_context=parse_context,\n )\n\n if match:\n # NB: We can only consider this as a nested bracket if the start\n # and end tokens are not the same. If a matcher is both a start\n # and end token we cannot deepen the bracket stack. In general,\n # quoted strings are a typical example where the start and end\n # tokens are the same. Currently, though, quoted strings are\n # handled elsewhere in the parser, and there are no cases where\n # *this* code has to handle identical start and end brackets.\n # For now, consider this a small, speculative investment in a\n # possible future requirement.\n if matcher in start_brackets and matcher not in end_brackets:\n # Add any segments leading up to this to the previous\n # bracket.\n bracket_stack[-1].segments += pre\n # Add a bracket to the stack and add the matches from the\n # segment.\n bracket_stack.append(\n BracketInfo(\n bracket=match.matched_segments[0],\n segments=match.matched_segments,\n )\n )\n seg_buff = match.unmatched_segments\n continue\n elif matcher in end_brackets:\n # Found an end bracket. Does its type match that of\n # the innermost start bracket? E.g. 
\")\" matches \"(\",\n # \"]\" matches \"[\".\n # For the start bracket we don't have the matcher\n # but we can work out the type, so we use that for\n # the lookup.\n start_index = [\n bracket.type for bracket in start_brackets\n ].index(bracket_stack[-1].bracket.get_type())\n # For the end index, we can just look for the matcher\n end_index = end_brackets.index(matcher)\n bracket_types_match = start_index == end_index\n if bracket_types_match:\n # Yes, the types match. So we've found a\n # matching end bracket. Pop the stack, construct\n # a bracketed segment and carry\n # on.\n\n # Complete the bracketed info\n bracket_stack[-1].segments += (\n pre + match.matched_segments\n )\n # Construct a bracketed segment (as a tuple) if allowed.\n persist_bracket = persists[end_brackets.index(matcher)]\n if persist_bracket:\n new_segments: Tuple[BaseSegment, ...] = (\n bracket_stack[-1].to_segment(\n end_bracket=match.matched_segments\n ),\n )\n else:\n new_segments = bracket_stack[-1].segments\n # Remove the bracket set from the stack\n bracket_stack.pop()\n # If we're still in a bracket, add the new segments to\n # that bracket, otherwise add them to the buffer\n if bracket_stack:\n bracket_stack[-1].segments += new_segments\n else:\n pre_seg_buff += new_segments\n seg_buff = match.unmatched_segments\n continue\n else:\n # The types don't match. Error.\n raise SQLParseError(\n f\"Found unexpected end bracket!, \"\n f\"was expecting \"\n f\"{end_brackets[start_index]}, \"\n f\"but got {matcher}\",\n segment=match.matched_segments[0],\n )\n\n else: # pragma: no cover\n raise RuntimeError(\"I don't know how we get here?!\")\n else: # pragma: no cover\n # No match, we're in a bracket stack. Error.\n raise SQLParseError(\n \"Couldn't find closing bracket for opening bracket.\",\n segment=bracket_stack[-1].bracket,\n )\n else:\n # No, we're open to more opening brackets or the thing(s)\n # that we're otherwise looking for.\n pre, match, matcher = cls._look_ahead_match(\n seg_buff,\n matchers + bracket_matchers,\n parse_context=parse_context,\n )\n\n if match:\n if matcher in matchers:\n # It's one of the things we were looking for!\n # Return.\n return (pre_seg_buff + pre, match, matcher)\n elif matcher in start_brackets:\n # We've found the start of a bracket segment.\n # NB: It might not *actually* be the bracket itself,\n # but could be some non-code element preceding it.\n # That's actually ok.\n\n # Add the bracket to the stack.\n bracket_stack.append(\n BracketInfo(\n bracket=match.matched_segments[0],\n segments=match.matched_segments,\n )\n )\n # The matched element has already been added to the bracket.\n # Add anything before it to the pre segment buffer.\n # Reset the working buffer.\n pre_seg_buff += pre\n seg_buff = match.unmatched_segments\n continue\n elif matcher in end_brackets:\n # We've found an unexpected end bracket! This is likely\n # because we're matching a section which should have ended.\n # If we had a match, it would have matched by now, so this\n # means no match.\n parse_match_logging(\n cls.__name__,\n \"_bracket_sensitive_look_ahead_match\",\n \"UEXB\",\n parse_context=parse_context,\n v_level=3,\n got=matcher,\n )\n # From here we'll drop out to the happy unmatched exit.\n else: # pragma: no cover\n # This shouldn't happen!?\n raise NotImplementedError(\n \"This shouldn't happen. 
Panic in \"\n \"_bracket_sensitive_look_ahead_match.\"\n )\n # Not in a bracket stack, but no match.\n # From here we'll drop out to the happy unmatched exit.\n else:\n # No we're at the end:\n # Now check have we closed all our brackets?\n if bracket_stack: # pragma: no cover\n # No we haven't.\n raise SQLParseError(\n \"Couldn't find closing bracket for opened brackets: \"\n f\"`{bracket_stack}`.\",\n segment=bracket_stack[-1].bracket,\n )\n\n # This is the happy unmatched path. This occurs when:\n # - We reached the end with no open brackets.\n # - No match while outside a bracket stack.\n # - We found an unexpected end bracket before matching something\n # interesting. We return with the mutated segments so we can reuse any\n # bracket matching.\n return ((), MatchResult.from_unmatched(pre_seg_buff + seg_buff), None)\n\n def __str__(self): # pragma: no cover TODO?\n return repr(self)\n\n def __repr__(self):\n return \"<{}: [{}]>\".format(\n self.__class__.__name__,\n curtail_string(\n \", \".join(curtail_string(repr(elem), 40) for elem in self._elements),\n 100,\n ),\n )\n\n def __eq__(self, other):\n \"\"\"Two grammars are equal if their elements and types are equal.\n\n NOTE: We use the equality_kwargs tuple on the class to define\n other kwargs which should also be checked so that things like\n \"optional\" is also taken into account in considering equality.\n \"\"\"\n return (\n type(self) is type(other)\n and self._elements == other._elements\n and all(\n getattr(self, k, None) == getattr(other, k, None)\n for k in self.equality_kwargs\n )\n )\n\n def copy(\n self,\n insert: Optional[list] = None,\n at: Optional[int] = None,\n before: Optional[Any] = None,\n remove: Optional[list] = None,\n **kwargs,\n ):\n \"\"\"Create a copy of this grammar, optionally with differences.\n\n This is mainly used in dialect inheritance.\n\n\n Args:\n insert (:obj:`list`, optional): Matchable elements to\n insert. This is inserted pre-expansion so can include\n unexpanded elements as normal.\n at (:obj:`int`, optional): The position in the elements\n to insert the item. Defaults to `None` which means\n insert at the end of the elements.\n before (optional): An alternative to _at_ to determine the\n position of an insertion. Using this inserts the elements\n immediately before the position of this element.\n Note that this is not an _index_ but an element to look\n for (i.e. a Segment or Grammar which will be compared\n with other elements for equality).\n remove (:obj:`list`, optional): A list of individual\n elements to remove from a grammar. Removal is done\n *after* insertion so that order is preserved.\n Elements are searched for individually.\n\n \"\"\"\n # Copy only the *grammar* elements. The rest comes through\n # as is because they should just be classes rather than\n # instances.\n new_elems = [\n elem.copy() if isinstance(elem, BaseGrammar) else elem\n for elem in self._elements\n ]\n if insert:\n if at is not None and before is not None: # pragma: no cover\n raise ValueError(\n \"Cannot specify `at` and `before` in BaseGrammar.copy().\"\n )\n if before is not None:\n try:\n idx = new_elems.index(before)\n except ValueError: # pragma: no cover\n raise ValueError(\n \"Could not insert {} in copy of {}. 
{} not Found.\".format(\n insert, self, before\n )\n )\n new_elems = new_elems[:idx] + insert + new_elems[idx:]\n elif at is None:\n new_elems = new_elems + insert\n else:\n new_elems = new_elems[:at] + insert + new_elems[at:]\n if remove:\n for elem in remove:\n try:\n new_elems.remove(elem)\n except ValueError: # pragma: no cover\n raise ValueError(\n \"Could not remove {} from copy of {}. Not Found.\".format(\n elem, self\n )\n )\n new_seg = copy.copy(self)\n new_seg._elements = new_elems\n return new_seg\n\n\nclass Ref(BaseGrammar):\n \"\"\"A kind of meta-grammar that references other grammars by name at runtime.\"\"\"\n\n # We can't allow keyword refs here, because it doesn't make sense\n # and it also causes infinite recursion.\n allow_keyword_string_refs = False\n\n def __init__(self, *args: str, **kwargs):\n # Any patterns to _prevent_ a match.\n self.exclude = kwargs.pop(\"exclude\", None)\n super().__init__(*args, **kwargs)\n\n @cached_method_for_parse_context\n def simple(\n self, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None\n ) -> Optional[List[str]]:\n \"\"\"Does this matcher support a uppercase hash matching route?\n\n A ref is simple, if the thing it references is simple.\n \"\"\"\n ref = self._get_ref()\n if crumbs and ref in crumbs: # pragma: no cover\n loop = \" -> \".join(crumbs)\n raise RecursionError(f\"Self referential grammar detected: {loop}\")\n return self._get_elem(dialect=parse_context.dialect).simple(\n parse_context=parse_context,\n crumbs=(crumbs or ()) + (ref,),\n )\n\n def _get_ref(self) -> str:\n \"\"\"Get the name of the thing we're referencing.\"\"\"\n # Unusually for a grammar we expect _elements to be a list of strings.\n # Notable ONE string for now.\n if len(self._elements) == 1:\n # We're good on length. Get the name of the reference\n ref = self._elements[0]\n if not isinstance(ref, str): # pragma: no cover\n raise ValueError(\n \"Ref Grammar expects elements to be strings. \"\n f\"Found {ref!r} instead.\"\n )\n return self._elements[0]\n else: # pragma: no cover\n raise ValueError(\n \"Ref grammar can only deal with precisely one element for now. 
Instead \"\n \"found {!r}\".format(self._elements)\n )\n\n def _get_elem(self, dialect: \"Dialect\") -> Union[Type[BaseSegment], Matchable]:\n \"\"\"Get the actual object we're referencing.\"\"\"\n if dialect:\n # Use the dialect to retrieve the grammar it refers to.\n return dialect.ref(self._get_ref())\n else: # pragma: no cover\n raise ReferenceError(\"No Dialect has been provided to Ref grammar!\")\n\n def __repr__(self):\n return \"\".format(\n \", \".join(self._elements), \" [opt]\" if self.is_optional() else \"\"\n )\n\n @match_wrapper(v_level=4) # Log less for Ref\n @allow_ephemeral\n def match(\n self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext\n ) -> \"MatchResult\":\n \"\"\"Match a list of segments against this segment.\n\n Matching can be done from either the raw or the segments.\n This raw function can be overridden, or a grammar defined\n on the underlying class.\n\n The match element of Ref, also implements the caching\n using the parse_context `denylist` methods.\n \"\"\"\n elem = self._get_elem(dialect=parse_context.dialect)\n\n # First if we have an *exclude* option, we should check that\n # which would prevent the rest of this grammar from matching.\n if self.exclude:\n with parse_context.deeper_match() as ctx:\n if self.exclude.match(segments, parse_context=ctx):\n return MatchResult.from_unmatched(segments)\n\n # First check against the efficiency Cache.\n # We rely on segments not being mutated within a given\n # match cycle and so the ids should continue to refer to unchanged\n # objects.\n seg_tuple = (id(seg) for seg in segments)\n self_name = self._get_ref()\n if parse_context.denylist.check(self_name, seg_tuple): # pragma: no cover TODO?\n # This has been tried before.\n parse_match_logging(\n self.__class__.__name__,\n \"match\",\n \"SKIP\",\n parse_context=parse_context,\n v_level=3,\n self_name=self_name,\n )\n return MatchResult.from_unmatched(segments)\n\n # Match against that. NB We're not incrementing the match_depth here.\n # References shouldn't really count as a depth of match.\n with parse_context.matching_segment(self._get_ref()) as ctx:\n resp = elem.match(segments=segments, parse_context=ctx)\n if not resp:\n parse_context.denylist.mark(self_name, seg_tuple)\n return resp\n\n @classmethod\n def keyword(cls, keyword, **kwargs):\n \"\"\"Generate a reference to a keyword by name.\n\n This function is entirely syntactic sugar, and designed\n for more readable dialects.\n\n Ref.keyword('select') == Ref('SelectKeywordSegment')\n\n \"\"\"\n name = keyword.capitalize() + \"KeywordSegment\"\n return cls(name, **kwargs)\n\n\nclass Anything(BaseGrammar):\n \"\"\"Matches anything.\"\"\"\n\n def match(\n self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext\n ) -> \"MatchResult\":\n \"\"\"Matches... Anything.\n\n Most useful in match grammars, where a later parse grammar\n will work out what's inside.\n \"\"\"\n return MatchResult.from_matched(segments)\n\n\nclass Nothing(BaseGrammar):\n \"\"\"Matches nothing.\n\n Useful for placeholders which might be overwritten by other\n dialects.\n \"\"\"\n\n def match(\n self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext\n ) -> \"MatchResult\":\n \"\"\"Matches... 
nothing.\n\n Useful for placeholders which might be overwritten by other\n dialects.\n \"\"\"\n return MatchResult.from_unmatched(segments)\n","sub_path":"src/sqlfluff/core/parser/grammar/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":41083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
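# An illustrative sketch of composing grammars with the classes above, based
# on their docstrings; "ObjectReferenceSegment" is just an example segment
# name, and the repr shown assumes the reconstructed __repr__ above.
from sqlfluff.core.parser.grammar.base import Anything, Ref

select_ref = Ref.keyword("select")  # == Ref('SelectKeywordSegment')
opt_ref = Ref("ObjectReferenceSegment", optional=True)
print(repr(select_ref))             # <Ref: SelectKeywordSegment>
print(opt_ref.is_optional())        # True
print(Anything().is_optional())     # False: grammars default to optional=False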
+{"seq_id":"28971666","text":"'''\r\nCreated on 2017年6月21日\r\n\r\n@author: iBook\r\n'''\r\n#!/usr/bin/python3\r\n# -*- coding: UTF-8 -*-\r\nimport MySQLdb\r\n\r\n\r\n# 打开数据库连接\r\ndb = MySQLdb.connect(\"localhost\",\"root\",\"1234\",\"test\" )\r\n# 使用cursor()方法获取操作游标\r\ncursor = db.cursor()\r\n# 使用execute方法执行SQL语句\r\ncursor.execute(\"SELECT VERSION()\")\r\n# 使用 fetchone() 方法获取一条数据库。\r\ndata = cursor.fetchone()\r\nprint(\"Database version : %s \" % data)\r\n# 关闭数据库连接\r\ndb.close()\r\n","sub_path":"PythonDemo/com/weduoo/db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"65765204","text":"from gcloud import storage\r\nimport os\r\nimport json\r\n\r\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"./dbtest-301709-b8daa273ad42.json\"\r\n\r\nos.environ.setdefault(\"GCLOUD_PROJECT\", \"dbtest\") # 프로젝트 id를 넣어줌\r\n\r\nclient = storage.Client()\r\n\r\nbucket = client.get_bucket('store_video2') # 버켓 이름 넣어줌\r\n\r\n\r\n# 프레임 파일 / 초기 파일 지우기\r\n\r\nwith open('upload/emotion/emotions.json') as json_file:\r\n json_data = json.load(json_file)\r\n\r\nfor key in json_data.keys():\r\n if( key == 'total' or key == 'time'):\r\n continue \r\n print(key)\r\n frame = bucket.blob(key+\".jpg\") # 어떤 파일을 지울건지\r\n try:\r\n frame.delete()\r\n except Exception:\r\n print(\"can not detect such file\")\r\n\r\n# 동영상 초기 파일 지우기\r\npre_video = bucket.blob('test.mp4') # 어떤 파일을 지울건지\r\ntry:\r\n pre_video.delete()\r\nexcept Exception:\r\n print(\"no such file\")\r\n\r\npre_result_video = bucket.blob('result.mp4') # 어떤 파일을 지울건지\r\ntry:\r\n pre_result_video.delete()\r\nexcept Exception:\r\n print(\"no such file\")\r\n\r\n\r\n\r\n\r\n# 해야할 것\r\n\r\n# 구글 클라우드 프로젝트 생성\r\n# 생성한 프로젝트의 버켓 생성 -> 세분화된 액세스 제어 선택\r\n# 버켓의 사용자 계정 생성\r\n\r\n# pip install gcloud\r\n# pip install --upgrade google-cloud-storage\r\n\r\n\r\n# export GOOGLE_APPLICATION_CREDENTIALS=\"C:\\Users\\CHOISAYWHY\\jsons\\dbtest-301709-b8daa273ad42.json\" \r\n# ㄴ자기가 json 파일 저장한 경로 쓰기 \r\n# 이후 실행\r\n# pip install -U httplib2==0.15.0\r\n\r\n# https://github.com/googleapis/google-api-python-client/issues/803\r\n","sub_path":"GoogleStorageDefault.py","file_name":"GoogleStorageDefault.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"625385303","text":"# Copyright 2012-2013 OpenStack, LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"Network action implementations\"\"\"\n\nimport logging\n\nfrom cliff import command\n\nfrom openstackclient.network import common\n\n\nclass CreateNetwork(common.CreateCommand):\n \"\"\"Create a network\"\"\"\n\n resource = 'network'\n\n def get_parser(self, prog_name):\n parser = super(CreateNetwork, self).get_parser(prog_name)\n parser.add_argument(\n '--admin-state-down',\n dest='admin_state', action='store_false',\n default=True, help='Set Admin State Up to false')\n parser.add_argument(\n '--shared',\n action='store_true',\n default=False, help='Set the network as shared')\n parser.add_argument(\n 'name', metavar='NAME',\n help='Name of network to create')\n return parser\n\n def get_body(self, parsed_args):\n return {self.resource: {\n 'name': str(parsed_args.name),\n 'admin_state_up': str(parsed_args.admin_state),\n 'shared': str(parsed_args.shared) } }\n\n\nclass DeleteNetwork(common.DeleteCommand):\n \"\"\"Delete a network\"\"\"\n\n resource = 'network'\n\n\nclass ListNetwork(common.ListCommand):\n \"\"\"List networks\"\"\"\n\n resource = \"network\"\n list_columns = ['id', 'name', 'subnets']\n\n def get_parser(self, prog_name):\n parser = super(ListNetwork, self).get_parser(prog_name)\n parser.add_argument(\n '--external',\n action='store_true',\n default=False,\n help='List external networks',\n )\n parser.add_argument(\n '--dhcp',\n dest='dhcp_agent',\n help='ID of the DHCP agent')\n return parser\n\n def take_action(self, parsed_args):\n self.log.debug('take_action(%s)' % parsed_args)\n if parsed_args.external:\n self.report_filter = {'router:external': True}\n elif parsed_args.dhcp_agent:\n self.func = 'networks_on_dhcp_agent'\n self.resources = 'networks_on_dhcp_agent'\n self.report_filter = {'dhcp_agent': parsed_args.dhcp_agent}\n return super(ListNetwork, self).take_action(parsed_args)\n\n\nclass SetNetwork(common.SetCommand):\n \"\"\"Set network values\"\"\"\n\n resource = 'network'\n\n\nclass ShowNetwork(common.ShowCommand):\n \"\"\"Show network details\"\"\"\n\n resource = 'network'\n\n\nclass AddGatewayNetwork(command.Command, common.BaseCommand):\n \"\"\"Add a gateway to a network\"\"\"\n\n log = logging.getLogger(__name__ + '.AddGatewayNetwork')\n resource = 'network'\n resources = 'networks'\n\n def get_parser(self, prog_name):\n parser = super(AddGatewayNetwork, self).get_parser(prog_name)\n parser.add_argument(\n '--segmentation-type',\n help=('L2 segmentation strategy on the external side of '\n 'the gateway (e.g.: VLAN, FLAT)'))\n parser.add_argument(\n '--segmentation-id',\n help=('Identifier for the L2 segment on the external side '\n 'of the gateway'))\n parser.add_argument(\n 'network',\n metavar='',\n help='Name or identifier of the internal network'\n )\n parser.add_argument(\n 'gateway',\n metavar='',\n help='Name or identifier of the gatway'\n )\n return parser\n\n def take_action(self, parsed_args):\n 
self.log.debug('take_action(%s)' % parsed_args)\n        client = self.app.client_manager.network\n        network_id = self.find_resource(parsed_args.network)\n        gateway_id = self.find('network_gateway', 'network_gateways',\n                               parsed_args.gateway)\n        body = {'network_id': network_id,\n                'segmentation_type': parsed_args.segmentation_type,\n                'segmentation_id': parsed_args.segmentation_id}\n        client.connect_network_gateway(gateway_id, body)\n        print ('Connected network to gateway %s' % gateway_id)\n\n\nclass RemoveGatewayNetwork(command.Command, common.BaseCommand):\n    \"\"\"Remove a gateway from a network\"\"\"\n\n    log = logging.getLogger(__name__ + '.RemoveGatewayNetwork')\n    resource = 'network'\n    resources = 'networks'\n\n    def get_parser(self, prog_name):\n        parser = super(RemoveGatewayNetwork, self).get_parser(prog_name)\n        parser.add_argument(\n            '--segmentation-type',\n            help=('L2 segmentation strategy on the external side of '\n                  'the gateway (e.g.: VLAN, FLAT)'))\n        parser.add_argument(\n            '--segmentation-id',\n            help=('Identifier for the L2 segment on the external side '\n                  'of the gateway'))\n        parser.add_argument(\n            'network',\n            metavar='NETWORK',\n            help='Name or identifier of the internal network'\n        )\n        parser.add_argument(\n            'gateway',\n            metavar='GATEWAY',\n            help='Name or identifier of the gateway'\n        )\n        return parser\n\n    def take_action(self, parsed_args):\n        self.log.debug('take_action(%s)' % parsed_args)\n        client = self.app.client_manager.network\n        network_id = self.find_resource(parsed_args.network)\n        gateway_id = self.find('network_gateway', 'network_gateways',\n                               parsed_args.gateway)\n        body = {'network_id': network_id,\n                'segmentation_type': parsed_args.segmentation_type,\n                'segmentation_id': parsed_args.segmentation_id}\n        client.disconnect_network_gateway(gateway_id, body)\n        print ('Disconnected network from gateway %s' % gateway_id)\n","sub_path":"openstackclient/network/v2_0/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"641357977","text":"# -*- coding: utf-8 -*-\n\nimport socket\nimport platform\n\n\nclass Network():\n def network(self):\n net = {\n 'hostname': socket.gethostname(),\n }\n return net\n\n\nclass Linux(Network):\n pass\n\n\nplatforms = {\n 'Linux': Linux()\n }\n\n\nclass BaseInfo(object):\n \"\"\" The base info class \"\"\"\n\n def __init__(self):\n \"\"\" Try to detect os type \"\"\"\n self.os = platform.system()\n self.hostname = socket.gethostname()\n try:\n self.info = platforms[self.os]\n except KeyError:\n raise Exception('Platform \"{}\" does not supported'.format(self.os))\n","sub_path":"aboutme/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"272671027","text":"# -*- coding: utf-8 -*-\nimport sys, os \nsys.path.append(os.path.dirname(__file__))\n\nfrom PyQt4 import QtCore, QtGui, uic\n\nGUI, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__),\n 'ui',\n 'merge_dialog.ui'), \n resource_suffix=''\n)\n\nclass Merge_Dialog(QtGui.QDialog, GUI):\n def __init__(self, iface):\n super(Merge_Dialog, self).__init__()\n self.setupUi(self)\n \n def loadCombo(self,itens):\n self.comboBox_branchAtivo.addItems(itens)\n self.comboBox_branch2Merge.addItems(itens) \n\n def getSelectedBranches(self):\n selectedBranches = {\n \"head\":self.comboBox_branchAtivo.currentText(),\n \"mergeHead\":self.comboBox_branch2Merge.currentText()\n }\n return selectedBranches\n \n\n\n \n","sub_path":"geogig_interface/merge_dialog.py","file_name":"merge_dialog.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"489187707","text":"\n# Copyright (C) 2021 Data Mining Group\n# \n# This file is part of POIE\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__version_info__ = (0,9,0)\n\n__version__ = \".\".join(map(str, __version_info__))\n\npypoieVersion = __version__ # the poie version is for informational purposes only; doesn't affect behavior\n\ndefaultPFAVersion = \"0.8.1\" # the PFA version determines how poie will interpret PFA documents (can be overridden)\n # must always be in the form [1-9][0-9]*\\.[1-9][0-9]*\\.[1-9][0-9]*\n","sub_path":"py-poie/poie/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"183685370","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth import get_user_model\nfrom django.core import signing\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext_lazy as _\n\nfrom business_logic import LogicErrors, LogicException\n\nfrom pca.utils.config import get_setting\n\nUser = get_user_model()\n\n\n_REGISTRATION_SALT = get_setting('REGISTRATION_SALT', 'registration')\n_ACCOUNT_ACTIVATION_DAYS = get_setting('ACCOUNT_ACTIVATION_DAYS', 30)\n_EMAIL_SUBJECT_TEMPLATE = ''\n_EMAIL_BODY_TEMPLATE = ''\n_DEFAULT_FROM_EMAIL = get_setting('GET_OWNER_EMAIL')\n\n\nclass ActivationErrors(LogicErrors):\n ALREADY_ACTIVATED = LogicException(\n _(\"The account you tried to activate has already been activated.\"))\n BAD_USERNAME = LogicException(_(\"The account you attempted to activate is invalid.\"))\n TOKEN_EXPIRED = LogicException(_(\"This account has expired.\"))\n INVALID_TOKEN = LogicException(_(\"The activation token you provided is invalid: {token}\"))\n\n\ndef send_activation_email(user, site, request_scheme):\n \"\"\"\n Send the activation email. The activation key is the username,\n signed using TimestampSigner.\n\n \"\"\"\n activation_token = _get_activation_token(user)\n context = {\n 'user': user,\n 'scheme': request_scheme,\n 'activation_token': activation_token,\n 'expiration_days': _ACCOUNT_ACTIVATION_DAYS,\n 'site': site,\n }\n subject = render_to_string(\n template_name=_EMAIL_SUBJECT_TEMPLATE,\n context=context,\n )\n # Force subject to a single line to avoid header-injection\n # issues.\n subject = ''.join(subject.splitlines())\n message = render_to_string(\n template_name=_EMAIL_BODY_TEMPLATE,\n context=context,\n )\n user.email_user(subject, message, _DEFAULT_FROM_EMAIL)\n\n\ndef _get_activation_token(user):\n \"\"\"Generate the activation key which will be emailed to the user.\"\"\"\n return signing.dumps(\n obj=user.get_username(),\n salt=_REGISTRATION_SALT\n )\n\n\ndef activate(activation_token, user_activated):\n username = validate_activation_token(activation_token)\n user = get_user_to_activate(username)\n user.is_active = True\n user.save()\n user_activated.send(user)\n return user\n\n\ndef validate_activation_token(activation_token):\n \"\"\"\n Verify that the activation token is valid and within the\n permitted activation time window, returning the username if\n valid or raising ``ActivationError`` if not.\n \"\"\"\n try:\n username = signing.loads(\n activation_token,\n salt=_REGISTRATION_SALT,\n max_age=_ACCOUNT_ACTIVATION_DAYS * 86400\n )\n return username\n except signing.SignatureExpired:\n raise ActivationErrors.TOKEN_EXPIRED\n except signing.BadSignature:\n raise ActivationErrors.BAD_USERNAME\n\n\ndef get_user_to_activate(username):\n \"\"\"\n Given the verified username, look up and return the corresponding user\n account if it exists, or raising one of ``ActivationErrors`` if it doesn't.\n \"\"\"\n try:\n user = User.objects.get(**{User.USERNAME_FIELD: username})\n if user.is_active:\n raise ActivationErrors.ALREADY_ACTIVATED\n return user\n except User.DoesNotExist:\n raise ActivationErrors.BAD_USERNAME\n","sub_path":"pca/users/services/activation.py","file_name":"activation.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"621905486","text":"class Solution(object):\n def addBinary(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n addOne, maxLen = 0, max(len(a), len(b))\n res = []\n i = 1\n while i<=maxLen or addOne:\n aDigit = int(a[-i]) if i<=len(a) else 0\n bDigit = int(b[-i]) if i<=len(b) else 0\n res.append( str((aDigit+bDigit+addOne)%2) )\n addOne = (aDigit+bDigit+addOne)//2\n i += 1\n list.reverse(res)\n return ''.join(res)\n","sub_path":"Leetcode/string/67_AddBinary/iterative/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"638217667","text":"### Simulating Randomness\nimport random\n\nrandom.choice(['Head', 'Tails'])\n\nroll_a_die = random.choice([1, 2, 3, 4, 5, 6])\nprint(roll_a_die)\n\nrandom.choice(range(1, 7))\n\nrandom.choice(random.choice([range(1, 7), range(1, 9), range(1, 11)]))\n\n\nrolls = [random.choice([1, 2, 3, 4, 5, 6]) for i in range(100)]\nplt.hist(rolls, bins=np.linspace(0.5, 6.5, 7))\n\nmore_rolls = [random.choice([1, 2, 3, 4, 5, 6]) for i in range(10000)]\nplt.hist(more_rolls, bins=np.linspace(0.5, 6.5, 7))\n\n### Generate a random number from the standard uniform dist\nf = np.random.random()\nprint(f)\n\nf_arr = np.random.random(50)\nprint (f_arr)\n\nf_mat = np.random.random((5, 3))\nprint(f_mat)\n\n### Generate random number from standard normal dist\ng = np.random.normal(0, 1) # where 0 = mean and 1 = diviation\nprint(g)\ng_arr = np.random.normal(0, 1, 50)\nprint(g_arr)\n\n# Random array of ints\nrarr = np.random.randint(1, 7, (10,3))\nprint(rarr)\nprint(rarr.shape)","sub_path":"HarvardX-PH526x-Py4Research/random_playground.py","file_name":"random_playground.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"625751218","text":"import sympy as smp\nfrom sympy import *\nimport matplotlib.pyplot as plt\nfrom scipy import *\nimport numpy as np\nfrom sympy.plotting import plot\nfrom scipy.integrate import *\nimport scipy as sp\nimport matplotlib as mpl\n\nmpl = \"qt4gg\"\n\n#sybolic part of the code\n\ndef Full():\n\n#define constants, variables and functions\n\n R=Symbol('R')\n r=Symbol('r')\n M=Function('M')\n Rho=Function('Rho')\n theta = Symbol('theta')\n #following values set for NFW profile in Milky Way\n alpha=1\n beta=3\n gamma=1\n Rho_0= 1 #2.8*10**37 #units: kg/kpc\n Rs = 2#16.1 #in kpc\n h = 2 #height of milky way disk, currently in kpc\n G = 1 #6.67191*10**-11 # m**3/kg*s**2 +_ 0.000009*10**-11\n\n #density profile\n #Rho =Rho_0/ ((1+(r/Rs)**2)**(3/2))\n Rho = Rho_0/ ( (r/Rs)**gamma * (1+(r/Rs)**alpha)**((beta-gamma)/alpha) )\n\n print(\"This is the Rho\", Rho)\n #print(Rho)\n\n #Having trouble with the integral using partial fraction\n #supposed to integrate Rho times the cylindrical jacobian over R to get the\n #mass as a function of the Radius Assuming I for a think disk\n #try adding in the central bluge. Assume that the buldge is a sphere of same density profile\n M = smp.integrate(2*smp.pi*r**2*h*Rho,(r,0,R)) + 2*smp.pi*smp.integrate(r**2*Rho,(r,0,R))*smp.integrate(smp.sin(theta),(theta,0,2*smp.pi))\n print(\"this is the Mass\", M)\n #print(M)\n\n #send it through the symbolic integrator\n #set the limits of the Radius here\n Rmin=2\n Rmax=60\n\n #this show the function we wish to plot\n #RH = integrate(M,(R))\n #print \"Our Mass-Radius Equation is:\"\n #print(RH)\n #define the velocity function as a functions\n #of R is\n #V is only a first order Newtonian approax\n V = smp.sqrt(G*M/R)\n print (\"Our Velocity Equation is:\", V)\n #print(V)\n#now we have to prepare the antiderivative of the equation for plotting\n#using the sympy plotting package.\n plot(V,(R,Rmin,Rmax),nb_of_points = 500,xlabel = (\"Radius\"), ylabel = (\"Velocity\"),title = (\"Velocity Profile for Galatic Mass Distributions in Sprial Galaxies\"))\n#this returns a plot of Velocity V and Radius This is good for the Star and Gas\n#distributions\n\n#time to use numpy to do evalutation numercially\n#need to get this working, the integrator is not working. I want to put\n#in a list and get an integrated list back. 
That way I can plot the function\n#plus the integral\n\n\n#Goal: do the same thing as the symbolic program but numerically\n# take a given density profile and integrate it to get the\n# mass, then take that mass and solve fot the velocity, plot the velocity as a function\n# of radius, show that the symbolic and Numeric parts coincide\n\n# define the numerical function\n #define R as a linear space\n R_min = 2\n R_max = 60\n R_step = 10000\n R = np.linspace(R_min,R_max,R_step)\n\n\n #define the density profile\n Rho =Rho_0/ ( (R/Rs)**gamma * (1+(R/Rs)**alpha)**((beta-gamma)/alpha) )\n #Somethings worng with this one\n\n #Rho_0/ ( (R/Rs)**gamma * (1+(R/Rs)**alpha)**((beta-gamma)/alpha) ) Somethings worng with this one\n\n #Rho_0/ ((1+(R/Rs)**2)**(3/2)) works well\n\n #Get Rho as a list\n #Rho = Rho(R,params)\n\n J = 2*np.pi*R #The Jacodian for the integral\n U = J*Rho #the integrand\n #print(U)\n\n #Get the mass as a list\n #requieres an integral\n M = cumtrapz(U, x=R, dx = R_step, initial = 0)\n #M = M(Rho,R,params)\n #print M\n\n #velocity given by Assuming that the gravational protential is constant\n # and the is a first order Newtionian approax\n V = np.sqrt((G*M)/R)\n\n #plotting the result\n #get plots to appear together\n plt.figure\n plt.plot(R,V,label = \"Velocity Profile Curve\")\n #plt.plot(R,U, label = 'RU curve ')\n #plt.plot(R,M, label = 'RM curve ')\n plt.legend()\n plt.title(\"Velocity Profile for Galatic Mass Distributions in Sprial Galaxies\")\n plt.xlabel(\"Radius\")\n plt.ylabel(\"Velocity as a Function of R\")\n plt.show()\n\nif __name__ == '__Full__':\n Full()\nFull()\n\n#Numerical()\n","sub_path":"Complete_11_30.py","file_name":"Complete_11_30.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"564320426","text":"# Test export module\nimport sys\n\nsys.path.insert(0, '..')\nimport copy\nimport os\nimport shutil\nimport numpy as np\nimport flopy\n\npth = os.path.join('..', 'examples', 'data', 'mf2005_test')\nnamfiles = [namfile for namfile in os.listdir(pth) if namfile.endswith('.nam')]\n# skip = [\"MNW2-Fig28.nam\", \"testsfr2.nam\", \"testsfr2_tab.nam\"]\nskip = []\n\n\ntpth = os.path.join('temp', 't007')\n# make the directory if it does not exist\nif not os.path.isdir(tpth):\n os.makedirs(tpth)\n\nnpth = os.path.join('temp', 't007', 'netcdf')\n# delete the directory if it exists\nif os.path.isdir(npth):\n shutil.rmtree(npth)\n# make the directory\nos.makedirs(npth)\n\nspth = os.path.join('temp', 't007', 'shapefile')\n# make the directory if it does not exist\nif not os.path.isdir(spth):\n os.makedirs(spth)\n\n\ndef export_netcdf(namfile):\n if namfile in skip:\n return\n print(namfile)\n m = flopy.modflow.Modflow.load(namfile, model_ws=pth, verbose=False)\n if m.sr.lenuni == 0:\n m.sr.lenuni = 1\n # print('skipping...lenuni==0 (undefined)')\n # return\n # if sum(m.dis.laycbd) != 0:\n if m.dis.botm.shape[0] != m.nlay:\n print('skipping...botm.shape[0] != nlay')\n return\n assert m, 'Could not load namefile {}'.format(namfile)\n assert isinstance(m, flopy.modflow.Modflow)\n\n # Do not fail if netCDF4 not installed\n try:\n import netCDF4\n import pyproj\n except:\n return\n\n fnc = m.export(os.path.join(npth, m.name + '.nc'))\n fnc.write()\n fnc_name = os.path.join(npth, m.name + '.nc')\n try:\n fnc = m.export(fnc_name)\n fnc.write()\n except Exception as e:\n raise Exception(\n 'ncdf export fail for namfile {0}:\\n{1} '.format(namfile, str(e)))\n try:\n nc = netCDF4.Dataset(fnc_name, 'r')\n except Exception as e:\n raise Exception('ncdf import fail for nc file {0}'.format(fnc_name))\n return\n\n\ndef export_shapefile(namfile):\n try:\n import shapefile as shp\n except:\n return\n\n print(namfile)\n m = flopy.modflow.Modflow.load(namfile, model_ws=pth, verbose=False)\n\n assert m, 'Could not load namefile {}'.format(namfile)\n assert isinstance(m, flopy.modflow.Modflow)\n fnc_name = os.path.join(spth, m.name + '.shp')\n try:\n fnc = m.export(fnc_name)\n #fnc2 = m.export(fnc_name, package_names=None)\n #fnc3 = m.export(fnc_name, package_names=['DIS'])\n\n\n except Exception as e:\n raise Exception(\n 'shapefile export fail for namfile {0}:\\n{1} '.format(namfile,\n str(e)))\n try:\n s = shp.Reader(fnc_name)\n except Exception as e:\n raise Exception(\n ' shapefile import fail for {0}:{1}'.format(fnc_name, str(e)))\n assert s.numRecords == m.nrow * m.ncol, \"wrong number of records in \" + \\\n \"shapefile {0}:{1:d}\".format(\n fnc_name, s.numRecords)\n return\n\ndef test_freyberg_export():\n namfile = 'freyberg.nam'\n model_ws = '../examples/data/freyberg_multilayer_transient/'\n m = flopy.modflow.Modflow.load(namfile, model_ws=model_ws, verbose=False,\n load_only=['DIS', 'BAS6', 'NWT', 'OC',\n 'RCH',\n 'WEL',\n 'DRN',\n 'UPW'])\n m.drn.stress_period_data.export(os.path.join(spth, namfile[:-4]+'.shp'), sparse=True)\n\ndef test_export_output():\n import os\n import numpy as np\n import flopy\n\n # Do not fail if netCDF4 not installed\n try:\n import netCDF4\n import pyproj\n except:\n return\n\n model_ws = os.path.join(\"..\", \"examples\", \"data\", \"freyberg\")\n ml = flopy.modflow.Modflow.load(\"freyberg.nam\", model_ws=model_ws)\n hds_pth = os.path.join(model_ws, \"freyberg.githds\")\n hds = flopy.utils.HeadFile(hds_pth)\n\n out_pth = os.path.join(npth, 
\"freyberg.out.nc\")\n nc = flopy.export.utils.output_helper(out_pth, ml,\n {\"freyberg.githds\": hds})\n var = nc.nc.variables.get(\"head\")\n arr = var[:]\n ibound_mask = ml.bas6.ibound.array == 0\n arr_mask = arr.mask[0]\n assert np.array_equal(ibound_mask, arr_mask)\n\n\ndef test_mbase_sr():\n import numpy as np\n import flopy\n\n ml = flopy.modflow.Modflow(modelname=\"test\", xul=1000.0,\n rotation=12.5, start_datetime=\"1/1/2016\")\n try:\n print(ml.sr.xcentergrid)\n except:\n pass\n else:\n raise Exception(\"should have failed\")\n\n dis = flopy.modflow.ModflowDis(ml, nrow=10, ncol=5, delr=np.arange(5),\n xul=500)\n print(ml.sr)\n assert ml.sr.xul == 500\n assert ml.sr.yll == -10\n ml.model_ws = tpth\n\n ml.write_input()\n ml1 = flopy.modflow.Modflow.load(\"test.nam\", model_ws=ml.model_ws)\n assert ml1.sr == ml.sr\n assert ml1.start_datetime == ml.start_datetime\n\n\ndef test_free_format_flag():\n import flopy\n Lx = 100.\n Ly = 100.\n nlay = 1\n nrow = 51\n ncol = 51\n delr = Lx / ncol\n delc = Ly / nrow\n top = 0\n botm = [-1]\n ms = flopy.modflow.Modflow(rotation=20.)\n dis = flopy.modflow.ModflowDis(ms, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc, top=top, botm=botm)\n bas = flopy.modflow.ModflowBas(ms, ifrefm=True)\n assert ms.free_format_input == bas.ifrefm\n ms.free_format_input = False\n assert ms.free_format_input == bas.ifrefm\n ms.free_format_input = True\n bas.ifrefm = False\n assert ms.free_format_input == bas.ifrefm\n bas.ifrefm = True\n assert ms.free_format_input == bas.ifrefm\n\n ms.model_ws = tpth\n ms.write_input()\n ms1 = flopy.modflow.Modflow.load(ms.namefile, model_ws=ms.model_ws)\n assert ms1.free_format_input == ms.free_format_input\n assert ms1.free_format_input == ms1.bas6.ifrefm\n ms1.free_format_input = False\n assert ms1.free_format_input == ms1.bas6.ifrefm\n bas.ifrefm = False\n assert ms1.free_format_input == ms1.bas6.ifrefm\n bas.ifrefm = True\n assert ms1.free_format_input == ms1.bas6.ifrefm\n\n\ndef test_sr():\n import flopy\n Lx = 100.\n Ly = 100.\n nlay = 1\n nrow = 51\n ncol = 51\n delr = Lx / ncol\n delc = Ly / nrow\n top = 0\n botm = [-1]\n ms = flopy.modflow.Modflow(rotation=20.)\n dis = flopy.modflow.ModflowDis(ms, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr, delc=delc, top=top, botm=botm)\n bas = flopy.modflow.ModflowBas(ms, ifrefm=True)\n\n # test instantiation of an empty sr object\n sr = flopy.utils.reference.SpatialReference()\n\n # test instantiation of SR with xul, yul and no grid\n sr = flopy.utils.reference.SpatialReference(xul=1, yul=1)\n\n xul, yul = 321., 123.\n sr = flopy.utils.SpatialReference(delr=ms.dis.delr.array,\n delc=ms.dis.delc.array, lenuni=3,\n xul=xul, yul=yul, rotation=20)\n\n #txt = 'yul does not approximately equal 100 - ' + \\\n # '(xul, yul) = ({}, {})'.format( ms.sr.yul, ms.sr.yul)\n assert abs(ms.sr.yul - Ly) < 1e-3#, txt\n ms.sr.xul = 111\n assert ms.sr.xul == 111\n\n # test that transform for arbitrary coordinates\n # is working in same as transform for model grid\n x, y = ms.sr.xcenter, ms.sr.ycenter[0]\n xt, yt = sr.transform(x, y)\n assert np.sum(xt - sr.xcentergrid[0]) < 1e-3\n x, y = ms.sr.xcenter[0], ms.sr.ycenter\n xt, yt = sr.transform(x, y)\n assert np.sum(yt - sr.ycentergrid[:, 0]) < 1e-3\n\n # test inverse transform\n x0, y0 = 9.99, 2.49\n x1, y1 = sr.transform(x0, y0)\n x2, y2 = sr.transform(x1, y1, inverse=True)\n assert np.abs(x2-x0) < 1e-6\n assert np.abs(y2-y0) < 1e6\n\n # test input using ul vs ll\n xll, yll = sr.xll, sr.yll\n sr2 = 
flopy.utils.SpatialReference(delr=ms.dis.delr.array,\n delc=ms.dis.delc.array, lenuni=3,\n xll=xll, yll=yll, rotation=20)\n assert sr2.xul == sr.xul\n assert sr2.yul == sr.yul\n assert np.array_equal(sr.xcentergrid, sr2.xcentergrid)\n assert np.array_equal(sr.ycentergrid, sr2.ycentergrid)\n\n ms.sr.lenuni = 1\n assert ms.sr.lenuni == 1\n\n ms.sr.units = \"feet\"\n assert ms.sr.units == \"feet\"\n\n ms.sr = sr\n assert ms.sr == sr\n assert ms.sr.lenuni != ms.dis.lenuni\n\n try:\n ms.sr.units = \"junk\"\n except:\n pass\n else:\n raise Exception(\"should have failed\")\n\n ms.start_datetime = \"1-1-2016\"\n assert ms.start_datetime == \"1-1-2016\"\n assert ms.dis.start_datetime == \"1-1-2016\"\n\n ms.model_ws = tpth\n ms.write_input()\n ms1 = flopy.modflow.Modflow.load(ms.namefile, model_ws=ms.model_ws)\n assert ms1.sr == ms.sr\n assert ms1.dis.sr == ms.dis.sr\n assert ms1.start_datetime == ms.start_datetime\n assert ms1.sr.units == ms.sr.units\n assert ms1.dis.lenuni == ms1.sr.lenuni\n #assert ms1.sr.lenuni != sr.lenuni\n ms1.sr = sr\n assert ms1.sr == ms.sr\n\n\ndef test_sr_scaling():\n nlay, nrow, ncol = 1, 10, 5\n delr, delc = 250, 500\n xll, yll = 286.80, 29.03\n\n print(np.__version__)\n # test scaling of length units\n ms2 = flopy.modflow.Modflow()\n dis = flopy.modflow.ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc)\n ms2.sr = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=3,\n xll=xll, yll=yll, rotation=0)\n ms2.sr.epsg = 26715\n ms2.dis.export(os.path.join(spth, 'dis2.shp'))\n ms3 = flopy.modflow.Modflow()\n dis = flopy.modflow.ModflowDis(ms3, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc)\n ms3.sr = flopy.utils.SpatialReference(delr=ms3.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n length_multiplier=2.,\n xll=xll, yll=yll, rotation=0)\n ms3.dis.export(os.path.join(spth, 'dis3.shp'), epsg=26715)\n\n # check that the origin(s) are maintained\n assert np.array_equal(ms3.sr.get_vertices(nrow - 1, 0)[1],\n [ms3.sr.xll, ms3.sr.yll])\n\n assert np.allclose(ms3.sr.get_vertices(nrow - 1, 0)[1],\n ms2.sr.get_vertices(nrow - 1, 0)[1])\n\n # check that the upper left corner is computed correctly\n # in this case, length_multiplier overrides the given units\n def check_size(sr):\n xur, yur = sr.get_vertices(0, ncol - 1)[3]\n assert np.abs(xur - (xll + sr.length_multiplier * delr * ncol)) < 1e-4\n assert np.abs(yur - (yll + sr.length_multiplier * delc * nrow)) < 1e-4\n check_size(ms3.sr)\n\n # run the same tests but with units specified instead of a length multiplier\n ms2 = flopy.modflow.Modflow()\n dis = flopy.modflow.ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr, delc=delc,\n lenuni=1 # feet; should have no effect on SR\n # (model not supplied to SR)\n )\n ms2.sr = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array,\n lenuni=2, # meters\n epsg=26715, # meters, listed on spatialreference.org\n xll=xll, yll=yll, rotation=0)\n assert ms2.sr.model_length_units == 'meters'\n assert ms2.sr.length_multiplier == 1.\n ms2.sr.lenuni = 1 # feet; test dynamic setting\n assert ms2.sr.model_length_units == 'feet'\n check_size(ms2.sr)\n assert ms2.sr.length_multiplier == .3048\n ms2.sr.lenuni = 3 # centimeters\n assert ms2.sr.model_length_units == 'centimeters'\n check_size(ms2.sr)\n assert ms2.sr.length_multiplier == 0.01\n ms2.sr.lenuni = 2 # meters\n check_size(ms2.sr)\n ms2.sr.units = 'meters'\n ms2.sr.proj4_str = '+proj=utm +zone=16 +datum=NAD83 +units=us-ft +no_defs'\n 
assert ms2.sr.proj4_str == '+proj=utm +zone=16 +datum=NAD83 +units=us-ft +no_defs'\n assert ms2.sr.units == 'feet'\n assert ms2.sr.length_multiplier == 1/.3048\n check_size(ms2.sr)\n ms2.sr.epsg = 6610 # meters, not listed on spatialreference.org but understood by pyproj\n assert ms2.sr.units == 'meters'\n assert ms2.sr.proj4_str is not None\n check_size(ms2.sr)\n\ndef test_dynamic_xll_yll():\n nlay, nrow, ncol = 1, 10, 5\n delr, delc = 250, 500\n xll, yll = 286.80, 29.03\n # test scaling of length units\n ms2 = flopy.modflow.Modflow()\n dis = flopy.modflow.ModflowDis(ms2, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc)\n sr1 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xll=xll, yll=yll, rotation=30)\n xul, yul = sr1.xul, sr1.yul\n sr1.length_multiplier = 1.0 / 3.281\n assert sr1.xll == xll\n assert sr1.yll == yll\n sr2 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xul=xul, yul=yul, rotation=30)\n sr2.length_multiplier = 1.0 / 3.281\n assert sr2.xul == xul\n assert sr2.yul == yul\n\n # test resetting of attributes\n sr3 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xll=xll, yll=yll, rotation=30)\n # check that xul, yul and xll, yll are being recomputed\n sr3.xll += 10.\n sr3.yll += 21.\n assert np.abs(sr3.xul - (xul + 10.)) < 1e-6\n assert np.abs(sr3.yul - (yul + 21.)) < 1e-6\n sr4 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xul=xul, yul=yul, rotation=30)\n assert sr4.origin_loc == 'ul'\n sr4.xul += 10.\n sr4.yul += 21.\n assert np.abs(sr4.xll - (xll + 10.)) < 1e-6\n assert np.abs(sr4.yll - (yll + 21.)) < 1e-6\n sr4.rotation = 0.\n assert np.abs(sr4.xul - (xul + 10.)) < 1e-6 # these shouldn't move because ul has priority\n assert np.abs(sr4.yul - (yul + 21.)) < 1e-6\n assert np.abs(sr4.xll - sr4.xul) < 1e-6\n assert np.abs(sr4.yll - (sr4.yul - sr4.yedge[0])) < 1e-6\n sr4.xll = 0.\n sr4.yll = 10.\n assert sr4.origin_loc == 'll'\n assert sr4.xul == 0.\n assert sr4.yul == sr4.yedge[0] + 10.\n sr4.xul = xul\n sr4.yul = yul\n assert sr4.origin_loc == 'ul'\n sr4.rotation = 30.\n assert np.abs(sr4.xll - xll) < 1e-6\n assert np.abs(sr4.yll - yll) < 1e-6\n\n sr5 = flopy.utils.SpatialReference(delr=ms2.dis.delr.array,\n delc=ms2.dis.delc.array, lenuni=2,\n xll=xll, yll=yll,\n rotation=0, epsg=26915)\n sr5.lenuni = 1\n assert sr5.length_multiplier == .3048\n assert sr5.yul == sr5.yll + sr5.yedge[0] * sr5.length_multiplier\n sr5.lenuni = 2\n assert sr5.length_multiplier == 1.\n assert sr5.yul == sr5.yll + sr5.yedge[0]\n sr5.proj4_str = '+proj=utm +zone=16 +datum=NAD83 +units=us-ft +no_defs'\n assert sr5.units == 'feet'\n assert sr5.length_multiplier == 1/.3048\n\ndef test_namfile_readwrite():\n nlay, nrow, ncol = 1, 30, 5\n delr, delc = 250, 500\n xll, yll = 272300, 5086000\n fm = flopy.modflow\n m = fm.Modflow(modelname='junk', model_ws=os.path.join('temp', 't007'))\n dis = fm.ModflowDis(m, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr,\n delc=delc)\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array, lenuni=3,\n length_multiplier=.3048,\n xll=xll, yll=yll, rotation=30)\n\n # test reading and writing of SR information to namfile\n m.write_input()\n m2 = fm.Modflow.load('junk.nam', model_ws=os.path.join('temp', 't007'))\n assert abs(m2.sr.xll - xll) < 1e-2\n assert abs(m2.sr.yll - yll) < 1e-2\n assert m2.sr.rotation == 30\n assert abs(m2.sr.length_multiplier - 
.3048) < 1e-10\n\n model_ws = os.path.join(\"..\", \"examples\", \"data\", \"freyberg_multilayer_transient\")\n ml = flopy.modflow.Modflow.load(\"freyberg.nam\", model_ws=model_ws, verbose=False,\n check=False, exe_name=\"mfnwt\")\n assert ml.sr.xul == 619653\n assert ml.sr.yul == 3353277\n assert ml.sr.rotation == 15.\n\ndef test_read_usgs_model_reference():\n nlay, nrow, ncol = 1, 30, 5\n delr, delc = 250, 500\n #xll, yll = 272300, 5086000\n model_ws = os.path.join('temp', 't007')\n shutil.copy('../examples/data/usgs.model.reference', model_ws)\n fm = flopy.modflow\n m = fm.Modflow(modelname='junk', model_ws=model_ws)\n # feet and days\n dis = fm.ModflowDis(m, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr,\n delc=delc, lenuni=1, itmuni=4)\n m.write_input()\n\n # test reading of SR information from usgs.model.reference\n m2 = fm.Modflow.load('junk.nam', model_ws=os.path.join('temp', 't007'))\n from flopy.utils.reference import SpatialReference\n d = SpatialReference.read_usgs_model_reference_file(os.path.join('temp', 't007', 'usgs.model.reference'))\n assert m2.sr.xul == d['xul']\n assert m2.sr.yul == d['yul']\n assert m2.sr.rotation == d['rotation']\n assert m2.sr.lenuni == d['lenuni']\n assert m2.sr.epsg == d['epsg']\n # have to delete this, otherwise it will mess up other tests\n if os.path.exists(os.path.join(tpth, 'usgs.model.reference')):\n os.remove(os.path.join(tpth, 'usgs.model.reference'))\n\n\ndef test_rotation():\n m = flopy.modflow.Modflow(rotation=20.)\n dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=40, ncol=20,\n delr=250.,\n delc=250., top=10, botm=0)\n xul, yul = 500000, 2934000\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n xul=xul, yul=yul, rotation=45.)\n xll, yll = m.sr.xll, m.sr.yll\n assert np.abs(m.dis.sr.xgrid[0, 0] - xul) < 1e-4\n assert np.abs(m.dis.sr.ygrid[0, 0] - yul) < 1e-4\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n xul=xul, yul=yul, rotation=-45.)\n assert m.dis.sr.xgrid[0, 0] == xul\n assert m.dis.sr.ygrid[0, 0] == yul\n xll2, yll2 = m.sr.xll, m.sr.yll\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n xll=xll2, yll=yll2, rotation=-45.)\n assert m.dis.sr.xgrid[0, 0] == xul\n assert m.dis.sr.ygrid[0, 0] == yul\n m.sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n xll=xll, yll=yll, rotation=45.)\n assert m.dis.sr.xgrid[0, 0] == xul\n assert m.dis.sr.ygrid[0, 0] == yul\n\n\ndef test_sr_with_Map():\n import matplotlib.pyplot as plt\n m = flopy.modflow.Modflow(rotation=20.)\n dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=40, ncol=20,\n delr=250.,\n delc=250., top=10, botm=0)\n # transformation assigned by arguments\n xul, yul, rotation = 500000., 2934000., 45.\n modelmap = flopy.plot.ModelMap(model=m, xul=xul, yul=yul,\n rotation=rotation)\n lc = modelmap.plot_grid()\n xll, yll = modelmap.sr.xll, modelmap.sr.yll\n plt.close()\n\n def check_vertices():\n xllp, yllp = lc._paths[0].vertices[0]\n xulp, yulp = lc._paths[0].vertices[1]\n assert np.abs(xllp - xll) < 1e-6\n assert np.abs(yllp - yll) < 1e-6\n assert np.abs(xulp - xul) < 1e-6\n assert np.abs(yulp - yul) < 1e-6\n\n check_vertices()\n\n modelmap = flopy.plot.ModelMap(model=m, xll=xll, yll=yll,\n rotation=rotation)\n lc = modelmap.plot_grid()\n check_vertices()\n plt.close()\n\n # transformation in m.sr\n sr = flopy.utils.SpatialReference(delr=m.dis.delr.array,\n delc=m.dis.delc.array,\n xll=xll, yll=yll, rotation=rotation)\n m.sr = copy.deepcopy(sr)\n 
modelmap = flopy.plot.ModelMap(model=m)\n lc = modelmap.plot_grid()\n check_vertices()\n plt.close()\n\n # transformation assign from sr instance\n m.sr._reset()\n m.sr.set_spatialreference()\n modelmap = flopy.plot.ModelMap(model=m, sr=sr)\n lc = modelmap.plot_grid()\n check_vertices()\n plt.close()\n\n # test plotting of line with specification of xul, yul in Dis/Model Map\n mf = flopy.modflow.Modflow()\n\n # Model domain and grid definition\n dis = flopy.modflow.ModflowDis(mf, nlay=1, nrow=10, ncol=20, delr=1., delc=1., xul=100, yul=210)\n #fig, ax = plt.subplots()\n verts = [[101., 201.], [119., 209.]]\n modelxsect = flopy.plot.ModelCrossSection(model=mf, line={'line': verts},\n xul=mf.dis.sr.xul, yul=mf.dis.sr.yul)\n linecollection = modelxsect.plot_grid()\n plt.close()\n\n\ndef test_netcdf_classmethods():\n import os\n import flopy\n\n # Do not fail if netCDF4 not installed\n try:\n import netCDF4\n import pyproj\n except:\n return\n\n nam_file = \"freyberg.nam\"\n model_ws = os.path.join('..', 'examples', 'data',\n 'freyberg_multilayer_transient')\n ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,\n verbose=True, load_only=[])\n\n f = ml.export(os.path.join(npth, \"freyberg.nc\"))\n v1_set = set(f.nc.variables.keys())\n fnc = os.path.join(npth, \"freyberg.new.nc\")\n new_f = flopy.export.NetCdf.zeros_like(f, output_filename=fnc)\n v2_set = set(new_f.nc.variables.keys())\n diff = v1_set.symmetric_difference(v2_set)\n assert len(diff) == 0, str(diff)\n\n# def test_netcdf_overloads():\n# import os\n# import flopy\n# nam_file = \"freyberg.nam\"\n# model_ws = os.path.join('..', 'examples', 'data', 'freyberg_multilayer_transient')\n# ml = flopy.modflow.Modflow.load(nam_file,model_ws=model_ws,check=False,\n# verbose=False,load_only=[])\n#\n# f = ml.export(os.path.join(\"temp\",\"freyberg.nc\"))\n# fzero = flopy.export.NetCdf.zeros_like(f)\n# assert fzero.nc.variables[\"model_top\"][:].sum() == 0\n# print(f.nc.variables[\"model_top\"][0,:])\n# fplus1 = f + 1\n# assert fplus1.nc.variables[\"model_top\"][0,0] == f.nc.variables[\"model_top\"][0,0] + 1\n# assert (f + fplus1).nc.variables[\"model_top\"][0,0] ==\\\n# f.nc.variables[\"model_top\"][0,0] + \\\n# fplus1.nc.variables[\"model_top\"][0,0]\n#\n# fminus1 = f - 1\n# assert fminus1.nc.variables[\"model_top\"][0,0] == f.nc.variables[\"model_top\"][0,0] - 1\n# assert (f - fminus1).nc.variables[\"model_top\"][0,0]==\\\n# f.nc.variables[\"model_top\"][0,0] - \\\n# fminus1.nc.variables[\"model_top\"][0,0]\n#\n# ftimes2 = f * 2\n# assert ftimes2.nc.variables[\"model_top\"][0,0] == f.nc.variables[\"model_top\"][0,0] * 2\n# assert (f * ftimes2).nc.variables[\"model_top\"][0,0] ==\\\n# f.nc.variables[\"model_top\"][0,0] * \\\n# ftimes2.nc.variables[\"model_top\"][0,0]\n#\n# fdiv2 = f / 2\n# assert fdiv2.nc.variables[\"model_top\"][0,0] == f.nc.variables[\"model_top\"][0,0] / 2\n# assert (f / fdiv2).nc.variables[\"model_top\"][0,0] == \\\n# f.nc.variables[\"model_top\"][0,0] / \\\n# fdiv2.nc.variables[\"model_top\"][0,0]\n#\n# assert f.nc.variables[\"ibound\"][0,0,0] == 1\n\n\ndef test_shapefile_ibound():\n import os\n import flopy\n try:\n import shapefile\n except:\n return\n\n shape_name = os.path.join(spth, \"test.shp\")\n nam_file = \"freyberg.nam\"\n model_ws = os.path.join('..', 'examples', 'data',\n 'freyberg_multilayer_transient')\n ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,\n verbose=True, load_only=[])\n ml.export(shape_name)\n shp = shapefile.Reader(shape_name)\n field_names = 
[item[0] for item in shp.fields][1:]\n ib_idx = field_names.index(\"ibound_001\")\n txt = \"should be int instead of {0}\".format(type(shp.record(0)[ib_idx]))\n assert type(shp.record(0)[ib_idx]) == int, txt\n\n\ndef test_shapefile():\n for namfile in namfiles:\n yield export_shapefile, namfile\n return\n\ndef test_netcdf():\n for namfile in namfiles:\n yield export_netcdf, namfile\n\n return\n\n\ndef build_netcdf():\n for namfile in namfiles:\n export_netcdf(namfile)\n return\n\n\ndef build_sfr_netcdf():\n namfile = 'testsfr2.nam'\n export_netcdf(namfile)\n return\n\n\nif __name__ == '__main__':\n #test_shapefile()\n # test_shapefile_ibound()\n # test_netcdf_overloads()\n #test_netcdf_classmethods()\n # build_netcdf()\n # build_sfr_netcdf()\n #test_sr()\n #test_mbase_sr()\n #test_rotation()\n test_sr_with_Map()\n #test_sr_scaling()\n #test_read_usgs_model_reference()\n #test_dynamic_xll_yll()\n #test_namfile_readwrite()\n # test_free_format_flag()\n # test_export_output()\n #for namfile in namfiles:\n # for namfile in [\"fhb.nam\"]:\n # export_netcdf(namfile)\n #test_freyberg_export()\n pass\n","sub_path":"autotest/t007_test.py","file_name":"t007_test.py","file_ext":"py","file_size_in_byte":26204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"543692453","text":"import flopy as fp\nimport numpy as np\nimport geopandas as gp\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nfrom flopy.utils.gridgen import Gridgen \nfrom flopy.utils.gridintersect import GridIntersect\nfrom flopy.utils import Raster\nimport shapely\nfrom shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon\nfrom shapely.strtree import STRtree \n\n#1\ndef gp2cellids (grid, gp, idomain, idomain_active=True, type = \"polygon\",layer=0,areas=3):\n \n \"\"\"\n this function extract the cellids of the intersection between a geopandas object and a grid \n grid : modelgrid with flopy.discretisation !\n gp : geopandas object with one entity only\n idomain : array, the idomain array to update it\n idomain_active : bool, if true the idomain is update (cells intersect by the gp will be noted as active), prevents some issues\n type : str, features type (polygon or line)\n layer : int, the layer on which is the gp\n areas : factor that determine if a cell is accounted as intersected or not based on the total area intersected\n (a value of 3, for example, means only cells which have 1/3 of their area intersected by the polygon will be taken into account)\n \"\"\"\n \n ix = GridIntersect(grid)\n if type == \"polygon\":\n result = ix.intersect(gp.geometry[0])\n result = result[result.areas>(np.nanmax(result.areas)/3)] # only take into account cells that have a least 1/3 intersected \n \n \n if type == \"boundary\" :\n result = ix.intersect(gp.geometry[0].boundary)\n \n if type == \"line\" :\n result = ix.intersect(gp.geometry[0])\n \n result = result[result.areas!=0] # fix bug with some null areas\n \n lst=[]\n for irow, icol in result.cellids:\n lst.append(((layer,irow,icol)))\n if idomain_active:\n idomain[irow*grid.ncol+icol] = 1\n return lst\n\n#2\ndef cellidBD(idomain, layer=0): \n \n \"\"\"\n extract the cellids at the boundary of the domain at a given layer\n idomain : 3D array, idomain array which determine if a cell is active or not (1 active, 0 inactive)\n layer : int, layer on which the boundary cells are extract\n \"\"\"\n lst_cellBD=[]\n\n for irow in range(idomain.shape[1]):\n for icol in range(idomain.shape[2]):\n if idomain[layer][irow,icol]==1:\n #check neighbours\n if np.sum(idomain[layer][irow-1:irow+2,icol-1:icol+2]==1) < 8:\n lst_cellBD.append((layer,irow,icol))\n return lst_cellBD\n\n\n#3 get functions\ndef get_heads(model_name,workspace,obj=False):\n \"\"\"\n Function that returns the heads from the headfile\n model_name : str, the name of the current model\n workspace : str, the path to workspace (where output files are stored)\n obj : bool, if we want to retrieve the head object rather than the computed heads for the last stress period\n \"\"\"\n headfile = '{}.hds'.format(model_name)\n fname = os.path.join(workspace,headfile) \n hdobj = fp.utils.HeadFile(fname, precision='double') \n head = hdobj.get_data()\n \n if obj:\n return hdobj\n else:\n return head\n\ndef get_spdis(model_name,workspace):\n \"\"\"\n Function that returns the specific discharge from the cbcfile\n \"\"\"\n spdfile = '{}.cbc'.format(model_name)\n fname = os.path.join(workspace,spdfile) \n spdobj = fp.utils.CellBudgetFile(fname, precision='double') \n spd = spdobj.get_data(text=\"SPDIS\")\n return spd\n\ndef get_cbc(model_name,workspace):\n cbcfile = '{}.cbc'.format(model_name)\n fname = os.path.join(workspace,cbcfile) \n cbcobj = fp.utils.CellBudgetFile(fname, precision='double') \n return cbcobj\n\ndef 
get_budgetobj(model_name,workspace):\n \"\"\"\n Function that returns the budget file as an object\n \"\"\"\n lstBudgetfile = \"{}.lst\".format(model_name)\n fname = os.path.join(workspace,lstBudgetfile)\n Budgetobj = fp.utils.Mf6ListBudget(fname)\n return Bugetobj\n\n\n#4\ndef inter_lst (lst1,lst2,typ = \"intersection\"):\n \n \"\"\"\n return the intersection/unique values of the list1 compared to list2\n lst1 and lst2 : list\n typ : type of comparison (intersection or unique)\n \"\"\"\n \n if typ == \"intersection\":\n return [i for i in lst1 if i in lst2]\n if typ == \"unique\":\n return [i for i in lst1 if i not in lst2]\n\n#5 \ndef import_riv(grid,gp,lst_domain):\n \n \"\"\"\n This function extract infos about a river (geopandas object, LINESTRING),cellids + lengths of in each cells in the right order. \n Format : \n \n grid : from the gwf model, gwf.modelgrid for ex. or flopy.discretisation)\n gp : a geopandas object containing a single Linestring(which can have multiple segements however)\n lst_domain : list of all active cells\n\n Return a dataframe containing these datas, post-processing necessary to remove cells that are already counted as BC in the model\n \"\"\"\n \n nlay = np.max(np.array(lst_domain)[:,0])+1 #nlay\n \n ix = GridIntersect(grid)\n coord_riv=[]\n for x,y in zip(gp.geometry[0].xy[0],gp.geometry[0].xy[1]): # extract river coord\n coord_riv.append((x,y))\n\n verti=[]\n df_tot_ord = pd.DataFrame() # empty DF\n for i in range(len(coord_riv)):\n if i < len(coord_riv)-1:\n lsi = LineString([coord_riv[i],coord_riv[i+1]]) # create the linestring btw point i and i+1\n res = ix.intersect(lsi) # do the intersection\n res = res[res[\"lengths\"]!=0] # remove a bug issue on Linux with lengths == 0\n cellids = res.cellids # extract cellids (row,col, only)\n\n if len(cellids)>1: # if more than one cells is intersected --> we need to order them\n\n dirx = coord_riv[i+1][0]-coord_riv[i][0] # variation of x (to know if the segment go to right or left)\n\n for x,y in res.vertices: \n verti.append(x)\n vertix = np.array(verti)[:,0] # extract the 1st vertice of the intersections in order to organize the cells\n df = pd.DataFrame({\"cellids\":cellids,\"vertix\":vertix,\"lengths\":res.lengths}) # create a temp DF to order\n verti=[]\n\n #organize the cells given the direction\n if dirx > 0:\n df.sort_values(by=[\"vertix\"],ascending=True,inplace=True) \n if dirx < 0:\n df.sort_values(by=[\"vertix\"],ascending=False,inplace=True) \n\n # append these data in a big DF\n df_tot_ord = df_tot_ord.append(df,sort=True).drop([\"vertix\"],axis=1)\n\n else : # if only one cell is intersected by the linestring\n df_tot_ord = df_tot_ord.append(pd.DataFrame({\"cellids\":cellids,\"lengths\":res.lengths}))\n\n df_riv = df_tot_ord.groupby([\"cellids\"],sort=False).sum() # regroup river within the same cells and sum the lengths\n\n # retrieve data\n lst_len_Riv = df_riv[\"lengths\"].values\n\n # attribute on which layer these cells are active\n cellids_Riv=[]; # list of all the cells intersected by the river\n cellids = df_riv.index\n for irow,icol in cellids:\n for layer in range(nlay):\n cell = (layer,irow,icol)\n if cell in lst_domain: #attribute the river to the uppermost active cell\n break\n if cell not in cellids_Riv:\n cellids_Riv.append(cell)\n\n df_riv = pd.DataFrame({\"cellids\":cellids_Riv,\"lengths\":lst_len_Riv}) \n return df_riv\n\n \n#6\ndef Complete_riv(riv_path,stations_csv,us,ds,lst_chd,lst_domain,grid):\n \n \"\"\"\n a complete function that import a river and return the stress data.\n 
the river path, the station path and the upstream and downstream head as number of layers must be provided\n \n riv_path : the path to the shapefile of the river (one linestring only)\n stations_csv : path to the csv file containing the infos about the stations (x,y,elevation)\n lst_chd : a list of every cells constant heads\n lst_domain : a list of each active cell\n grid : grid of the model\n \"\"\"\n \n BC_riv = gp.read_file(riv_path) # read shp, linestring from ups to dws\n df_riv = import_riv(grid,BC_riv,lst_domain) # extract cellids intersected + lengths in each cells\n df_riv[\"xc\"],df_riv[\"yc\"] = get_cellcenters(grid,df_riv.cellids)\n df_riv[\"head\"] = np.zeros([df_riv.shape[0]]) # create a new column for the heads\n\n # us and ds heads\n df_riv.loc[0,\"head\"] = us\n df_riv.loc[df_riv.index[-1],\"head\"] = ds\n \n # ref points and assignement of heads\n riv_stations = pd.read_csv(stations_csv,sep=\";\")\n for i in riv_stations.index:\n xs = riv_stations.loc[i].x\n ys = riv_stations.loc[i].y\n elev = riv_stations.loc[i].elev\n dist = ((df_riv[\"xc\"] - xs)**2 + (df_riv[\"yc\"] - ys)**2)**0.5\n df_riv.loc[dist==np.min(dist),\"head\"] = elev\n\n # interpolation of the heads btw ups,stations and ds\n # linInt_Dfcol(df_riv,col=\"head\")\n \n # length cumulated\n lcm=0\n l_cum=[]\n for l in df_riv.lengths:\n lcm += l/2\n l_cum.append(lcm)\n lcm += l/2\n df_riv[\"l_cum\"] = l_cum\n \n # linear interp (0 as a null value)\n yp = df_riv[\"head\"][df_riv[\"head\"]!=0]\n xp = df_riv[\"l_cum\"][df_riv[\"head\"]!=0]\n df_riv[\"head\"] = np.interp(df_riv[\"l_cum\"],xp,yp)\n \n # drop cells outside domain or already chd\n for cellid in df_riv.cellids:\n if (cellid in lst_chd) | (cellid not in lst_domain): \n df_riv = df_riv.drop(df_riv[df_riv[\"cellids\"] == cellid].index)\n \n # create the stress package\n df_riv= df_riv.reset_index()\n H_riv = df_riv[\"head\"]\n riv_chd=[]; o =-1;\n for x in df_riv.cellids:\n o = o + 1\n riv_chd.append((x,H_riv[o]))\n lst_chd.append(x) # update chd list\n return riv_chd\n\n\n#7\ndef get_cellcenters (grid,cellids): \n \"\"\"\n This function return the x and y coordinates of a given cellid and a grid (dis only)\n \"\"\"\n xc=[];yc=[]\n \n for i,j,k in cellids:\n xc.append(grid.xcellcenters[j,k])\n yc.append(grid.ycellcenters[j,k])\n return xc,yc\n \n#8\ndef ra_pack(pack,ibd,iper=0,value=-1):\n \n \"\"\"\n Return an array containing position of cells from a certain package\n Can be used to plot the bc zones of a certain package (pack)\n pack : a bc package which possess a stress_period_data attribute\n ibd : 3D array on which the value will be change \n iper : int, stress period\n value : int, value of replacement in ibd\n \"\"\"\n \n ra = pack.stress_period_data.get_data(key=iper)\n for k, i, j in ra['cellid']:\n ibd[k, i, j] = value \n\n#9\ndef importControlPz (file_path,grid,sheetName=\"1990\",np_col = \"NP\",x_col=\"x\",y_col=\"y\"):\n \n \"\"\"\n For 2D models ! 
\n return an array (nrow,ncol) containing infos about pz observations in control pz\n file_path : the file path to the excel sheet\n grid : modelgrid (flopy.discretization.structuredgrid object)\n sheetName : the name of the data sheet \n np_col : the name of the column in the file containing infos about the PL\n x_col,y_col : the name of the columns containings geo infos (x and y coordinates)\n \"\"\"\n \n DB = pd.read_excel(file_path,sheet_name = sheetName) # read the file with pandas\n \n Control_pz = np.zeros([grid.nrow,grid.ncol]) #ini list\n lstIDpz=[];Pz = [];\n \n for o in np.arange(DB.shape[0]): # loop to iterate through the data and returns the intersected cellids\n xc = DB[\"x\"][o]\n yc = DB[\"y\"][o] \n cellid = grid.intersect(xc,yc)\n \n if DB[np_col][o]: # check that a head data is available\n lstIDpz.append(cellid) # list of cellids\n Pz.append(DB[np_col][o]) # list of value\n \n df = pd.DataFrame()\n df[\"cellid\"]=lstIDpz\n df[\"Pz\"] = Pz\n df = df.groupby([\"cellid\"]).mean().reset_index() # regroup pz in the same cells and apply mean\n \n #create the obs array\n for i in df.index:\n j,k = df.loc[i,\"cellid\"] #extract cellids\n Control_pz[j,k] = df.loc[i,\"Pz\"] # change pz value\n \n return Control_pz\n\n#10\ndef importWells(GDB,grid,lst_domain,fac=1/365/86400,V_col=\"V Bancaris\",layer=0):\n \n \"\"\"\n 2D only !\n extract the infos about the uptake of wells\n path : path to the shp (multi points required)\n grid : the modelgrid\n fac : the factor to apply on the Volume to get m3/s\n V_col : the column name containing info about Volume\n layer : the layer on which the wells are active\n \"\"\"\n \n\n stress_data_well=[]\n ix = GridIntersect(grid)\n\n for o in GDB.index:\n Vw = GDB[V_col][o]\n if not (np.isnan(Vw)) | (Vw == 0):\n try:\n cellidx = ix.intersect(GDB.geometry[o]).cellids[0][0]\n cellidy = ix.intersect(GDB.geometry[o]).cellids[0][1]\n cellid = (layer,cellidx,cellidy)\n if cellid in lst_domain:\n stress_data_well.append((cellid,-fac*Vw))\n except:\n pass\n return stress_data_well\n\n#11\ndef coor_convert(x,y,epsgin,epsgout):\n \n \"\"\"\n Function that converts coordinates\n x,y : coordinates from epsgin\n epsgin : actual epsg system \n epsgout : the epsg goal\n \"\"\"\n \n from pyproj import Proj, transform\n inproj = Proj(init=\"epsg:{}\".format(epsgin))\n outproj = Proj(init=\"epsg:{}\".format(epsgout))\n xp,yp = transform(inproj,outproj,x,y)\n return xp,yp\n\n#12\ndef chd2riv(riv_chd,cond,rdepth,stage_var=1):\n \n \"\"\"\n Transform a chd stress period data into a riv stress period data\n riv_chd : list, chd spd (cellid,stage)\n cond : float, conducance of the riverbed\n rdepth : float, depth of the river botom (from the stage)\n \"\"\"\n \n Riv=[]\n for cellid,stage in riv_chd:\n Riv.append((cellid,(stage-rdepth)+rdepth*stage_var,cond,stage-rdepth))\n riv_chd[:] = Riv\n\n#13\ndef nn2kij(n,nlay,nrow,ncol):\n \n \"\"\"\n from a node number to ilay,irow and icol (dis)\n \"\"\"\n \n return fp.utils.gridintersect.ModflowGridIndices.kij_from_nn0(n,nlay,nrow,ncol)\n\n#14\ndef get_Total_Budget(model_name,model_dir,kstpkper=(0,0)):\n\n \"\"\"\n Return a DF containing Budget data for the entire model by searching in the LST file. 
Budget should have been Printed in Output Control\n model_name : str, name of the model given in the gwf pack\n model_dir : str, path to workspace\n \"\"\"\n \n file = os.path.join(model_dir,\"{}.lst\".format(model_name)) \n with open(file) as f:\n doc = f.readlines()\n i=-1\n tmstp=0;sp=0;inf=0\n for ilin in doc: # iterate through lines\n i += 1 # idx line\n info=\"\"\n try:\n tmstp = int(ilin[52:58].split(\",\")[0])\n sp = int(ilin[73:-1])\n info = ilin[2:15]\n except:\n pass\n if (info == \"VOLUME BUDGET\") & (tmstp == kstpkper[0]+1) & (sp == kstpkper[1]+1): #if this line is encountered --> break\n break\n \n if i == len(doc):\n raise Exception (\"No Budget info found ! Check Output Control or stress period \")\n \n ###number of packages\n npack=0\n for o in range(1000):\n if doc[i+8+o]==\"\\n\":\n break\n npack += 1\n ###number of packages\n \n # retrieve data\n lst_val_IN =[]\n lst_val_OUT = []\n lst_nam_pak = []\n pak_type=[]\n for ipak in range(npack): # ipak --> line indice for a specific package\n ipak += 8 # packages begin 8 lines after i\n\n lst_nam_pak.append(doc[i+ipak][85:96].rstrip()) # Package name\n lst_val_IN.append(float(doc[i+ipak][63:80])) # value IN\n lst_val_OUT.append(float(doc[i+ipak+npack+5][63:80])) # Value OUT\n pak_type.append(doc[i+ipak][55:62]) # Package type\n\n Budget = pd.DataFrame({\"Pack\":lst_nam_pak,\n \"IN\":lst_val_IN,\n \"OUT\":lst_val_OUT,\n \"Type\":pak_type})\n\n return Budget\n\n#15\ndef arr2ascii(arr,filename,x0,y0,res,nodata=-9999):\n \n \"\"\"\n Create an ascii raster file from an array as a base. Left corner origin and resolution must be provided.\n arr : 2D numpy arr\n filename : the path/name for the new ascii file\n x0,y0 : left corner origin of the array\n res : Ascii resolution\n nodata : no data value\n \"\"\"\n \n ncol = arr.shape[1]\n nrow = arr.shape[0]\n with open(filename,\"w\") as file:\n file.write(\"ncols {}\\n\".format(ncol))\n file.write(\"nrows {}\\n\".format(nrow))\n file.write(\"xllcorner {}\\n\".format(x0))\n file.write(\"yllcorner {}\\n\".format(y0))\n file.write(\"cellsize {}\\n\".format(res))\n file.write(\"nodata_value {}\\n\".format(nodata))\n for irow in range(nrow):\n for icol in range(ncol):\n file.write(str(arr[irow,icol])+\" \")\n\n#16\ndef rspl_rast(rast_path,grid,band=1):\n \n \"\"\"\n Use the resample_to_grid method from flopy Raster. 
\n rast_path : path to the raster\n grid : modelgrid (gwf.modelgrid or flopy.discretisation)\n \"\"\"\n \n rast = Raster.load(rast_path)\n arr = rast.resample_to_grid(grid,band)\n return arr\n\n#17\ndef k_zones(k,z1,layer,kn,ix): \n \n \"\"\"\n Change value in a numpy 3D array location based on a certain zone (format: [(x1,y1),(x2,y2), ...])\n Design for update permeability array but can be used for any other purpose that imply modifying an array in a specific zone\n \n z1: list of tuples, zone (format: [(x1,y1),(x2,y2), ...])\n layer : list or int, layers on which to apply changes\n kn : float, the new value of k\n ix : gridintersect object --> ix = GridIntersect(grid) as grid the modelgrid\n \"\"\"\n \n poly = Polygon(z1)\n res = ix.intersect(poly)\n if type(layer) != int:\n for ilay in layer:\n for cellid in res.cellids:\n irow = cellid[0]\n icol = cellid[1]\n k[ilay,irow,icol] = kn \n \n elif type(layer) == int:\n for cellid in res.cellids:\n irow = cellid[0]\n icol = cellid[1]\n k[layer,irow,icol] = kn \n \n else :\n raise Exception (\"layer must be an int or a list of int\")\n \n#18\ndef liss_mob(arr,n,null_v = 0):\n \n \"\"\"\n Apply a moving average (with 2*n numbers) on 2D array.\n arr : 2D numpy array\n n : number of elements (in one of the four direction) to take into account for the moving average (n=2 --> average of a specific number will be calculated with the surroundings 5x5 elements)\n return a 2D array and replace null value by 0\n \"\"\"\n \n \n arr[arr==null_v]=None\n for irow in range(n,arr.shape[0]-n):\n for icol in range(n,arr.shape[1]-n):\n if not np.isnan(arr[irow,icol]):\n bloc = arr[irow-n:irow+n+1,icol-n:icol+n+1]\n arr[irow,icol] = np.nanmean(bloc)\n arr = np.nan_to_num(arr)\n return arr","sub_path":"codes_flopy/modules/Rouss.py","file_name":"Rouss.py","file_ext":"py","file_size_in_byte":18869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"530863365","text":"import numpy as np\nfrom keras.models import Input, Model\nfrom keras import backend as K\nimport tensorflow as tf\n\nfrom lights import Lights\nfrom camera import Camera\nfrom model import Geometry\nfrom warp import Warp\nfrom render import Render\n\nBATCH_SIZE = 10\nNUM_WARPS = 5\nnum_vertices = 100\nnum_faces = 150\nnum_lights = 1\n\n\nbase_model = Geometry(vertices=np.random.randn(num_vertices, 3), faces=np.random.randint(0, num_vertices, size=[num_faces, 3]))\nlights = Lights(positions=np.random.randn(num_lights, 3), intensities=np.random.randn(num_lights, 3))\ncamera = Camera(eye=np.random.randn(1, 3), center=np.random.randn(1, 3), world_up=np.random.randn(1, 3))\ntrans = Transform(batch_size=BATCH_SIZE)\n\nwarp_params = Input(shape=[NUM_WARPS, 1])\nwarped_vertices = Warp(num_warps=NUM_WARPS)([K.identity(base_model.vertices), warp_params])\nworld_coords = trans(warped_vertices)\ncolors = K.constant(np.random.randn(BATCH_SIZE, num_vertices, 3))\nrendered = Render(512, 512)([world_coords, base_model.faces],\n base_model.calculate_normals(world_coords),\n colors,\n [camera.eye, camera.center, camera.world_up],\n [lights.positions, lights.intensities])\n\n#model = Model(inputs=[warp_params], outputs=[renderer])\n\nsess = K.get_session()\nsess.run(tf.global_variables_initializer())\nsess.run([rendered], feed_dict={warp_params : np.random.randn(BATCH_SIZE, NUM_WARPS, 1)})\n\n","sub_path":"mesh_renderer/keras_api/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"253909529","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 04 18:55:31 2018\n\n@author: Hardik Galiawala\n\"\"\"\n\nimport csv\nimport numpy as np\nimport re\n\n\n\ndictionary = np.array([[]])\n\nwith open('lexicon_easy.csv', 'rb') as textfile:\n sentiWordNet = csv.reader(textfile, delimiter=',')\n for i in sentiWordNet:\n dictionary = np.append(dictionary, i[0].lower())\n dictionary = np.append(dictionary, i[1])\n dictionary = dictionary.reshape(len(dictionary)/2,2)\n\nfinal_result = np.array([[]])\n\nwith open('tweetsUnencoded.csv', 'rb') as csvfile:\n tweet_unencoded = csv.reader(csvfile, delimiter=',')\n j = 0\n for tweet in tweet_unencoded:\n words = re.sub(\"[^\\w]\", \" \", tweet[-1]).split()\n score = np.array([])\n #print(words)\n for word in words:\n my_string=\"skewed#1 distorted#2\"\n pattern = re.compile(r''+word.lower())\n for k in range(len(dictionary)):\n if(pattern.findall(dictionary[k][0]) != []):\n score = np.append(score, dictionary[k][1])\n break\n \n final_result = np.append(final_result, tweet[3])\n final_result = np.append(final_result, (np.sum(score.astype(int))))\n final_result = final_result.reshape(len(final_result) / 2, 2)\n \n\nwith open('sentimentAnalysis.csv', 'wb') as outfile:\n \n writer = csv.writer(outfile)\n writer.writerow(['twitter tweet', 'sentiment', 'score'])\n for count in range(len(final_result)):\n \n sentiment = \"\"\n if (int(final_result[count][1]) > 0):\n sentiment = \"Positive\"\n elif(int(final_result[count][1]) < 0):\n sentiment = \"Negative\"\n else:\n sentiment = \"Neutral\"\n writer.writerow([final_result[count][0], sentiment, final_result[count][1]])\n","sub_path":"Lexicon_text_analysis/lexical_sentiment_analysis.py","file_name":"lexical_sentiment_analysis.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"105561066","text":"\r\nimport sly \r\n\r\n\r\nclass Lexer(sly.Lexer):\r\n\ttokens = {\r\n\t\t\tNUMBER,AND,BREAK,DO,ELSE,ELSEIF,END,FALSE,FOR,FUNCTION,\r\n IF,IN,LOCAL,NIL,NOT,OR,REPEAT,RETURN,THEN,TRUE,\r\n UNTIL,WHILE,NAME,STRING,EQ,NE,LE,GE,LT,GT,TDOT,APPEND,\r\n FUNCTION,LOCAL,CONCAT,UMINUS,VARARG,\r\n }\t\r\n\t\r\n\r\n\tliterals = \"+-*/%^#=(){}[];:,.><~_^|\"\r\n\r\n\r\n\tignore = ' \\t\\r'\r\n\tignore_newline = r'\\n+'\r\n \r\n\tdef ignore_newline(self, t):\r\n\t\tself.lineno += t.value.count('\\n')\r\n\r\n\t@_(r'0x[0-9a-fA-F]+',r'(\\d+\\.\\d*|\\d+)([eE][-+]?\\d+)?')\r\n\tdef NUMBER(self, t):\r\n\t\tif t.value.startswith('0x'):\r\n\t\t\tt.value = int(t.value[2:], 16)\r\n\t\telse: \r\n\t\t\tt.value = int(t.value)\r\n\t\treturn t\r\n\r\n #IDENTIFICADORES\r\n\tNAME = r'[a-zA-Z_][a-zA-Z0-9_]*'\r\n\tNAME[\"and\"]=\"AND\"\r\n\tNAME[\"break\"]=\"BREAK\"\r\n\tNAME[\"do\"]=\"DO\" \r\n\tNAME[\"else\"]=\"ELSE\"\r\n\tNAME[\"elseif\"]=\"ELSEIF\"\r\n\tNAME[\"end\"]=\"END\"\r\n\tNAME[\"false\"]=\"FALSE\"\r\n\tNAME[\"for\"]=\"FOR\"\r\n\tNAME[\"function\"]=\"FUNCTION\"\r\n\tNAME[\"if\"]=\"IF\"\r\n\tNAME[\"in\"]=\"IN\"\r\n\tNAME[\"local\"]=\"LOCAL\"\r\n\tNAME[\"nil\"]=\"NIL\"\r\n\tNAME[\"not\"]=\"NOT\"\r\n\tNAME[\"or\"]=\"OR\"\r\n\tNAME[\"repeat\"]=\"REPEAT\"\r\n\tNAME[\"return\"]=\"RETURN\"\r\n\tNAME[\"then\"]=\"THEN\"\r\n\tNAME[\"true\"]=\"TRUE\"\r\n\tNAME[\"until\"]=\"UNTIL\"\r\n\tNAME[\"while\"]=\"WHILE\"\r\n\t#NAME[\"function\"]=\"FUNCTION\"\r\n\tNAME[\"concat\"]=\"CONCAT\"\r\n\tNAME[\"vararg\"]=\"VARARG\"\r\n\r\n\t#cadenas\r\n\tSTRING = r'\".*\"'\r\n\t\r\n #operadores de comparacion\r\n\tEQ = r\"==\"\r\n\tNE = r\"~=\"\r\n\tLE = \"<=\"\r\n\tGE = \">=\"\r\n\tLT = \"<\"\r\n\tGT = \">\"\r\n\tTDOT = r'(\\.\\.\\.)'\r\n\tAPPEND = r'(\\.\\.)'\r\n\t\r\n\t\r\n\t@_(r'\\-\\-\\[\\[(.|\\n)*?\\]\\]')\r\n\tdef COMMENTCORCHETE(self , t) :\r\n\t\tself.lineno += len(t.value)\r\n\r\n\r\n\t@_(r'\\-\\-[^\\[].*')\r\n\tdef COMMENTLINEAL(self , t) :\r\n\t\tself.lineno += len(t.value)\r\n\r\n\t@_(r'\\-\\-\\[\\=+\\[(.|\\n)*?\\]\\=+\\]',)\r\n\tdef COMMENT(self , t) :\r\n\t\tcadena = t.value.replace('\\t','xx').replace('\\n','xx').replace(' ','x')\r\n\t\tcorIzquierda = 0\r\n\t\tigualIzquierda = 0 \r\n\t\tcorDerecha = 0\r\n\t\tigualDerecha = 0 \r\n\t\ti = 0\r\n\t\tj = len(cadena)-1\r\n\t\twhile corIzquierda < 2 or corDerecha < 2:\r\n\t\t\tif corIzquierda < 2:\r\n\t\t\t\tif cadena[i] == '[':\r\n\t\t\t\t\tcorIzquierda+=1\r\n\t\t\t\tif cadena[i] == '=':\r\n\t\t\t\t\tigualIzquierda += 1\r\n\t\t\t\ti+=1\r\n\r\n\t\t\t\tif cadena[j]== ']':\r\n\t\t\t\t\tcorDerecha+=1\r\n\t\t\t\tif cadena[j]=='=':\r\n\t\t\t\t\tigualDerecha+=1\r\n\t\t\t\tj-=1\r\n\t\tif igualDerecha == igualIzquierda:\r\n\t\t\tself.lineno += len(t.value)\r\n\t\telse:\r\n\t\t\tprint('*****catidad \"=\" inicio/final diferente %r , Linea %d' % (t.value, self.lineno))\r\n\t\t\tself.index +=1\r\n\r\n\r\n\t@_(r'\\-\\-\\[\\=+\\[(.|\\n)*?\\]',r'\\-\\-\\[\\=+\\[(.|\\n)*?',r'\\-\\-\\[\\[(.|\\n)*?\\]',r'\\-\\-\\[\\[(.|\\n)*?',\r\n\t\tr'\\-\\-\\[ +\\[(.|\\n)*?\\]\\]',r'\\-\\-\\[\\[(.|\\n)*?\\] +\\]',\r\n\t\tr'\\-\\-\\[(.|\\n)*?\\]\\]'\r\n\t\t)\r\n\tdef errorComentarios (self, t):\r\n\t\tprint('*****comentario mal cerrado %r , Linea %d' % (t.value, self.lineno))\r\n\t\tself.index += 1\r\n\r\n\tdef error(self, t):\r\n\t\tprint('***** Linea %d: Caracter ilegal %r' % (self.lineno, t.value[0]))\r\n\t\tself.index += 1\r\n 
\r\n","sub_path":"AnalizadorLua/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"273043194","text":"from Content.Beings import Player, Enemy\r\nfrom Content.Items.Weapons import Weapons, Swords, Wands, Staffs, Hammers, Daggers, BattleStaffs, Bows\r\nfrom Content.Items.Armor import Armor, HeadArmor, TorsoArmor, LegArmor\r\nfrom Content.Items import Items, Food, Tiers\r\nfrom Content.Towns import Towns, Shops\r\nfrom Content.Modifiers import Modifiers, WeaponModifiers\r\nfrom Content.GUIs import GUI, MainMenu, CharacterCreation, TownScreen, GUIIndexes, LevelUp, PlayerInventory\r\nfrom Content.GUIs.ShopGUIs import BlacksmithScreen, ChallengeScreen, StoreScreen\r\nfrom Content.Enemies import Enemies\r\nfrom Content.Dungeons import Dungeons, Rooms\r\nfrom math import *\r\nimport pickle\r\nimport os\r\nfrom os.path import dirname, abspath\r\nimport random\r\nd = dirname(abspath(__file__))\r\n\r\n\r\nclass Game(object):\r\n def __init__(self):\r\n self.guiHandler = GUI.GUIHandler(self)\r\n\r\n self.player = Player()\r\n\r\n self.level = 1\r\n\r\n self.towns = [Towns.Town(game=self, level=self.level)]\r\n self.currentTown = 0\r\n self.towns[0].newDungeon()\r\n #self.nextTown()\r\n\r\n self.g = 0\r\n\r\n self.guiHandler.start()\r\n\r\n def prevTown(self):\r\n self.currentTown -= 1\r\n\r\n def nextTown(self):\r\n if(self.towns[self.currentTown] == self.towns[-1]):\r\n self.level += random.uniform(0.3, 0.8)\r\n self.towns.append(Towns.Town(game=self, level=self.level))\r\n self.towns[-1].newDungeon()\r\n self.currentTown += 1\r\n\r\n def getCurrentTown(self):\r\n return self.towns[self.currentTown]\r\n\r\n def addXPToPlayer(self, amount):\r\n if(self.player.addXP(amount)):\r\n self.player.levelUp()\r\n self.guiHandler.swapGUI(GUIIndexes.LEVEL_UP)\r\n\r\n def save(self, event):\r\n if(event.char == 's'):\r\n gameFile = open(d + \"\\saves\\{}.player\".format(self.player.name), \"w+b\")\r\n self.g = self.guiHandler.unloadGUIs()\r\n pickle.dump(self, gameFile)\r\n self.guiHandler.loadGUIs(self.g)\r\n\r\n def load(self, newGame):\r\n self.guiHandler.currentGUI.destroy()\r\n self = newGame\r\n self.guiHandler.loadGUIs(self.g)\r\n\r\n def getSaveFiles(self):\r\n files = []\r\n for file in os.listdir(d + \"\\saves\"):\r\n if file.endswith(\".player\"):\r\n files.append(file)\r\n return d, files\r\n\r\ngame = Game()\r\n\r\n\r\n\"\"\"\r\nTESTING FUNCTIONS\r\n\"\"\"\r\ndef swordDamage():\r\n #---DAMAGE OF SWORD---\r\n s = Swords.WoodenSword()\r\n four = 0\r\n five = 0\r\n six = 0\r\n seven = 0\r\n\r\n\r\n values = [0]*100\r\n\r\n\r\n for i in range(100):\r\n damage = s.getDamage(Player())\r\n print(\"{}: {}\".format(i, damage))\r\n values[damage] += 1\r\n\r\n for l in range(0, len(values)):\r\n if(values[l] != 0):\r\n print(\"{}: {}\".format(l, values[l]))\r\n\r\n\r\ndef buffsOfSwords(weapon=Swords.StoneSword):\r\n #---BUFFS OF RANDOM SWORDS---\r\n for i in range(25):\r\n \tl = weapon()\r\n \tprint(\"Sword #: {}\\nstr: {}\\nagi: {}\\nint: {}\\n\".format(i, l.strBuff, l.agiBuff, l.intBuff))\r\n\r\n\r\ndef modifierStats():\r\n #---MODIFIERS OF RANDOM SWORDS---\r\n vals = [0]*len(WeaponModifiers.WEAPON_MODIFIERS)\r\n swords = 1000\r\n\r\n for p in range(swords):\r\n m = Swords.StoneSword()\r\n m.equipModifiers = []\r\n #CHANCES = PLAYERLEVEL//3\r\n m.generateModifiers(4)\r\n\r\n for mod in m.equipModifiers:\r\n vals[WeaponModifiers.WEAPON_MODIFIERS.index(mod)] += 1\r\n m.addModifiers()\r\n print(m.name)\r\n\r\n chanceForMod=0\r\n for l in range(len(WeaponModifiers.WEAPON_MODIFIERS)):\r\n total = sum(vals)\r\n percent = round(100*(vals[l]/total), 2)\r\n totalChance = 
round(100*(vals[l]/swords), 2)\r\n chanceForMod += totalChance\r\n print(\"{}: {} | {}% of modifiers, | {}% of all chances.\".format(WeaponModifiers.WEAPON_MODIFIERS[l].prefix, vals[l], percent, totalChance))\r\n print(\"Final chance for any modifier: {}\".format(round(chanceForMod,2)))\r\n\r\n\r\ndef randomModifierStats():\r\n for mod in WeaponModifiers.WEAPON_MODIFIERS:\r\n print(\"{}: Value: {} | Weight: {} | str: {} | agi: {} | int: {}\".format(mod.prefix, mod.valueBuff, mod.weightBuff, mod.strBuff, mod.agiBuff, mod.intBuff))\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"576427331","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 22 11:22:12 2018\r\n\r\n@author: caenglish\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport scipy.stats\r\nfrom sklearn import linear_model\r\nimport numpy as np\r\n\r\n#Load proposal data all the way back to 2012\r\ndata=pd.read_csv('CEDAallData_proposals4.csv')\r\ndata.drop(['DATE','CSD','Multi_MeasID','MEASTYPE','LTR','RECTYPE'],inplace=True,axis=1)\r\n\r\n#Load information on how many voters of each party exist in each county\r\nvoter=pd.read_csv('CAcountry_voters.txt',sep='\\t')\r\ndata_bus_tax=data[data['RECTYPENAME']=='Business Tax']\r\n\r\n#Combine each data frame so D-R spread is placed according to the county of the vote\r\ndata_tax=pd.merge(data_bus_tax,voter, on='CNTYNAME', how='left')\r\n\r\n#Let's do a linear regression to see if there is a relationship between which party has more voters in the county and whether they are more likely to vote for tax hikes at the polls\r\nx=[];y=[]\r\nfor i in range(len(data_tax['DRspread'])):\r\n x.append([data_tax['DRspread'].iloc[i]])\r\n y.append((data_tax['Percent_sum'].iloc[i]-0.5)*100.0)\r\n\r\n \r\nlinear_regression = linear_model.LinearRegression()\r\nlinear_regression.fit(x,y)\r\n\r\n #Calculate correlation coefficients\r\ncorr_p=scipy.stats.pearsonr(data_tax['DRspread']/100.0, data_tax['Percent_sum'])\r\ncorr_s=scipy.stats.spearmanr(data_tax['DRspread']/100.0, data_tax['Percent_sum'])\r\nprint('Pearson CC: ',corr_p)\r\nprint('Spearman CC: ',corr_s)\r\n\r\n#Plot the relationship or lack thereof\r\nx_test=np.arange(-30,60,10).reshape(-1,1)\r\n\r\ny_predict=linear_regression.predict(x_test)\r\n\r\nplt.xlim(-30,50)\r\nplt.xlabel('Democrat Voter Advantage Margin[%]')\r\nplt.ylabel('Vote Success Margin[%]')\r\nplt.scatter(x,y)\r\nplt.plot(x_test,y_predict,linewidth=2)\r\nplt.savefig('BusinessTax_Scatter.png',dpi=500)","sub_path":"pol_analysis_business.py","file_name":"pol_analysis_business.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"4011870","text":"'''SudoSimu - Module techchrcc - Technique de résolution \"chiffre/rang-colonne\"\nlocale pour un certain chiffre et sur une certaine colonne de carrés.\n\nScript d'import dans techchrcc.py de fonctions et méthodes privées de la classe\nTechChRCcol. Il s'agit des fonctions qui gèrent les états d'avancement\nd'application de la technique.\n\nDernière mise à jour : 11/10/2017\nVérification de complétude des modifications -suppr-mem- et -split-,\nparallèlement à la mise à jour de techchrcg.py.\nComplément d'harmonisation du nommage pour _finish_apply()\n'''\n\n\n#imports des modules de la simulation\nif __name__ in (\"__main__\", \"techchrcc2\", \"techchrc.techchrcc2\"):\n import sudoui as ui\n import sudorules as rules\n from sudorules import Sudoku_Error\n from sudomemory import SudoMemory\n import sudogridview as gridview\n from sudotest import *\nelif __name__ == \"sudosimu.techchrc.techchrcc2\":\n from sudosimu import sudoui as ui\n from sudosimu import sudorules as rules\n from sudosimu.sudorules import Sudoku_Error\n from sudosimu.sudomemory import SudoMemory\n from sudosimu import sudogridview as gridview\n from sudosimu.sudotest import *\nelse:\n raise Exception(\"Impossible de faire les imports dans le module techchrcc2.\")\n\n\ndef _start_apply(self):\n '''Début de la résolution. Initialisation des informations en mémoire\n et lancement de la résolution des colonnes.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _start_apply()\")\n TEST.display(\"techchrccol\", 2, \"TechChRCcol : début de résolution.\")\n assert self._initOk\n mem = self._mem\n mem.memorize(\"techchrccol_encours\", True, self)\n mem.memorize(\"techchrccol_stepcol\", 0, self)\n \n #comptages\n mem.memorize(\"techchrccol_nbplccol\", 0, self)\n #étape #0\n r = self._solve_debut()\n return r\n \ndef _solve_debut(self):\n '''Résolution pour une colonne de carrés - Début, et demande de la 1ère\n observation : quelles sont les carrés de cette colonne qui ne contiennent\n pas le chiffre.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_debut()\")\n assert self._initOk\n mem = self._mem\n #se rappeler le chiffre à placer et dans quelle colonne de carrés\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n isqcol = mem.recall(\"techchrccol_isqcol\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_debut() - \"\\\n \"Début de résolution de la colonne n° {0}\".format(icol))\n #1ère étape : dans quels carrés est-ce que est absent\n obsPattern = (gridview.OBS_SQRSINSQRCOL_NOTCONTAIN, (isqcol, chiffre) )\n #mémoriser les informations pour l'itération suivante\n mem.memorize(\"techchrccol_result\", \"observe\", self)\n mem.memorize(\"techchrccol_obspattern\", obsPattern, self)\n #incrémenter l'index d'observations\n mem.increment(\"techchrccol_indexobs\", self)\n #memoriser l'avancement et la fonction pour l'opération suivante\n mem.memorize(\"techchrccol_stepcol\", 1, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite1, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 1\", self)\n #retourner en indiquant la demande d'observation\n TEST.display(\"techchrccol\", 3, \"_solve_debut() - Demande 1ère \"\\\n \"observation : pattern = {0}\".format(obsPattern))\n r = (\"observe\", obsPattern)\n return r\n\ndef _solve_suite1(self):\n '''Résolution pour une colonne (Col) - Retour 1ère observation = les\n carrés qui ne contiennent pas le chiffre traité.\n S'il y a 1 carré sans le chiffre, passe à 
l'observation suivante = quel\n est la colonne sur lequel le chiffre n'est pas.\n Dans les autre cas, fin avec succès ou échec.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite1()\")\n assert self._initOk\n mem = self._mem\n #se rappeler le chiffre à placer et dans quelle colonne de carrés\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_suite1() - Colonne de carrés \"\\\n \"n° {0} : retour 1ère observation\".format(icol))\n #récupérer le résultat de l'observation et l'analyser\n #c'est un tuple de nombre et liste de carrés\n found = mem.recall(\"techchrccol_obsfound\", self)\n (nbSqr, listSqr) = found\n \n #si tous les carrés de la colonne ont le chiffre, il n'y a rien d'autre\n #à faire et la technique est terminée\n if nbSqr <= 0:\n TEST.display(\"techchrccol\", 3, \"_solve_suite1 - Pas de carré à \"\\\n \"remplir, fin de la technique locale avec succès.\")\n r = self._solve_fin(\"noplace\")\n \n #s'il y a 2 ou 3 carrés, on ne le fait pas pour le moment (complexité)\n #donc le résultat de la technique = abandon\n elif nbSqr > 1:\n TEST.display(\"techchrccol\", 3, \"_solve_suite1() - Plus d'1 carré à \"\\\n \"remplir, trop complexe. La technique locale est \"\\\n \"abandonnée.\")\n r = self._solve_fin(\"quit\")\n\n #1 carré où le chiffre manque => la technique continue\n #algorithme : on cherche maintenant une seule colonne où le chiffre manque\n else: #sbSqr==1\n isqr = listSqr[0] #le carré en question\n TEST.display(\"techchrccol\", 3, \"_solve_suite1 - 1 carré à \"\\\n \"remplir, la technique passe à l'étape suivante.\")\n #observation suivante : dans quelles colonnes de ce carré est-ce que\n # n'est pas ?\n obsPattern = (gridview.OBS_COLSBYSQR_NOTCONTAIN, (isqr, chiffre))\n #mémoriser les informations pour l'étape suivante\n mem.memorize(\"techchrccol_obspattern\", obsPattern, self)\n mem.memorize(\"techchrccol_isqr\", isqr, self)\n #incrémenter l'index d'observations\n mem.increment(\"techchrccol_indexobs\", self)\n #état d'avancement et fonction pour l'opération suivante\n mem.memorize(\"techchrccol_result\", \"observe\", self)\n mem.memorize(\"techchrccol_stepcol\", 2, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite2, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 2\", self)\n #retourner en indiquant la demande d'observation à faire\n TEST.display(\"techchrccol\", 3, \"_solve_suite1() : demande de 2ème \"\\\n \"observation : pattern = {0}\".format(obsPattern))\n r = (\"observe\", obsPattern)\n return r\n\ndef _solve_suite2(self):\n '''Résolution pour une colonne (Col) - Retour 2ème observation = la\n colonne où le chiffre n'est pas. 
Il y en a forcément exactement 1.\n 3ème observation à faire = les rangs de ce carré où le chiffre\n n'est pas.\n La seule cause possible d'échec est un fail mémoire, en particulier\n un fail de mémoire du résultat d'observation.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite2()\")\n assert self._initOk\n mem = self._mem\n #se rappeler les infos en mémoire de l'étape précédente\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n isqr = mem.recall(\"techchrccol_isqr\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_suite1() - Colonne de carrés \"\\\n \"n° {0} : retour 2ème observation\".format(icol))\n #récupérer le résultat de l'observation et l'analyser\n #c'est un tuple de nombre et liste de colonnes\n found = mem.recall(\"techchrccol_obsfound\", self)\n (nbCol, listCol) = found\n #détection d'erreur : il doit y avoir une seule colonne\n if not nbCol == 1:\n raise(Sudoku_Error, \"Erreur d'observation dans TechChRCcol\"\\\n \"._solve_suite2(), nombre de colonnes invalide.\")\n colmiss = listCol[0]\n TEST.display(\"techchrccol\", 3, \"_solve_suite2() - Le chiffre {0}\"\\\n .format(chiffre) + \"n'est pas dans la colonne {0}.\"\\\n .format(colmiss))\n \n #algorithme : on cherche maintenant dans quelles rangs du même carré\n #le chiffre n'est pas\n obsPattern = (gridview.OBS_ROWSBYSQR_NOTCONTAIN, (isqr, chiffre))\n r = (\"observe\", obsPattern)\n #mémoriser les informations pour l'itération suivante\n mem.memorize(\"techchrccol_obspattern\", obsPattern, self)\n mem.memorize(\"techchrccol_colmiss\", colmiss, self)\n #incrémenter l'index d'observations\n mem.increment(\"techchrccol_indexobs\", self)\n #avancement de la technique et fonction pour l'opération suivante\n mem.memorize(\"techchrccol_result\", \"observe\", self)\n mem.memorize(\"techchrccol_stepcol\", 3, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite3, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 3\", self)\n #retourner en indiquant la demande d'observation à faire\n TEST.display(\"techchrccol\", 3, \"_solve_suite2() : demande de 3ème \"\\\n \"observation : pattern = {0}\".format(obsPattern))\n r = (\"observe\", obsPattern)\n return r\n\ndef _solve_suite3(self):\n '''Résolution pour une colonne (Col) - Retour 3ème observation = les\n rangs du carré où le chiffre n'est pas. 
Il y en a forcément au moins\n un (puisqu'il y a un chiffre à mettre dans ce carré.\n 4ème observation à faire = les cases vides à l'intersection des rangs et\n de la colonne sans le chiffre.\n La seule cause possible d'échec est un fail mémoire.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite3()\")\n assert self._initOk\n mem = self._mem\n #se rappeler les infos en mémoire de l'étape précédente\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n isqr = mem.recall(\"techchrccol_isqr\", self)\n colmiss = mem.recall(\"techchrccol_colmiss\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_suite3() - Colonne de carrés \"\\\n \"n° {0} : retour 3ème observation\".format(icol))\n #récupérer le résultat de l'observation et l'analyser\n #c'est un tuple de nombre et liste de rangs\n found = mem.recall(\"techchrccol_obsfound\", self)\n (nbRow, listRow) = found\n TEST.display(\"techchrccol\", 3, \"_solve_suite3() - Le chiffre {0}\"\\\n .format(chiffre) + \"est absent des {0} rang(s) : {1}.\"\\\n .format(nbRow, listRow))\n \n #algorithme : on cherche maintenant les cases vides à l'intersection\n #des rangs et de la colonne libres\n argRowCol =(listRow, (colmiss,))\n obsPattern = (gridview.OBS_EMPTYPLACES_RC, argRowCol)\n #mémoriser les informations pour l'itération suivante\n mem.memorize(\"techchrccol_rowsmiss\", listRow, self)\n mem.memorize(\"techchrccol_obspattern\", obsPattern, self)\n #incrémenter l'index d'observations\n mem.increment(\"techchrccol_indexobs\", self)\n #avancement de la technique et fonction pour l'opération suivante\n mem.memorize(\"techchrccol_result\", \"observe\", self)\n mem.memorize(\"techchrccol_stepcol\", 4, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite4, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 4\", self)\n #retourner en indiquant la demande d'observation à faire\n TEST.display(\"techchrccol\", 3, \"_solve_suite3() - Demande 4ème \"\\\n \"observation : pattern = {0}\".format(obsPattern))\n r = (\"observe\", obsPattern)\n return r\n\ndef _solve_suite4(self):\n '''Résolution pour une colonne (Col) - Retour 4ème observation = les\n cases vides à l'intersection des lignes et colonnes disponibles.\n S'il y en a une seule, faire le placement. 
S'il y en a plusieurs, on\n considère que d'est trop complexe -> fin de la technique\n La seule cause possible d'échec est un fail mémoire.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite4()\")\n assert self._initOk\n mem = self._mem\n #se rappeler les infos en mémoire de l'étape précédente\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n icol = mem.recall(\"techchrccol_icol\", self)\n isqr = mem.recall(\"techchrccol_isqr\", self)\n colmiss = mem.recall(\"techchrccol_colmiss\", self)\n colsmiss = mem.recall(\"techchrccol_colsmiss\", self)\n TEST.display(\"techchrccol\", 3, \"_solve_suite4() - Colonne de carrés \"\\\n \"n° {0} : retour 4ème observation\".format(icol))\n #récupérer le résultat de l'observation et l'analyser.\n #c'est un tuple de nombre et liste de cases\n found = mem.recall(\"techchrccol_obsfound\", self)\n (nbPlc, listPlc) = found\n TEST.display(\"techchrccol\", 3, \"_solve_suite4() - Les cases {0}\"\\\n .format(listPlc) + \"sont disponibles pour placer le {0}.\"\\\n .format(chiffre))\n #algorithme : il doit y a au moins une case libre, s'il y a une seule\n #c'est celle ou se fait le placement\n if nbPlc < 1:\n raise(Sudoku_Error, \"Erreur d'observation dans TechChRCcol\"\\\n \"._solve_suite4(), nombre de cases libres invalide.\")\n #s'il y en a plus d'une, trop complexe pour le moment -> abandon\n elif nbPlc > 1:\n TEST.display(\"techchrccol\", 3, \"_solve_suite4() - Plus d'1 case à \"\\\n \"remplir, trop complexe. La technique locale est \"\\\n \"abandonnée.\")\n r = self._solve_fin(\"quit\")\n # ok 1 case, on fait le placement\n else:\n placement = (listPlc[0][0], listPlc[0][1], chiffre)\n #mémoriser les informations pour l'itération suivante\n mem.memorize(\"techchrccol_availplc\", listPlc, self)\n mem.memorize(\"techchrccol_result\", \"place\", self)\n mem.memorize(\"techchrccol_placement\", placement, self)\n #avancement de la technique et fonction pour l'opération suivante\n mem.memorize(\"techchrccol_stepcol\", 5, self)\n mem.memorize(\"techchrccol_action_suivante\", self._solve_suite5, self)\n mem.memorize(\"techchrccol_nom_action\", \"suite 5\", self)\n #retourner en indiquant la demande de placement à faire\n TEST.display(\"techchrccol\", 3, \"_solve_suite4() - Demande de \"\\\n \"placement de {0} en {1}\".format(chiffre, listPlc[0]))\n r = (\"place\", placement)\n return r\n\ndef _solve_suite5(self):\n '''Résolution pour une colonne (Col) - Retour de placement.\n Vérifier que le placement a été correct et terminer la technique.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_suite5()\")\n assert self._initOk\n mem = self._mem\n #se rappeler les infos en mémoire de l'étape précédente\n chiffre = mem.recall(\"techchrccol_chiffre\", self)\n placement = mem.recall(\"techchrccol_placement\", self)\n nbplc = mem.recall(\"techchrccol_nbplccol\", self)\n #vérifier que le placement a été bien réalisé. 
Sinon c'est une erreur\n #de mémoire, d'algorithme ou de cohérence de la grille.\n plcValid = mem.recall(\"techchrccol_placeok\", self)\n if plcValid is not True:\n raise Sudoku_Error (\"Erreur de placement dans TechChRCcol\"\\\n \"._solve_suite5(), le placement a échoué.\")\n #incrémenter le compteur de placement puis passer à la fin\n mem.increment(\"techchrccol_nbplccol\", 1, self)\n (row, col) = (placement[0], placement[1])\n TEST.display(\"techchrccol\", 3, \"_solve_suite5() - Le placement \"\\\n \"de {0} en {1} est validé.\".format(chiffre, (row, col)))\n #fin de la technique avec succès\n r = self._solve_fin(\"succeed\")\n return r\n\ndef _solve_fin(self, endResult=\"end\"):\n \"\"\"A la fin des étapes de résolution, commande la fin de la technique\n et construit la réponse à retourner à apply() - et donc à la fonction\n appelante du programme.\n \"\"\"\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _solve_fin()\")\n assert self._initOk\n mem = self._mem\n nbplccol = mem.recall(\"techchrccol_nbplccol\", self)\n TEST.display(\"techchrccol\", 2, \\\n \"Fin de la technique \\\"Chiffre-rang-colonne\\\" sur une \"\\\n \"colonne.\\nNombre de chiffres placés : {0}\" \\\n .format(nbplccol))\n #mettre à jour les données d'avancement\n self._finish_apply()\n #construire le tuple de détail de résultats\n endDetails = (endResult, nbplccol)\n TEST.display(\"techlplccol\", 2, \"La technique se termine avec le \"\\\n \"résultat : '{0}'\".format(endDetails))\n #retour à SudoThinking\n return (\"end\", endDetails)\n\ndef _finish_apply(self):\n '''Marque la technique comme terminée. Il faudra appeler 'reset()'\n pour la relancer.\n '''\n TEST.display(\"techchrccol\", 3, \"TechChRCcol - dans _finish_tech()\")\n assert self._initOk\n mem = self._mem\n mem.memorize(\"techchrccol_chiffre\", None, self)\n mem.memorize(\"techchrccol_icol\", None, self)\n mem.memorize(\"techchrccol_isqcol\", None, self)\n mem.memorize(\"techchrccol_finished\", True, self)\n mem.memorize(\"techchrccol_encours\", False, self)\n self._finished = True\n return \n","sub_path":"sudosimu/techchrc/techchrcc2.py","file_name":"techchrcc2.py","file_ext":"py","file_size_in_byte":16953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"313372608","text":"# Copyright (C) 2014 Glamping Hub (https://glampinghub.com)\n# License: BSD 3-Clause\n\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import activate, get_language\n\nfrom painlessseo import settings\n\n\nclass SeoMetadata(models.Model):\n content_type = models.ForeignKey(ContentType, null=True, blank=True)\n object_id = models.PositiveIntegerField(null=True, blank=True)\n content_object = generic.GenericForeignKey('content_type', 'object_id')\n path = models.CharField(verbose_name=_('Path'), max_length=200, db_index=True,\n help_text=_(\"This should be an absolute path, excluding the domain name. Example: '/foo/bar/'.\"))\n lang_code = models.CharField(verbose_name=_('Language'), max_length=2,\n choices=settings.SEO_LANGUAGES,\n default=settings.DEFAULT_LANG_CODE)\n title = models.CharField(verbose_name=_('Title'), max_length=68, blank=True)\n description = models.CharField(verbose_name=_('Description'), max_length=155, blank=True)\n override_path = models.BooleanField(default=False)\n\n class Meta:\n verbose_name = _('SEO metadata')\n verbose_name_plural = _('SEO metadata')\n db_table = 'seo_metadata'\n unique_together = (('path', 'lang_code'), )\n ordering = ('path', 'lang_code')\n\n def __unicode__(self):\n return \"Language: %s | URL: %s\" % (self.lang_code, self.path)\n\n def save(self, *args, **kwargs):\n if kwargs.pop('override_path', None):\n self.override_path = True\n else:\n try:\n old_seo = SeoMetadata.objects.get(pk=self.pk)\n if old_seo.override_path:\n self.path = old_seo.path\n except SeoMetadata.DoesNotExist:\n pass\n super(SeoMetadata, self).save(*args, **kwargs)\n\n\ndef update_seo(sender, instance, **kwargs):\n active_lang = get_language()\n if hasattr(instance, 'get_current_language') and callable(instance.get_current_language):\n active_lang = instance.get_current_language()\n for lang_code, lang_name in settings.SEO_LANGUAGES:\n if active_lang == lang_code:\n activate(lang_code)\n try:\n sm = SeoMetadata.objects.get(content_type=ContentType.objects.get_for_model(instance),\n object_id=instance.id, lang_code=lang_code)\n if instance.get_absolute_url() != sm.path:\n sm.path = instance.get_absolute_url()\n except SeoMetadata.DoesNotExist:\n sm = SeoMetadata(lang_code=lang_code, content_object=instance, path=instance.get_absolute_url())\n sm.save(override_path=True)\n activate(active_lang)\n\n\ndef delete_seo(sender, instance, **kwargs):\n ctype = ContentType.objects.get_for_model(instance)\n for sm in SeoMetadata.objects.filter(content_type=ctype, object_id=instance.id):\n sm.delete()\n\n\ndef register_seo_signals():\n for app, model in settings.SEO_MODELS:\n ctype = ContentType.objects.get(app_label=app, model=model)\n if not hasattr(ctype.model_class(), 'get_absolute_url'):\n raise ImproperlyConfigured(\"Needed get_absolute_url method not defined on %s.%s model.\" % (app, model))\n models.signals.post_save.connect(update_seo, sender=ctype.model_class(), weak=False)\n models.signals.pre_delete.connect(delete_seo, sender=ctype.model_class(), weak=False)\n","sub_path":"painlessseo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"407126015","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 17 15:17:37 2016\n\n@author: MichaelEK\n\"\"\"\n\nfrom pandas import read_table, DataFrame, concat, merge, Timedelta, datetime, to_datetime, DateOffset, date_range, Timestamp, read_csv, to_numeric\nfrom misc_functions_v01 import printf\nfrom WA_analysis_fun import stream_nat\nfrom linear_reg_v02 import lin_reg\nfrom seaborn import regplot\n\n########################################\n### Parameters\n\nusage_path = 'C:/ecan/base_data/usage/usage_takes_mon_series_all_est_SD_with_cav.csv'\nflow_path = 'C:/ecan/Projects/otop/flow/otop_flow_recorders.CSV'\nwap_num_path = 'C:/ecan/Projects/Pareora/analysis/usage/wap_sites/'\n\nflow_site_name = 70103\nwap_sites = [70103, 70105]\n\nexport = False\n\nexport_mon_path = 'C:/ecan/Projects/Pareora/analysis/data/nat_mon_vol_70103.csv'\nexport_nat_path = 'C:/ecan/Projects/Pareora/analysis/data/nat_flow_70103.csv'\n\n########################################\n### Run naturalization function\n\nnat_mon_vol, nat_flow = stream_nat(usage_path, flow_path, wap_num_path, flow_site_name, wap_sites, export=export, export_mon_path=export_mon_path, export_nat_path=export_nat_path)\n\n#######################################\n### Testing section\n\nboth2 = concat([flow4, new_flow], axis=1)\n\nboth2.plot(ylim=[0, 5])\n\nnat_flow.plot(ylim=[0, 5])\n\nhuts = nat_flow\nlower = nat_flow\n\nboth3 = concat([huts, lower], axis=1)\n\nboth3.plot(ylim=[0, 5])\n\nlin_reg(nat_mon_vol[70103], nat_mon_vol['nat'], log_axis=True)[0]\nlin_reg(nat_mon_vol[70103], nat_mon_vol['sd_usage_est'])[0]\nreg1 = lin_reg(nat_mon_vol[70103], nat_mon_vol['nat'])[0]\n\n\nregplot(nat_mon_vol[70103], nat_mon_vol['nat'], truncate=True)\n\nx = nat_mon_vol[70103]\ny = nat_mon_vol['nat']\n\nnat_mon_vol['nat_est2'] = nat_mon_vol[70103] * reg1['Slope'][0] + reg1['Intercept'][0]\n\nnat_mon_vol['nat_est2_ratio'] = (nat_mon_vol['nat_est2'] -nat_mon_vol['nat'])/nat_mon_vol['nat']\n\n\nlow1 = nat_mon_vol[nat_mon_vol[70103] < 2000000]\nlin_reg(low1[70103], low1['nat'])[0]\n\n\net_nat = concat([nat_mon_vol['sd_usage_est'], et_mon], axis=1, join='inner')\net_nat = concat([nat_mon_vol['sd_usage_est'], dem_mon], axis=1, join='inner')\n\nlin_reg(et_nat[0], et_nat['sd_usage_est'])\n\nregplot(et_nat[0], et_nat['sd_usage_est'])\n\nregplot(log(et_nat[0]), log(et_nat['sd_usage_est']))\n\n\n\n\n\n\n\n","sub_path":"python_scripts/hydro/naturalization.py","file_name":"naturalization.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"243317006","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 30 10:58:44 2019\n\n@author: riteshsharma\n\"\"\"\nimport bcrypt as bc\nimport hashlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\ndef numberTwo(t):\n F1 = open(\"D1.txt\", \"r\")\n F2 = open(\"D2.txt\", \"r\")\n \n \n data1 = F1.readlines()\n data2 = F2.readlines()\n \n wholeString1 = list()\n wholeString2 = list()\n \n kGramsStorage1 = set()\n kGramsStorage2 = set()\n \n for text1 in data1:\n wholeString1 += text1\n \n for text2 in data2:\n wholeString2 += text2\n \n\n for j in range(0, len(wholeString1) - 2):\n kGramsStorage1.add(wholeString1[j] + wholeString1[j+1] + wholeString1[j+2])\n \n for k in range(0, len(wholeString2) - 2):\n kGramsStorage2.add(wholeString2[k] + wholeString2[k+1] + wholeString2[k+2])\n \n \n #Fast Min Hash Algorithm\n \n\n\n salt = list()\n vector = list()\n \n for i in range(t):\n vector.append(np.Inf)\n \n def sha(salt, value):\n hashValue = hashlib.sha1(salt.encode() + value.encode()).hexdigest()\n return int(hashValue, 16) % 10_000\n \n\n for i in range(t):\n salt.append(str(bc.gensalt()))\n \n for kGram in kGramsStorage1:\n for i in range(t):\n if (sha(salt[i], kGram) < vector[i]):\n vector[i] = sha(salt[i], kGram)\n \n jaccardSimilaritya = 0\n \n ##vector2\n vector1 = list()\n \n for i1 in range(t):\n vector1.append(np.Inf)\n \n\n for kGram in kGramsStorage2:\n for i1 in range(t):\n if (sha(salt[i1], kGram) < vector1[i1]):\n vector1[i1] = sha(salt[i1], kGram)\n \n \n for i in range(t):\n if(vector[i] == vector1[i]):\n jaccardSimilaritya += 1\n \n jaccardSimilaritya = jaccardSimilaritya/t\n \n return jaccardSimilaritya\n \n \ndef graph():\n a = list()\n ti = list()\n \n for i in range(20, 1000, 20):\n start = time.clock()\n a.append(numberTwo(i))\n stop = time.clock()\n b = stop - start\n ti.append(b)\n \n \n\n plt.plot(ti)\n plt.ylabel(\"time\")\n plt.xlabel(\"t\")\n plt.show\n \ngraph()\n","sub_path":"untitled2.py","file_name":"untitled2.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"47432062","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pyaudio\n\nfrom echomesh.base import Config\nfrom echomesh.sound import GetFormatName\nfrom echomesh.sound import Sound\nfrom echomesh.sound.Input import Input\nfrom echomesh.util import Log\nfrom echomesh.util.thread.ThreadLoop import ThreadLoop\n\nLOGGER = Log.logger(__name__)\n\nMIN_CHUNK_SIZE = 16\nMAX_CHUNK_SIZE = 2048\n\nclass InputThread(ThreadLoop):\n def __init__(self, device_index, sample_bytes, rates):\n super(InputThread, self).__init__(name='InputThread')\n self.chunk_size = max(MIN_CHUNK_SIZE,\n min(MAX_CHUNK_SIZE,\n Config.get('audio', 'input', 'chunk_size')))\n\n self.input = Input()\n fmt = GetFormatName.get_format_name(sample_bytes)\n try:\n len(rates)\n except TypeError:\n rates = [rates]\n\n self.clients = set()\n pyaud = Sound.PYAUDIO()\n for rate in rates:\n try:\n self.stream = pyaud.open(format=fmt, channels=1, rate=rate,\n input_device_index=device_index, input=True)\n break\n except IOError as e:\n if 'Invalid sample rate' not in str(e):\n raise\n else:\n raise Exception(\"Couldn't open audio device named %s.\" % device_index)\n\n def single_loop(self):\n try:\n self.input.receive(self.stream.read(self.chunk_size))\n except:\n LOGGER.error()\n else:\n for client in self.clients:\n client(self.input)\n\n def add_client(self, client):\n self.clients.add(client)\n\n def remove_client(self, client):\n self.clients.remove(client)\n\n def _after_thread_pause(self):\n self.stream.close()\n self.stream = None\n","sub_path":"code/python/echomesh/sound/InputThread.py","file_name":"InputThread.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"342794001","text":"\"\"\"mpeaks URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n# from django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom django.views.generic import RedirectView\nfrom mpeaks.mpeaksapp.views import PeakList, PeakDetail, PeakFilterList\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Mpeaks API\",\n default_version='v1',\n description=\"API Documentation\",\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n url(r'^doc(?P\\.json|\\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n url(r'^doc/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n url(r'^$', RedirectView.as_view(url='doc/')),\n path('peaks/', PeakList),\n path('peaks//', PeakDetail),\n path('peaks/filter/', PeakFilterList),\n]\n","sub_path":"mpeaks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"351917253","text":"\r\nimport smtplib\r\nimport socket\r\n\r\nsender = 'from@fromdomain.com'\r\nreceivers = 'to@todomain.com'\r\n\r\nhost = ''\r\nport = 25\r\nlocal_hostname = 'localhost'\r\n\r\nmessage = '''From: From Person\r\n\r\nTo: To Person \r\nMIME-Version: 1.0\r\nContent-type: text/html\r\nSubject: SMTP HTML e-mail test\r\n\r\n\r\nThis is an e-mail message to be sent in HTML\r\nformat\r\n\r\nThis is HTML message\r\n
This is headline.
\r\n\r\n'''\r\n\r\ntry:\r\n \r\n smtpObj = smtplib.SMTP(local_hostname)\r\n smtpObj.sendmail(sender, receivers, message)\r\n\r\n print('Successfully sent e-mail')\r\n\r\nexcept Exception as msg:\r\n print('Error: Unable to send e-mail due to [ %s ]' % (msg))\r\n","sub_path":"smtp--mailing.py","file_name":"smtp--mailing.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"333202896","text":"__author__ = 'thomas.ballas'\n\nimport time\nfrom crc_ccitt import crc16xmodem\nimport serial\nimport utilities\nimport servo_controller\n\n# CONSTANTS\nMAST_MIN = 210\nMAST_MAX = 2100\nMAST_HEIGHT = 1400\nNAME = \"zippermast\"\n\n\ndef mast_up(commander):\n servo = servo_controller.Servo()\n servo.open_lid()\n set_mast_height(commander, MAST_HEIGHT)\n\n\ndef mast_down(commander):\n set_mast_height(commander, 0)\n servo = servo_controller.Servo()\n servo.close_lid()\n\n\ndef set_mast_height(commander, target_height):\n commander.relay_on(\"zipmast_relay\")\n time.sleep(.1)\n commander.relay_on(\"zipmast_wake\")\n time.sleep(.5)\n commander.relay_off(\"zipmast_wake\")\n cur_ht = check_zippermast(commander)\n if \"NO RESPONSE\" not in cur_ht:\n commander.device_stats[\"mast\"] = cur_ht\n target_mm = int(target_height) + MAST_MIN\n cur_mm = int(commander.device_stats[\"mast\"])\n # height change below granularity of movement; doing nothing\n if in_range(target_mm, cur_mm, 25):\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] User tried to set mast height to current mast height.\"))\n # recurse with height set to zero - function will set to MAST_MIN\n elif target_mm < MAST_MIN:\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] User tried to set mast height below {} mm threshold.\".format(MAST_MIN)))\n return set_mast_height(commander, str(0))\n # recurse with target height set to greatest acceptable input: (MAST_MAX - MAST_MIN)\n elif target_mm > MAST_MAX:\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] User tried to set mast height above {} mm threshold.\".format(MAST_MAX)))\n return set_mast_height(commander, str(MAST_MAX - MAST_MIN))\n # input was within acceptable parameters; continue with raise mast\n elif target_mm > cur_mm:\n travel_mm = target_mm - cur_mm\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Raising mast for {} mm\".format(travel_mm)))\n if travel_mm < 100:\n mast_speed = \"25\"\n else:\n mast_speed = \"100\"\n raise_mast(commander, mast_speed)\n done = False\n fail_ctr = 0\n while not done and fail_ctr < 3:\n cur_ht = check_zippermast(commander)\n if \"NO RESPONSE\" not in cur_ht:\n commander.device_stats[\"mast\"] = cur_ht\n fail_ctr = 0\n else:\n fail_ctr += 1\n continue\n if int(cur_ht) >= target_mm:\n done = True\n else:\n if (target_mm - int(cur_ht)) < 100:\n raise_mast(commander, \"25\")\n time.sleep(0.1)\n stop_raise_mast(commander)\n # input was within acceptable parameters, begin lower mast\n else:\n travel_mm = cur_mm - target_mm\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Lowering mast for {} mm\".format(travel_mm)))\n if travel_mm < 100:\n mast_speed = \"25\"\n else:\n mast_speed = \"100\"\n lower_mast(commander, mast_speed)\n done = False\n fail_ctr = 0\n while not done and fail_ctr < 3:\n cur_ht = check_zippermast(commander)\n if \"NO RESPONSE\" not in cur_ht:\n commander.device_stats[\"mast\"] = cur_ht\n fail_ctr = 0\n else:\n fail_ctr += 1\n if float(cur_ht) <= target_mm:\n done = True\n else:\n if (int(cur_ht) - target_mm) < 100:\n lower_mast(commander, \"25\")\n time.sleep(0.25)\n stop_lower_mast(commander)\n commander.relay_off(\"zipmast_relay\")\n return \"{}\".format(int(commander.device_stats[\"mast\"]) - MAST_MIN)\n\n\ndef raise_mast(commander, speed):\n if commander.device != \"zipbox\":\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Tried to execute 'raise_mast' from outside zippermast box. 
Attempting to forward.\"))\n return commander.forward_command(commander.device_ip[\"zipbox\"], commander.server_port, \"rmt\")\n zip_mast_pcmov(commander, \"up\", speed)\n return \"MAST_UP\"\n\n\ndef stop_raise_mast(commander):\n if commander.device != \"zipbox\":\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Tried to execute 'raise_mast' from outside zippermast box. Attempting to forward.\"))\n return commander.forward_command(commander.device_ip[\"zipbox\"], commander.server_port, \"srmt\")\n # sleep appropriate amount of time for boot\n response = \"NO RESPONSE\"\n second_check = \"\"\n fail_ctr = 0\n while not in_range(response, second_check, 10) and fail_ctr < 5:\n response = zip_mast_pcmov(commander, \"up\", \"0\")\n if response is not \"NO RESPONSE\":\n commander.device_stats[\"mast\"] = response\n time.sleep(0.1)\n second_check = check_zippermast(commander)\n else:\n fail_ctr += 1\n response = zip_mast_pcmov(commander, \"up\", \"0\")\n return \"MAST_UP\"\n\n\ndef in_range(response, second_check, ranger):\n if response is \"NO RESPONSE\" or second_check is \"NO RESPONSE\" or response == \"\" or second_check == \"\":\n return False\n else:\n ht_one = int(response)\n ht_two = int(second_check)\n if (ht_one - ranger) <= ht_two <= (ht_one + ranger):\n return True\n return False\n\n\ndef lower_mast(commander, speed):\n if commander.device != \"zipbox\":\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Tried to execute 'lower_mast' from outside zippermast box. Attempting to forward.\"))\n return commander.forward_command(commander.device_ip[\"zipbox\"], commander.server_port, \"lmt\")\n # send signal to zippermast relay - circuit closed\n zip_mast_pcmov(commander, \"down\", speed)\n return \"MAST_DOWN\"\n\n\ndef stop_lower_mast(commander):\n if commander.device != \"zipbox\":\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Tried to execute 'lower_mast' from outside zippermast box. 
Attempting to forward.\"))\n return commander.forward_command(commander.device_ip[\"zipbox\"], commander.server_port, \"slmt\")\n response = \"NO RESPONSE\"\n second_check = \"\"\n fail_ctr = 0\n while not in_range(response, second_check, 10) and fail_ctr < 5:\n response = zip_mast_pcmov(commander, \"down\", \"0\")\n if response is not \"NO RESPONSE\":\n commander.device_stats[\"mast\"] = response\n time.sleep(0.1)\n second_check = check_zippermast(commander)\n else:\n fail_ctr += 1\n return \"MAST_DOWN\"\n\n\ndef check_zippermast(commander):\n was_on = True\n if commander.relay_stats[\"zipmast_relay\"] != \"1\":\n was_on = False\n commander.relay_on(\"zipmast_relay\")\n time.sleep(.5)\n commander.relay_on(\"zipmast_wake\")\n time.sleep(.5)\n commander.relay_off(\"zipmast_wake\")\n\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Attempting to query zippermast for height.\"))\n mast_response = \"NO RESPONSE\"\n # transmit pcmov_down to zippermast\n try:\n modem = serial.Serial(port=commander.physical_ports['zm_port'], baudrate=38400, rtscts=True, timeout=1.0)\n commander.debug_logger(utilities.logger(NAME, \"{}\".format(modem)))\n modem.flushInput()\n modem.flushOutput()\n mast_response = modem.readline()\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Zippermast response: {}\".format(mast_response)))\n modem.flushInput()\n modem.flushOutput()\n modem.close()\n except:\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Failed to write command to zippermast\"))\n\n if not was_on:\n commander.relay_off(\"zipmast_relay\")\n\n if mast_response is not \"\" and \"$PPSST\" in mast_response:\n mast_resps = mast_response.split(\",\")\n if mast_resps > 3:\n commander.debug_logger(utilities.logger(NAME, \"{}\".format(mast_resps)))\n height_mm = mast_resps[2]\n return height_mm\n return \"NO RESPONSE\"\n\n\ndef zip_mast_pcmov(commander, direction, speed):\n directions = {\"down\": \"0\",\n \"up\": \"1\"}\n # generate PCMOV message\n zm_pcmov = \"PCMOV\"\n zm_id = \"1\" # value unknown\n zm_dir = directions[direction]\n zm_speed = speed # 0-100, equiv. 
to mm/sec\n zm_delay = \"0\" # time delay in seconds\n pcmov = \"$\" + \",\".join([zm_pcmov, zm_id, zm_dir, zm_speed, zm_delay])\n crc = hex(crc16xmodem(pcmov[1:]))[2:]\n pcmov = pcmov + \"*{}\\r\\n\".format(crc)\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Generated PCMOV instruction: {}\".format(pcmov)))\n mast_response = \"NO RESPONSE\"\n # transmit pcmov_down to zippermast\n try:\n modem = serial.Serial(port=commander.physical_ports['zm_port'], baudrate=38400, rtscts=True, timeout=1.0)\n commander.debug_logger(utilities.logger(NAME, \"{}\".format(modem)))\n # for i in range(15):\n modem.flushInput()\n modem.flushOutput()\n modem.write(pcmov)\n mast_response = modem.readline()\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Zippermast response: {}\".format(mast_response)))\n modem.flushInput()\n modem.flushOutput()\n modem.close()\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Wrote message to port {}\".format(commander.zm_port)))\n except:\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Failed to write command to zippermast\"))\n if mast_response is not \"\" and \"$PPSST\" in mast_response:\n mast_resps = mast_response.split(\",\")\n if mast_resps > 3:\n height_mm = mast_resps[2]\n return height_mm\n return \"NO RESPONSE\"\n\n\ndef zippermast_on(commander, zm_port):\n mast_response = \"\"\n try:\n modem = serial.Serial(port=zm_port, baudrate=38400, rtscts=True, timeout=.5)\n modem.flushInput()\n modem.flushOutput()\n time.sleep(.5)\n mast_response = modem.readline()\n mast_response += modem.readline()\n mast_response += modem.readline()\n modem.flushInput()\n modem.flushOutput()\n modem.close()\n commander.debug_logger(utilities.logger(NAME, \"[STATUS] Read from zippermast: '{}'\".format(mast_response)))\n except:\n commander.debug_logger(utilities.logger(NAME, \"[ERROR] Failed to read from zippermast\"))\n if \"$PPSST\" in mast_response:\n return True\n return\n","sub_path":"arduino/archive/RESILIENTVIPER_v2/zippermast.py","file_name":"zippermast.py","file_ext":"py","file_size_in_byte":10416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"533640212","text":"# -*- coding:utf-8 -*- \n__author__ = 'John 2018/3/5 15:19'\n\n\"\"\"\n对于一个给定的 source 字符串和一个 target 字符串,\n你应该在 source 字符串中找出 target 字符串出现的第一个位置(从0开始)。如果不存在,则返回 -1。\n样例\n如果 source = \"source\" 和 target = \"target\",返回 -1。\n\n如果 source = \"abcdabcdefg\" 和 target = \"bcd\",返回 1。\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param: source: source string to be scanned.\n @param: target: target string containing the sequence of characters to match\n @return: a index to the first occurrence of target in source, or -1 if target is not part of source.\n \"\"\"\n\n def strStr(self, source, target):\n # write your code here\n if target == '':\n return 0\n else:\n if target in source:\n for n, i in enumerate(source):\n if i == target[0]:\n return n\n else:\n return -1\n\n\nS = Solution()\n","sub_path":"lintcode/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"132215249","text":"#!/usr/bin/env pytest\n# -*- coding: utf-8 -*-\n###############################################################################\n# $Id$\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: Test WEBP driver\n# Author: Even Rouault, \n#\n###############################################################################\n# Copyright (c) 2011-2013, Even Rouault \n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\nfrom osgeo import gdal\n\n\nimport gdaltest\nimport pytest\n\n###############################################################################\n# Test if WEBP driver is present\n\n\ndef test_webp_1():\n\n gdaltest.webp_drv = gdal.GetDriverByName('WEBP')\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n \n###############################################################################\n# Open() test\n\n\ndef test_webp_2():\n\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n ds = gdal.Open('data/rgbsmall.webp')\n cs = ds.GetRasterBand(1).Checksum()\n assert cs == 21464 or cs == 21450 or cs == 21459, \\\n 'did not get expected checksum on band 1'\n\n###############################################################################\n# CreateCopy() test\n\n\ndef test_webp_3():\n\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n src_ds = gdal.Open('data/rgbsmall.tif')\n out_ds = gdaltest.webp_drv.CreateCopy('/vsimem/webp_3.webp', src_ds, options=['QUALITY=80'])\n src_ds = None\n cs1 = out_ds.GetRasterBand(1).Checksum()\n out_ds = None\n gdal.Unlink('/vsimem/webp_3.webp')\n gdal.Unlink('/vsimem/webp_3.webp.aux.xml')\n\n # 21502 is for libwebp 0.3.0\n assert cs1 == 21464 or cs1 == 21502 or cs1 == 21695 or cs1 == 21700, \\\n 'did not get expected checksum on band 1'\n\n###############################################################################\n# CreateCopy() on RGBA\n\n\ndef test_webp_4():\n\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n md = gdaltest.webp_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LOSSLESS') == -1:\n pytest.skip()\n\n src_ds = gdal.Open('../gcore/data/stefan_full_rgba.tif')\n out_ds = gdaltest.webp_drv.CreateCopy('/vsimem/webp_4.webp', src_ds)\n src_ds = None\n cs1 = out_ds.GetRasterBand(1).Checksum()\n cs4 = out_ds.GetRasterBand(4).Checksum()\n out_ds = None\n gdal.Unlink('/vsimem/webp_4.webp')\n\n # 22849 is for libwebp 0.3.0\n assert cs1 in (22001, 22849, 34422, 36652, 36658, 45319), \\\n 'did not get expected checksum on band 1'\n\n 
assert cs4 == 10807, 'did not get expected checksum on band 4'\n\n###############################################################################\n# CreateCopy() on RGBA with lossless compression\n\n\ndef test_webp_5():\n\n if gdaltest.webp_drv is None:\n pytest.skip()\n\n md = gdaltest.webp_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LOSSLESS') == -1:\n pytest.skip()\n\n src_ds = gdal.Open('../gcore/data/stefan_full_rgba.tif')\n out_ds = gdaltest.webp_drv.CreateCopy('/vsimem/webp_5.webp', src_ds, options=['LOSSLESS=YES'])\n src_ds = None\n cs1 = out_ds.GetRasterBand(1).Checksum()\n cs4 = out_ds.GetRasterBand(4).Checksum()\n out_ds = None\n gdal.Unlink('/vsimem/webp_5.webp')\n\n assert cs1 == 12603 or cs1 == 18536 or cs1 == 14800, \\\n 'did not get expected checksum on band 1'\n\n assert cs4 == 10807, 'did not get expected checksum on band 4'\n\n\n\n","sub_path":"autotest/gdrivers/webp.py","file_name":"webp.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"460720074","text":"#!/usr/bin/env python\n\"\"\"RF2_reducer.py\"\"\"\n\nfrom operator import itemgetter\nimport sys\n#Reducer para el RF2 del Taller 1\n\n\n# input comes from STDIN\n# Input follows the format: \n# number_of_month tab sum_of_total_costs tab number_of_trips\n# Output format\n# Same\n\ntotals = {}\n\nfor line in sys.stdin:\n # remove leading and trailing whitespace\n line = line.strip()\n\n # parse the input we got from mapper.py\n key, count = line.split('\\t')\n\n #Updates Structures\n if(key in totals):\n totals[key] = totals[key] + int(count) \n else:\n totals[key] = int(count)\n \n\n\nfor key in totals.keys():\n print('%s\\t%s' % (key, totals[key]))\n\n","sub_path":"Taller 1/MapReduceScripts/splitter_tester/splitter_reducer.py","file_name":"splitter_reducer.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"5131852","text":"d, n = map(int, input().split())\ndef division(x):\n if x % 100 != 0:\n return 0\n else:\n return division(x//100) +1\ncount= 0\nfor i in range(1, 1010001):\n if division(i) == d:\n count +=1\n if count == n:\n print(i)\n break","sub_path":"Python_codes/p03324/s424594570.py","file_name":"s424594570.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"638630555","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CNN(nn.Module):\n def __init__(self, opt):\n super(CNN, self).__init__()\n\n self.MODEL = opt[\"MODEL\"]\n self.BATCH_SIZE = opt[\"BATCH_SIZE\"]\n self.MAX_SENT_LEN = opt[\"MAX_SENT_LEN\"]\n self.WORD_DIM = opt[\"WORD_DIM\"]\n self.VOCAB_SIZE = opt[\"VOCAB_SIZE\"]\n self.CLASS_SIZE = opt[\"CLASS_SIZE\"]\n self.FILTERS = opt[\"FILTERS\"]\n self.FILTER_NUM = opt[\"FILTER_NUM\"]\n self.DROPOUT_PROB = opt[\"DROPOUT_PROB\"]\n self.IN_CHANNEL = 1\n\n assert (len(self.FILTERS) == len(self.FILTER_NUM))\n\n # one for UNK and one for zero padding\n self.embedding = nn.Embedding(self.VOCAB_SIZE + 2, self.WORD_DIM, padding_idx=self.VOCAB_SIZE + 1)\n if self.MODEL == \"static\" or self.MODEL == \"non-static\" or self.MODEL == \"multichannel\":\n self.WV_MATRIX = opt[\"WV_MATRIX\"]\n self.embedding.weight.data.copy_(torch.from_numpy(self.WV_MATRIX))\n if self.MODEL == \"static\":\n self.embedding.weight.requires_grad = False\n elif self.MODEL == \"multichannel\":\n self.embedding2 = nn.Embedding(self.VOCAB_SIZE + 2, self.WORD_DIM, padding_idx=self.VOCAB_SIZE + 1)\n self.embedding2.weight.data.copy_(torch.from_numpy(self.WV_MATRIX))\n self.embedding2.weight.requires_grad = False\n self.IN_CHANNEL = 2\n\n for i in range(len(self.FILTERS)):\n conv = nn.Conv1d(self.IN_CHANNEL, self.FILTER_NUM[i], self.WORD_DIM * self.FILTERS[i], stride=self.WORD_DIM)\n setattr(self, 'conv_%d'%i, conv)\n\n self.fc = nn.Linear(sum(self.FILTER_NUM), self.CLASS_SIZE)\n\n def get_conv(self, i):\n return getattr(self, 'conv_%d'%i)\n\n def forward(self, inp):\n x = self.embedding(inp).view(-1, 1, self.WORD_DIM * self.MAX_SENT_LEN)\n if self.MODEL == \"multichannel\":\n x2 = self.embedding2(inp).view(-1, 1, self.WORD_DIM * self.MAX_SENT_LEN)\n x = torch.cat((x, x2), 1)\n\n conv_results = [\n F.max_pool1d(F.relu(self.get_conv(i)(x)), self.MAX_SENT_LEN - self.FILTERS[i] + 1)\n .view(-1, self.FILTER_NUM[i])\n for i in range(len(self.FILTERS))]\n\n x = torch.cat(conv_results, 1)\n x = F.dropout(x, p=self.DROPOUT_PROB, training=self.training)\n x = self.fc(x)\n return x\n\n\n\n\nclass CNN1(nn.Module):\n \n def __init__(self, opt):\n super(CNN1,self).__init__()\n self.opt = opt\n \n V = opt.embed_num\n D = opt.embed_dim\n C = opt.class_num\n Ci = 1\n Co = opt.kernel_num\n Ks = opt.kernel_sizes\n\n self.embed = nn.Embedding(V, D)\n #self.convs1 = [nn.Conv2d(Ci, Co, (K, D)) for K in Ks]\n self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])\n '''\n self.conv13 = nn.Conv2d(Ci, Co, (3, D))\n self.conv14 = nn.Conv2d(Ci, Co, (4, D))\n self.conv15 = nn.Conv2d(Ci, Co, (5, D))\n '''\n self.dropout = nn.Dropout(opt.dropout)\n self.fc1 = nn.Linear(len(Ks)*Co, C)\n\n def conv_and_pool(self, x, conv):\n x = F.relu(conv(x)).squeeze(3) #(N,Co,W)\n x = F.max_pool1d(x, x.size(2)).squeeze(2)\n return x\n\n\n def forward(self, x):\n x = self.embed(x) # (N,W,D)\n \n if self.args.static:\n x = Variable(x)\n\n x = x.unsqueeze(1) # (N,Ci,W,D)\n\n x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)\n\n\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)\n\n x = torch.cat(x, 1)\n\n '''\n x1 = self.conv_and_pool(x,self.conv13) #(N,Co)\n x2 = self.conv_and_pool(x,self.conv14) #(N,Co)\n x3 = self.conv_and_pool(x,self.conv15) #(N,Co)\n x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)\n '''\n x = self.dropout(x) # (N,len(Ks)*Co)\n logit = self.fc1(x) # (N,C)\n return 
logit\n\n","sub_path":"models/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"138211632","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# ---------------------------------------------------------------------\n# Copyright (c) 2012 Michael Hull.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# - Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ----------------------------------------------------------------------\n\nfrom morphforge.core import is_iterable\nfrom morphforge.units import qty\nfrom morphforge.simulation.base import SimulationResult\nfrom mhlibs.quantities_plot import QuantitiesFigure\nfrom morphforge.simulationanalysis.tagviewer.plotspecs import TagPlot\nfrom morphforge.traces import TraceFixedDT\nfrom morphforge.traces import TraceVariableDT\nfrom morphforge.traces import TracePiecewise\nfrom morphforge.traces.eventset import EventSet\nimport morphforge.units as units\n\n# pylint: disable=W0108\n# (Suppress warning about 'unnessesary lambda functions')\n\n\nfrom decimal import Decimal\n\nclass DefaultTagPlots(object):\n Voltage = TagPlot(\"Voltage\", ylabel='Voltage', yrange=(-80*units.mV, 50*units.mV), yunit=units.mV )\n CurrentDensity = TagPlot(\"CurrentDensity\", ylabel='Current\\nDensity', yunit=units.picoamp / units.um2 )\n Current = TagPlot(\"Current\", ylabel='Current', yunit=units.picoamp)\n Conductance = TagPlot(\"Conductance\", ylabel=\"Conductance\", yunit=units.pS)\n ConductanceDensity = TagPlot(\"ConductanceDensity\", ylabel=\"ConductanceDensity\", yunit=units.mS / units.cm2 )\n StateVariable = TagPlot(\"StateVariable\", ylabel=\"StateVariable\")\n StateVariableTau = TagPlot(\"StateTimeConstant\", yunit=units.millisecond, ylabel=\"Time Constant\")\n StateVariableInf = TagPlot(\"StateSteadyState\", ylabel=\"Steady State\")\n Event = TagPlot(\"Event\", ylabel=\"Events\")\n\n\n\n\n\nclass TagViewer(object):\n\n MPL_AUTO_SHOW = True\n\n _default_plot_specs = (\n DefaultTagPlots.Voltage,\n DefaultTagPlots.CurrentDensity,\n DefaultTagPlots.Current,\n DefaultTagPlots.Conductance,\n DefaultTagPlots.ConductanceDensity,\n DefaultTagPlots.StateVariable,\n DefaultTagPlots.StateVariableTau,\n DefaultTagPlots.StateVariableInf,\n DefaultTagPlots.Event,\n )\n\n _default_fig_kwargs = {'figsize': (12, 8) }\n\n _options_show_xlabel = ('only-once','all', False)\n _options_show_xticklabels=('only-once','all', False)\n 
_options_show_xticklabels_with_units=(True,False)\n _options_show_xaxis_position = ('bottom','top')\n\n def __init__(\n self,\n srcs,\n plots=None,\n additional_plots=None,\n figtitle=None,\n fig_kwargs=None,\n show=True,\n linkage=None,\n timerange=None,\n mpl_tight_bounds=False,\n decimate_points=False,\n\n share_x_labels=True,\n\n nxticks=4,\n show_xlabel='only-once',\n show_xticklabels='only-once',\n show_xticklabels_with_units=True,\n show_xaxis_position='bottom',\n xticklabel_quantisation = False,\n xticks=None,\n xlabel='Time'\n ):\n\n self.xlabel = xlabel\n\n if fig_kwargs is None:\n fig_kwargs = self._default_fig_kwargs\n\n self.linkage = linkage\n self.decimate_points = decimate_points\n\n\n if not is_iterable(srcs):\n srcs = [srcs]\n\n # For each type of input (in 'srcs'); this should return a list of traces:\n self.all_trace_objs = []\n self.all_event_set_objs = []\n trace_extractors = {\n SimulationResult: lambda obj: (self.all_trace_objs.extend(obj.traces),self.all_event_set_objs.extend(obj.evsets)),\n TraceFixedDT: lambda obj: self.all_trace_objs.append(obj),\n TraceVariableDT: lambda obj: self.all_trace_objs.append(obj),\n TracePiecewise: lambda obj: self.all_trace_objs.append(obj),\n EventSet: lambda obj: self.all_event_set_objs.append(obj)\n }\n\n for obj in srcs:\n tr_extractor = trace_extractors[type(obj)]\n tr_extractor(obj)\n\n # Use the new PlotSpec architecture:\n # Filter out which plots are actually going to display something,\n # and filter out the rest:\n plots = plots if plots is not None else TagViewer._default_plot_specs\n\n if additional_plots:\n plots = tuple(list(plots) + list(additional_plots))\n\n self.plot_specs = [plotspec for plotspec in plots if\n [tr for tr in self.all_trace_objs if plotspec.addtrace_predicate(tr)] or \\\n [evset for evset in self.all_event_set_objs if plotspec.addeventset_predicate(evset)] \\\n ]\n\n\n self.fig_kwargs = fig_kwargs\n self.figtitle = figtitle\n self.mpl_tight_bounds = mpl_tight_bounds\n\n self.timerange = timerange\n #self.share_x_labels = share_x_labels\n self.nxticks = nxticks\n\n\n # X-axis configuration:\n self.show_xlabel = show_xlabel\n self.show_xticklabels = show_xticklabels\n self.show_xticklabels_with_units = show_xticklabels_with_units\n self.show_xaxis_position = show_xaxis_position\n #self.xticks_as_ints = xticks_as_ints\n self.xticklabel_quantisation = xticklabel_quantisation\n self.xticks=xticks\n assert self.show_xlabel in self._options_show_xlabel, 'Invalid'\n assert self.show_xticklabels in self._options_show_xticklabels, 'Invalid: %s' % show_xticklabels\n assert self.show_xticklabels_with_units in self._options_show_xticklabels_with_units\n assert self.show_xaxis_position in self._options_show_xaxis_position\n if is_iterable( self.xticks ) and all( [isinstance(xtick, (int, float)) for xtick in self.xticks]):\n self.xticks = [ xtick*units.ms for xtick in self.xticks]\n assert self.xticks is None or isinstance(self.xticks, int) or ( is_iterable(self.xticks) and [ qty(xtick) for xtick in self.xticks] )\n\n\n self.fig = None\n self.subaxes = []\n self.create_figure()\n\n\n if TagViewer.MPL_AUTO_SHOW and show:\n import pylab\n pylab.show()\n\n def create_figure(self):\n self.fig = QuantitiesFigure(**self.fig_kwargs)\n\n # Add a title to the plot:\n if self.figtitle:\n self.fig.suptitle(self.figtitle)\n\n # Work out what traces are on what graphs:\n plotspec_to_traces = dict([(plot_spec, [tr for tr in self.all_trace_objs if plot_spec.addtrace_predicate(tr)]) for plot_spec in self.plot_specs ])\n if 
self.linkage:\n self.linkage.process(plotspec_to_traces)\n\n n_plots = len(self.plot_specs)\n\n\n import matplotlib.gridspec as gridspec\n height_ratios = [ps.height_ratio for ps in self.plot_specs]\n gs = list(gridspec.GridSpec(n_plots, 1, height_ratios=height_ratios,) )\n\n ## Lets share a commonn x-axis:\n #axes0 = self.fig.add_axes( gs[0].get_position(self.fig) ) \n #axesoneplus = [ self.fig.add_axes( ss.get_position(self.fig), sharex=axes0 ) for ss in gs[1:]]\n #axes = [axes0] + axesoneplus\n\n axes = [ self.fig.add_axes( ss.get_position(self.fig) ) for ss in gs]\n\n for (i, (plot_spec,ax)) in enumerate(zip(self.plot_specs,axes)):\n\n # Create the axis:\n #ax = self.fig.add_subplot(n_plots, 1, i + 1)\n ax.set_xunit(units.millisecond)\n ax.set_xmargin(0.05)\n ax.set_ymargin(0.05)\n\n ax.set_xaxis_maxnlocator(self.nxticks)\n\n # Leave the plotting to the tag-plot object\n plot_spec.plot( ax=ax,\n all_traces=self.all_trace_objs,\n all_eventsets=self.all_event_set_objs,\n time_range=self.timerange,\n linkage=self.linkage,\n decimate_points=self.decimate_points,\n show_xlabel = self.show_xlabel,\n show_xticklabels = self.show_xticklabels,\n show_xticklabels_with_units = self.show_xticklabels_with_units,\n show_xaxis_position = self.show_xaxis_position,\n xticklabel_quantisation=self.xticklabel_quantisation,\n is_top_plot = (i==0),\n is_bottom_plot = (i==n_plots-1),\n xticks = self.xticks,\n xlabel=self.xlabel,\n\n )\n\n\n # Save the Axis:\n self.subaxes.append(ax)\n\n if self.mpl_tight_bounds:\n import pylab\n try:\n pylab.tight_layout()\n except AttributeError:\n pass # This is version specfic\n except ValueError:\n pass # Top can't be less than bottom\n\n\n","sub_path":"src/morphforge/simulationanalysis/tagviewer/tagviewer.py","file_name":"tagviewer.py","file_ext":"py","file_size_in_byte":10091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"10771899","text":"# -*- coding: utf-8 -*-\nimport pandas as pd \nimport numpy as np\nimport math\nimport os\n\nstop_word_list = [\"a\",\"able\",\"about\",\"across\",\"after\",\"all\",\"almost\",\"also\",\"am\",\"among\",\"an\",\"and\",\"any\",\"are\",\"as\",\"at\",\"be\",\"because\",\"been\",\"but\",\"by\",\"can\",\"cannot\",\"could\",\"dear\",\"did\",\"do\",\"does\",\"either\",\"else\",\"ever\",\"every\",\"for\",\"from\",\"get\",\"got\",\"had\",\"has\",\"have\",\"he\",\"her\",\"hers\",\"him\",\"his\",\"how\",\"however\",\"i\",\"if\",\"in\",\"into\",\"is\",\"it\",\"its\",\"just\",\"least\",\"let\",\"like\",\"likely\",\"may\",\"me\",\"might\",\"most\",\"must\",\"my\",\"neither\",\"no\",\"nor\",\"not\",\"of\",\"off\",\"often\",\"on\",\"only\",\"or\",\"other\",\"our\",\"own\",\"rather\",\"said\",\"say\",\"says\",\"she\",\"should\",\"since\",\"so\",\"some\",\"than\",\"that\",\"the\",\"their\",\"them\",\"then\",\"there\",\"these\",\"they\",\"this\",\"tis\",\"to\",\"too\",\"twas\",\"us\",\"wants\",\"was\",\"we\",\"were\",\"what\",\"when\",\"where\",\"which\",\"while\",\"who\",\"whom\",\"why\",\"will\",\"with\",\"would\",\"yet\",\"you\",\"your\"]\n\n\"\"\"\n texturizer.simple: Basic text feature calculation\n Calculate statistics such as the average length of words, max word length\n proportion of non stop-words.\n\n Stop-word list taken from: https://www.textfixer.com/tutorials/common-english-words.txt \n\n\"\"\"\n\n########################################################################################\ndef add_text_summary_features(df, columns):\n \"\"\"\n Given a pandas dataframe and a set of column names.\n calculate the simple text summary features and add them.\n \"\"\"\n rez = df.copy()\n for col in columns:\n rez = add_text_features(rez, col)\n return rez\n\n########################################################################################\ndef add_text_features(df, col):\n \"\"\"\n Given a pandas dataframe and a column name.\n calculate the simple text summary features and add them.\n \"\"\"\n col_len = col + \"_len\"\n df[col_len] = df[col].apply(null_tolerant_len)\n def cal_features(x, col):\n if x[col]!=x[col]:\n word_count = 0 \n avg_word_len = 0 \n content_wd = 0 \n capital_d = 0\n else:\n chars = null_tolerant_len(x[col])\n capitals = sum(1 for c in x[col] if c.isupper())\n capital_d = capitals/chars\n word_array = x[col].lower().split()\n non_stop_words = list(set(word_array) - set(stop_word_list))\n word_count = len(word_array)\n word_lengths = list(map(len, word_array))\n avg_word_len = sum(word_lengths)/word_count\n content_wd = len(non_stop_words)/len(word_array)\n return word_count, avg_word_len, content_wd, capital_d\n\n df[[col+'_wc', col+'_avg_wl', col+'_cwd', col+'_caps']] = df.apply(cal_features, col=col, axis=1, result_type=\"expand\")\n\n return df\n\n########################################################################################\ndef null_tolerant_len(x):\n if x != x:\n return 0\n else:\n return len(x)\n\n \n########################################################################################\n","sub_path":"texturizer/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"557150311","text":"# -*- Mode: Python -*-\n#\n# General log emission class\n#\n\nimport array, inspect, os, sys, traceback, __main__\nfrom time import (strftime, gmtime)\n\ntry:\n from thread import get_ident\nexcept:\n from _thread import get_ident\n\n#\n# Logger -- general routines for submitting information to a log - syslog or \n# file.\n#\nclass Logger:\n\n kLevelMap = { 'fatal': 0,\n 'error': 1,\n 'warning': 2,\n 'info': 3,\n 'verbose': 4,\n 'debug': 5 }\n\n #\n # Define entry and exit tags used by LogProcEntry and LogProcExit\n #\n kEntryTag = 'BEG'\n kExitTag = 'END'\n\n #\n # Default output to nothing.\n #\n def __init__( self ):\n for name, level in list(self.kLevelMap.items()):\n setattr(self, 'k' + name.capitalize(), level)\n\n self._out = self._flush = self._close = self._nothing\n self.fd = None # File name or descriptor logging to\n self._showTime = 1 # If TRUE, print timestamp\n self._showThread = 0 # If TRUE, print thread ID\n self._showFile = 1 # If TRUE, print defining file name\n self._showFunc = 1 # If TRUE, print function name\n self._showSelf = 1 # If TRUE, print first method arg\n self.maxLength = None\n self.setLevel(self.kError)\n\n #\n # If logging to a file, close it and reopen it. Used for rolling log files.\n #\n def reset(self):\n if isinstance(self.fd, str):\n self.useFile(self.fd, self._showTime, self._showThread, self._showFile)\n\n def _log(self, level, logMsg):\n self._out(logMsg)\n self._flush()\n\n def _fatal(self, *args): self._log(self.kFatal, self._formatOutput(self.kFatal, args))\n def _error(self, *args): self._log(self.kError, self._formatOutput(self.kError, args))\n def _warning(self, *args): self._log(self.kWarning, self._formatOutput(self.kWarning, args))\n def _info(self, *args): self._log(self.kInfo, self._formatOutput(self.kInfo, args))\n def _verbose(self, *args): self._log(self.kVerbose, self._formatOutput(self.kVerbose, args))\n def _debug(self, *args): self._log(self.kDebug, self._formatOutput(self.kDebug, args))\n def _begin(self, *args): self._log(self.kVerbose, self._formatOutput(self.kVerbose, args, self.kEntryTag))\n def _end(self, *args): self._log(self.kVerbose, self._formatOutput(self.kVerbose, args, self.kExitTag))\n\n #\n # Issues a log message at the given severity level.\n #\n def log(self, level, *args):\n self._log(level, self._formatOutput(level, args))\n\n #\n # Does not log anything\n #\n def _nothing(self, *ignored):\n return\n\n #\n # Set the log threshold level. 
For faster processing, update methods to\n # either emit something or return silently.\n #\n def setLevel(self, maxLevel):\n if isinstance(maxLevel, str):\n tmp = self.kLevelMap.get(maxLevel.lower())\n if tmp is None:\n raise ValueError(maxLevel)\n maxLevel = tmp\n \n if maxLevel > self.kDebug:\n maxLevel = self.kDebug\n elif maxLevel < self.kFatal:\n maxLevel = self.kFatal\n\n #\n # Turn on/off levels\n #\n procs = self.__class__.__dict__\n for name, level in list(self.kLevelMap.items()):\n if level <= maxLevel:\n setattr(self, name, getattr(self, '_' + name))\n else:\n setattr(self, name, self._nothing)\n\n #\n # Special handling for entry/exit methods.\n #\n if maxLevel >= self.kVerbose:\n self.begin = self._begin\n self.end = self._end\n else:\n self.begin = self.end = self._nothing\n\n #\n # Flush any pending output and revert to doing nothing\n #\n def close(self):\n self._flush()\n self._close()\n self._out = self._flush = self._close = self._nothing\n\n #\n # Setup to use the given file object for log writing\n #\n def useFile(self, fd, showTime = 1, showThread = 0, showFile = 1):\n self.close()\n self.fd = fd\n if isinstance(fd, str):\n fd = open(fd, 'w+')\n self._close = fd.close # Only close file desc. we open\n self._out = fd.write\n self._flush = fd.flush\n self._showTime = showTime\n self._showThread = showThread\n self._showFile = showFile\n\n #\n # Setup to use Python syslog module.\n #\n def useSyslog(self, ident, opts, facility, showTime = 0, showThread = 0, showFile = 1):\n import syslog\n self.close()\n syslog.openlog(ident, opts, facility)\n self._out = syslog.syslog\n self._close = syslog.closelog\n self._showTime = showTime\n self._showThread = showThread\n self._showFile = showFile\n\n #\n # Setup to use whatever is set in sys.stderr.\n #\n def useStdErr(self, showTime = 1, showThread = 0, showFile = 1):\n self.useFile(sys.stderr, showTime, showThread, showFile)\n\n #\n # Setup to use whatever is set in sys.stdout.\n #\n def useStdOut(self, showTime = 1, showThread = 0, showFile = 1):\n self.useFile(sys.stdout, showTime, showThread, showFile)\n\n #\n # Set flag that determines if timestamp is printed in output.\n #\n def showTime(self, showTime = 1):\n self._showTime = showTime\n \n #\n # Set flag that determines if thread ID is printed in output.\n #\n def showThread(self, showThread = 1):\n self._showThread = showThread\n\n #\n # Set flag that determines if filename that contains the log statement is\n # printed.\n #\n def showFile(self, showFile = 1):\n self._showFile = showFile\n\n #\n # set flag that determines if function name is printed in output.\n #\n def showFunction(self, showFunc = 1):\n self._showFunc = showFunc\n \n #\n # Set flag that determines if the first argument (self) of a method is\n # printed in Begin() output.\n #\n def showSelf(self, showSelf = 1):\n self._showSelf = showSelf\n\n #\n # Internal function that builds the string that is ultimately sent to the\n # current sink device.\n #\n def _formatOutput(self, level, args, tag = \"\"):\n\n # Try to get context information. 
Looking for the name of the file we\n # are in, the name of the function (with possible class name\n # prepended), and if we are formatting a Begin() call, a list of\n # argnames and values that describe what is being passed into the\n # function we are logging.\n #\n doBegin = tag == self.kEntryTag and len(args) == 0\n fileName, proc, bArgs = self._procInfo(doBegin)\n if len(bArgs) > 0:\n args = bArgs\n \n # Generate timestamp\n #\n bits = []\n if self._showTime:\n bits.append(strftime(\"%Y%m%d.%H%M%S\", gmtime()))\n\n #\n # Generate thread ID\n #\n if self._showThread:\n bits.append('#{} '.format(get_ident()))\n\n #\n # Print file name containing the log statement\n #\n if self._showFile:\n bits.append(fileName)\n\n #\n # Print the function name containing the log statement. May also have a\n # class name if this is a method.\n #\n if self._showFunc:\n bits.append(proc)\n\n #\n # Print BEG/END tag\n #\n if len(tag) > 0:\n bits.append(tag)\n\n #\n # Append each argument to message string\n #\n bits.extend([str(z) for z in args])\n return ' '.join(bits) + '\\n'\n\n #\n # Return the name of the class that defines the function being logged. We\n # walk the class hierarchy just like Python does in order to locate the\n # actual defining class.\n #\n def _definingClass(self, theClass, codeObj):\n classDict = theClass.__dict__\n name = codeObj.co_name\n if name in classDict:\n tmp = classDict[name]\n if tmp.__code__ == codeObj:\n return theClass.__name__\n for eachClass in theClass.__bases__:\n name = self._definingClass(eachClass, codeObj)\n if name != None:\n return name\n return None\n\n #\n # Returns a tuple containing information about the function being logged:\n # file name, caller name, argument list\n #\n def _procInfo(self, genArgs = 0):\n fileName = '__main__'\n procName = '?'\n args = []\n\n frame = inspect.currentframe()\n frame = frame.f_back # Get out of _procInfo\n if frame:\n frame = frame.f_back # Get out of _formatOutput\n if frame:\n frame = frame.f_back # Get ouf of _log\n\n if frame:\n\n #\n # Extract the code object that contains the call to our log method.\n #\n code = frame.f_code\n fileName = os.path.split(code.co_filename)[1]\n procName = code.co_name\n numArgs = code.co_argcount\n if numArgs > 0:\n \n #\n # Assume we will display first argument. If we determine that\n # we are logging a method, obey the setting for showSelf.\n #\n firstArg = 0\n \n #\n # Get first argument and see if it is an object (ala self)\n #\n frameLocals = frame.f_locals\n varNames = code.co_varnames\n obj = frameLocals[varNames[0]]\n if hasattr(obj, '__class__'):\n className = None\n for each in inspect.getmro(type(obj)):\n if each.__dict__.get(code.co_name):\n className = each.__name__\n break\n if className:\n procName = className + '.' 
+ procName\n if not self._showSelf:\n firstArg = 1\n \n #\n # Create a list of argument names and their runtime values.\n # Only done if we are in a Begin() log method.\n #\n if genArgs:\n for each in varNames[firstArg : numArgs]:\n value = frameLocals[each]\n if isinstance(value, str):\n arg = each + ': ' + value\n else:\n arg = each + ': ' + repr(value)\n args.append(arg)\n each = None\n\n obj = frameLocals = None\n frame = code = None\n return (fileName, procName, args)\n\nclass Foo(object):\n def __init__(self):\n gLog.begin()\n gLog.end()\n \n def bar(self):\n gLog.begin()\n gLog.end()\n\ndef test():\n def a():\n gLog.begin()\n gLog.debug('this is a test')\n gLog.end()\n a()\n f = Foo()\n f.bar()\n\ndef DelLog():\n delattr(__main__.__builtins__, 'gLog')\n\n#\n# First time we are imported, install a global variable `gLog' for everyone to\n# see and use. By default, use stderr.\n#\nif not hasattr(__main__.__builtins__, 'gLog'):\n __main__.__builtins__.gLog = Logger()\n gLog.useStdErr()\n","sub_path":"Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":11357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"635532086","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Cpu, Core, Ram, Swap, Measure\nfrom django.db.models import Q\nimport cStringIO as StringIO\nimport psutil\nimport json\nfrom datetime import datetime\n\n\n# Create your views here.\n\ndef json_serial(obj):\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"Type not serializable\")\n\n\ndef generate_pc_data():\n cpu = psutil.cpu_percent(interval=3, percpu=True)\n swap = [param / (10 ** 6) for param in psutil.swap_memory()] # swap total, used, free in MB\n swap[3] *= 10 ** 6\n ram = [param / (10 ** 6) for param in psutil.virtual_memory()] # memory active and inactive (as occupied and free)\n ram[2] *= 10 ** 6\n disk = [param / (10 ** 6) for param in psutil.disk_usage('/')] # disk occupiance\n disk[-1] *= 10 ** 6\n return cpu, swap, ram, disk\n\n\ndef clear_first_entries(user):\n if len(Cpu.objects.filter(owner=user)) > 1000:\n Cpu.objects.filter(owner=user).first().delete()\n Cpu.objects.filter(~Q(owner=user)).delete() # Delete all data not belonging to logged user\n Ram.objects.filter(owner=user).first().delete()\n Ram.objects.filter(~Q(owner=user)).delete()\n Swap.objects.filter(owner=user).first().delete()\n Swap.objects.filter(~Q(owner=user)).delete()\n Measure.objects.filter(owner=user).first().delete()\n Measure.objects.filter(~Q(owner=user)).delete()\n\n\ndef record_data(user, cpu, ram, swap):\n if user.is_authenticated():\n\n clear_first_entries(user)\n\n own_cpu = Cpu()\n own_ram = Ram()\n own_swap = Swap()\n\n own_cpu.owner = user\n own_cpu.save()\n\n for c in cpu:\n own_core = Core()\n own_core.cpu_id = own_cpu\n own_core.core_load = c\n own_core.save()\n\n own_ram.owner = user\n own_ram.ram_load_free = ram[4]\n own_ram.ram_load_used = ram[3]\n\n own_swap.owner = user\n own_swap.swap_load_total = swap[0]\n own_swap.swap_load_used = swap[1]\n own_swap.swap_load_free = swap[2]\n\n own_ram.save()\n own_swap.save()\n\n measure = Measure()\n measure.user_cpu = own_cpu\n measure.user_ram = own_ram\n measure.user_swap = own_swap\n measure.owner = user\n\n measure.save()\n\n\ndef monitor_list(request):\n cpu, swap, ram, disk = generate_pc_data()\n json_response = {'cpu_parameters': cpu, 'swap_parameters': swap, 'ram_parameters': ram,\n 'disk_parameters': disk}\n\n record_data(request.user, cpu=cpu, swap=swap, ram=ram)\n if request.is_ajax():\n return HttpResponse(json.dumps({'user':json_response}))\n return render(request, 'monitors/hw.html', {'cpu_parameters': cpu, 'swap_parameters': swap, 'ram_parameters': ram,\n 'disk_parameters': disk})\n\n\ndef generate_json():\n cpu = psutil.cpu_percent(percpu=True, interval=3)\n cpu_dict = {}\n for i, core in enumerate(cpu):\n cpu_dict[\"core\" + str(i)] = core\n ram = psutil.virtual_memory()._asdict()\n swap = psutil.swap_memory()._asdict()\n disk = psutil.disk_usage('/')._asdict()\n data = {\"stats\": [{\"cpu_parameters\": cpu_dict}, {\"ram_parameters\": ram}, {\"swap_parameters\": swap},\n {\"disk_parameters\": disk}]}\n return json.dumps(data, indent=4, sort_keys=True)\n\n\ndef monitor_json(request):\n json_file = StringIO.StringIO()\n json_file.write(generate_json())\n response = HttpResponse(content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=json_stats'\n response.write(json_file.getvalue())\n return response\n\n\ndef clear_database(request):\n Cpu.objects.all().delete()\n Ram.objects.all().delete()\n Swap.objects.all().delete()\n 
Measure.objects.all().delete()\n Core.objects.all().delete()\n return render(request, 'monitors/hw.html')\n\n\ndef chart_list(request):\n measures_data = Measure.objects.filter(owner=request.user).order_by('measure_date')\n cpus_data = [Cpu.objects.get(pk=m.user_cpu.pk) for m in measures_data]\n ram_data = [Ram.objects.get(pk=m.user_ram.pk) for m in measures_data]\n swap_data = [Swap.objects.get(pk=m.user_swap.pk) for m in measures_data]\n\n ram_load_used_data = [r.ram_load_used for r in ram_data]\n ram_load_free_data = [r.ram_load_free for r in ram_data]\n\n swap_load_total_data = [s.swap_load_total for s in swap_data]\n swap_load_used_data = [s.swap_load_used for s in swap_data]\n swap_load_free_data = [s.swap_load_free for s in swap_data]\n\n measures_dates = [m.measure_date for m in measures_data]\n\n cores_data = []\n\n for cpu in cpus_data:\n temp_cores = []\n for core in Core.objects.filter(cpu_id=cpu.pk):\n temp_cores.append(core.core_load)\n cores_data.append(temp_cores)\n\n return render(request, 'monitors/charts.html',\n {'cores': json.dumps(cores_data),\n 'ram_free': json.dumps(ram_load_free_data),\n 'ram_used': json.dumps(ram_load_used_data),\n 'swap_total': json.dumps(swap_load_total_data),\n 'swap_used': json.dumps(swap_load_used_data),\n 'swap_free': json.dumps(swap_load_free_data),\n 'measures': json.dumps(measures_dates, default=json_serial, ensure_ascii=False)})\n","sub_path":"monitors/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"49534517","text":"from django.conf import settings\nimport os\nfrom datetime import datetime, date, time\nfrom lxml import etree, objectify\nimport uuid\nimport glob\nfrom zipfile import ZipFile\nimport datetime\nimport time\nimport dateutil.parser\nfrom decimal import Decimal\nfrom cost_cadastr import xmlfirload, xmllistcreate\nimport requests\nimport shutil\nfrom lxml import etree, objectify\n\ndef validateXML(xmlfile):\n \"\"\"\n проверка на соовтетствие XML-схеме\n \"\"\"\n try:\n xml_doc = etree.parse(xmlfile)\n except Exception as e:\n raise Exception(\"Ошибка парсинга XML-файла {0}\".format(xmlfile))\n else:\n return True\n xml_schema_filename = os.path.normcase(os.path.join(settings.STATICFILES_DIRS[0], \n 'scheme/ListForRating_v04/', 'ListForRating_v04.xsd '))\n xml_schema_doc = etree.parse(xml_schema_filename)\n xmlschema = etree.XMLSchema(xml_schema_doc)\n if xmlschema.validate(xml_doc):\n return True\n else:\n return False\n#--------------------------\ndef chekfiles(fileName, zuoptionslist, oksoptionslist, loadmifoption):\n \"\"\"\n проверка файлана условия соответствия критериям выбранных объектов\n \"\"\"\n try:\n xml_doc = etree.parse(fileName)\n except Exception as e:\n raise Exception(\"Ошибка парсинга XML-файла {0}\".format(fileName))\n else:\n objecttype = xml_doc.xpath('/ListForRating/ListInfo/ObjectsType/ObjectType')[0].text\n #если объект ОКС проверяем на вхождение в список указанных пользователем\n if objecttype in oksoptionslist:\n cadnums = []\n if objecttype == '002001002000':\n buildingNodes = xml_doc.xpath('/ListForRating/Objects/Buildings/Building')\n for building in buildingNodes:\n cadnums.append(building.get('CadastralNumber'))\n if loadmifoption:\n xmllistcreate.loadMifMid(os.path.normpath(os.path.dirname(fileName)), cadnums)\n elif objecttype == '002001004000':\n constrNodes = xml_doc.xpath('/ListForRating/Objects/Constructions/Construction')\n for constr in constrNodes:\n cadnums.append(constr.get('CadastralNumber'))\n if loadmifoption:\n xmllistcreate.loadMifMid(os.path.normpath(os.path.dirname(fileName)), cadnums)\n elif objecttype == '002001005000':\n unconstrNodes = xml_doc.xpath('/ListForRating/Objects/Uncompleteds/Uncompleted')\n for unconstr in unconstrNodes:\n cadnums.append(unconstr.get('CadastralNumber'))\n if loadmifoption:\n xmllistcreate.loadMifMid(os.path.normpath(os.path.dirname(fileName)), cadnums)\n elif objecttype == '002001003000':\n flatsNodes = xml_doc.xpath('/ListForRating/Objects/Flats/Flat')\n for flat in flatsNodes:\n cadnums.append(flat.get('CadastralNumber'))\n elif objecttype == '002001009000':\n carsNodes = xml_doc.xpath('/ListForRating/Objects/CarParkingSpaces/CarParkingSpace')\n for car in carsNodes:\n cadnums.append(car.get('CadastralNumber'))\n return len(cadnums), xml_doc.xpath('/ListForRating/ListInfo')[0].get('DateForm')\n #если объект ЗУ\n elif objecttype == '002001001000' and zuoptionslist:\n categories = xml_doc.xpath('//ListForRating/ListInfo/Categories/Category')\n chek_cat = False\n for item in categories:\n if item.text in zuoptionslist:\n chek_cat = True\n if not chek_cat:\n return False\n else: \n parcels = xml_doc.xpath('//ListForRating/Objects/Parcels/Parcel')\n count = int(xml_doc.xpath('//ListForRating/ListInfo/Quantity')[0].text)\n for item in parcels:\n item_category = item.xpath('./Category')[0].text\n if item_category not in zuoptionslist:\n item.getparent().remove(item)\n count -= 1\n if count > 0:\n xml_doc.xpath('//ListForRating/ListInfo/Quantity')[0].text = str(count)\n 
xml_doc.write(fileName, encoding='UTF-8')\n return count, xml_doc.xpath('/ListForRating/ListInfo')[0].get('DateForm')\n else:\n return False\n else:\n return False\n \n#------------------------------------\ndef convertList(filelist, zuoptionslist, oksoptionslist, loadmifoption):\n \"\"\"\n переформатирование перечня \n \"\"\"\n #распаковываем перечень\n try:\n xml_files_list = xmlfirload.exctractZip(filelist)\n except Exception as e:\n raise Exception(\"Ошибка распаковки архивного файла\")\n else:\n #создадим директорию для загрузки графики\n# if loadmifoption:\n# try:\n# normal_dir_path = os.path.normpath(os.path.dirname(xml_files_list[0]))\n# os.mkdir(os.path.normpath(normal_dir_path))\n# except:\n# raise Exception(\"Ошибка создания директории для загрузки MIF файлов\")\n \n totalobjectscount = 0\n datecreate = ''\n for item in xml_files_list:\n count = chekfiles(item, zuoptionslist, oksoptionslist, loadmifoption)\n if not count:\n os.remove(item)\n else:\n totalobjectscount += count[0]\n datecreate = count[1]\n out_dir = xmlfirload.createDir(settings.MEDIA_ROOT + '/cost_cadastr/temp')\n file_out = xmllistcreate.packToZIP2(out_dir, os.path.dirname(xml_files_list[0]))\n return totalobjectscount, file_out, datecreate\n \n\n","sub_path":"cost_cadastr/pskoload.py","file_name":"pskoload.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"81025200","text":"import copy\nimport datetime\nimport json\nimport logging\nimport os\nfrom operator import itemgetter\n\nfrom ourcup import basedir\nfrom ourcup.fixtures.country_codes import CountryCodes\n\nFIXTURES_JSON_FILE = os.path.join(basedir, 'ourcup', 'fixtures', 'data', 'world-cup-2019-matches.json')\n\nlogger = logging.getLogger(__name__)\n\n\nclass MatchPicker(object):\n\n def __init__(self):\n self._logger = logging.getLogger(__name__)\n self._json_file_path = FIXTURES_JSON_FILE\n self._fixtures = MatchPicker._process_matches(json.load(open(self._json_file_path)))\n self._logger.info(\"loading fixtures from \"+self._json_file_path)\n self._translator = CountryCodes()\n\n @staticmethod\n def _process_matches(raw_match_data):\n matches = []\n for m in raw_match_data:\n team1_alpha3 = m['home_team']['code']\n team2_alpha3 = m['away_team']['code']\n # skip matches that we don't know the teams of yet\n if (team1_alpha3 != 'TBD') and (team2_alpha3 != 'TBD'):\n match = {\n 'team1': team1_alpha3,\n 'team2': team2_alpha3,\n 'date': m['datetime'][0:10]\n }\n matches.append(match)\n return matches\n\n def by_population(self, country_pop_data):\n all_games = copy.deepcopy(self._fixtures)\n # change the score so US doesn't show up very much, commented out because USA isn't in it!\n # country_alpha3_to_pop_map['USA'] = 1\n country_alpha3_to_pop_map = {m['country']: m['population'] for m in country_pop_data}\n for game in all_games:\n logger.debug(\"{} vs. {}\".format(game['team1'], game['team2']))\n try:\n team1_pop = country_alpha3_to_pop_map[self._translator.getByFifaAlpha3(game['team1']).iso]\n except KeyError:\n logger.warning(\"Can't find population data for {}\".format(game['team1']))\n team1_pop = 0 # the country isn't our list of\n try:\n team2_pop = country_alpha3_to_pop_map[self._translator.getByFifaAlpha3(game['team2']).iso]\n except KeyError:\n logger.warning(\"Can't find population data for {}\".format(game['team2']))\n team2_pop = 0 # the country isn't our list of\n game['score'] = team1_pop + team2_pop\n\n prioritized_games = sorted(all_games, key=itemgetter('score'), reverse=True)\n for game in prioritized_games:\n game['team1Country'] = self._translator.getByFifaAlpha3(game['team1'])\n game['team2Country'] = self._translator.getByFifaAlpha3(game['team2'])\n game['date'] = datetime.datetime.strptime(game['date'], '%Y-%m-%d')\n return prioritized_games\n\n def participating_country_codes(self):\n team1_codes = set([game['team1'] for game in self._fixtures])\n team2_codes = set([game['team2'] for game in self._fixtures])\n combined = team1_codes.union(team2_codes)\n return combined\n","sub_path":"ourcup/fixtures/match_picker.py","file_name":"match_picker.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"71377752","text":"#!/usr/bin/env python3\n#you need this above to get ROS to execute the script - points to the python executable\n#use python 3 here due to the RL libraries being written in python 3\n\n'''\nThis script -----------\n\nAuthor: Charles (Chuck) Sullivan\nLast update: 9-13-2020\n'''\n\n\nfrom gym import spaces\nimport gym\nimport numpy as np\nimport time\nimport random\nfrom math import sqrt\nimport datetime\nimport os\nimport pprint\nimport sys\n\n#import necessary stable baselines TD3 libraries for learning purposes\nfrom stable_baselines import TD3\nfrom stable_baselines.td3.policies import MlpPolicy as Td3MlpPolicy\nfrom stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EveryNTimesteps, BaseCallback\nfrom stable_baselines.common.evaluation import evaluate_policy\n#import ROS specific libraries and custom message types\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Bool\nfrom std_msgs.msg import Empty\nfrom rl_robotics_framework.msg import sensor_processing\nfrom rl_robotics_framework.msg import gcode_packager\nfrom rl_robotics_framework.msg import apriltag_data\n\n\nmodel_dir = str(sys.argv[1])\nprint (model_dir)\n\npp = pprint.PrettyPrinter(indent = 4)\n\n#global variables\ntestString = \"\"\n\nxCommand = 0.\nyCommand = 0.\n#sensor readings\nxState_global = 0.\nyState_global = 0.\n#apriltag readings\nxPos_global = 0.\nyPos_global = 0.\n\nxZero = 0. #zero values for each episode\nyZero = 0.\n\naction_done = False\naction_done_flag = False\n\n#define constants\nNUM_STEPS_EPISODE = 24 #index at 0 - (desirednum - 1)\nTOTAL_STEPS = 8000\nTIME_PER_STEP = 1 #this could be variable depending on hardware\nrobotName = \"robot_1\"\nNOISE_CONSTANT = .03 #how do i update this inside the TD3.learn function\nSIGMA = .44\n#just going to put this at a rate large enough taht we liikkely wont get hardware hangups\n\n#here are the constants for the TD3 Model\nGAMMA = 0.99\nLEARNING_RATE = .05\nBUFFER_SIZE = 25000\nLEARNING_STARTS = 1000 #1024\nGRADIENT_STEPS = 3\nBATCH_SIZE = 64 #512\nTRAIN_FREQ = 64 #64\nTAU = .05\nPOLICY_DELAY = 25 #32\n# ACTION_NOISE = None | this is set later when you build the noise generator\nTARGET_POLICY_NOISE = 0.1\nTARGET_NOISE_CLIP = 0.25\nRANDOM_EXPLORATION = 0.0\nVERBOSE = 0\nTENSORBOARD_LOG = \"./logs/model_log\"\n_INIT_SETUP_MODEL = True\nPOLICY_KWARGS = None\nFULL_TENSORBOARD_LOG = 'tensorboadr_full'\nSEED = None\nN_CPU_TF_SESS = None\n\n#set up a log file for the printed output\nbaseDir = 'logs/terminal_output' + robotName + '/learining_run_TD3_'\n\nt = datetime.datetime.now()\nt = t.strftime(\"%m_%d_%y_%X/\")\ndirName = baseDir + t \nif not os.path.exists(dirName):\n\tos.makedirs(dirName)\n\tprint(dirName + \" created successfully\")\n\tdone = True\nelse:\n\tprint(dirName + \" exists\")\n\t\n\t\t\nfile = open(os.path.join(dirName, \"learn.txt\"), \"w\")\nfile.close()\n\n\n# define ROS publisher nodes\ncmd_pub = rospy.Publisher('/actuator_commands', gcode_packager, queue_size = 30)\ndirect_cmd_publisher = rospy.Publisher('/grbl_commands', String, queue_size = 30)\ngrbl_reset_pub = rospy.Publisher('/grbl_rst', Empty, queue_size=1)\n#test pub to update the subscribed node. 
for testing\ntestpub = rospy.Publisher('/testData', String, queue_size = 20)\n\n#define ROS subscriber callback methods\ndef robot_state_callback(data):\n\t#print(\"Updating State\")\n\tglobal xState_global\n\tglobal yState_global\n\txState_global = data.xSensor\n\tyState_global = data.ySensor\n\t#print(\"x sensor: {}\", format(xState))\n\t#print(\"y sensor: {}\", format(yState))\t\n\ndef gnd_truth_callback(data):\n\t#print(\"updating Ground Truth Data\")\n\tglobal xPos_global, yPos_global\n\txPos_global = data.x_pos_gnd*1000\n\tyPos_global = data.y_pos_gnd*1000\n\t#print(\"x position: {}\", format(xPosition))\n\t#print(\"y position: {}\", format(yPosition))\n\ndef action_done_callback(data):\n\tglobal action_done\n\taction_done = data.data\n\ndef testData_callback(data):\n\t#print(\"test data recieved\")\n\tglobal testString\n\ttestString = data.data + '\\n'\n\t\n\n\n#Define ROS subscriber nodes\ndef RL_subscribers():\n rospy.init_node('learning_node')\n rospy.Subscriber(\"/robot_state\", sensor_processing, robot_state_callback)\n rospy.Subscriber(\"/gnd_pos_truth\", apriltag_data, gnd_truth_callback)\n rospy.Subscriber(\"/action_done\", Bool, action_done_callback)\n rospy.Subscriber(\"/testData\", String, testData_callback)\n\n # skipping spin to see if the subscriber works without spinnign whle the thin is running\n #rospy.spin()\n\t# \n\t# \n#Define various functions used throughout script\t\n\ndef rewardCalculation(x_current, y_current, x_startstep, y_startstep):\n\t# xDist = x_current - x_startstep\n\tyDist = y_current - y_startstep\n\n\t# reward = yDist\n\n\tif yDist > .5:\n\t\t#movement positive\n\t\treward = 10*yDist\n\telif yDist <= .5 and yDist >=-.5:\n\t\t#movement doesnt change much?\n\t\t#this discourages the same movement\n\t\treward = -100\n\telse:\n\t\treward = yDist*2\n\n\treturn reward\n\ndef wait_for_action(is_homing):\n\tglobal action_done\n\tcount = 0\n\ttimeout = 10 #seconds\n\tif is_homing:\n\t\ttimeout = 20\n\tstart = time.time()\n\twhile not action_done:\n\t\tcount = count + 1\n\t\t# Sometimes if the commands are too close the system completes the action so fast the system doesnt get\n\t\t# a chance to register it. no action should take more than 10 seconds so if greater than that assume the \n\t\t# action is done\n\t\tif ((time.time()-start) > timeout): #this is 10 seconds\n\t\t\taction_done = True\n\t\t\n\t\t\n\t\t\n\tprint(\"\\t--Action Complete--\\t|\")\n\ttime.sleep(1) #this ensures things tont compute too quickly and action done works for the rest of the episode meaning only one action complete\n\n# def screenCmdData(generated, xPrev, yPrev):\n# \tglobal step_count\n# \tcheckRange = 1\n# \tchangeAmount = 1.5\n\t\n\t\n# \txGen = generated[0]\n# \tyGen = generated[1]\n\n\t\n\n# \t#find difference between generaged and previous command\n# \txDiff = xGen-xPrev\n# \tyDiff = yGen-yPrev\n\n# \t#a for loop would be better/more scalable here but i just want to see if it works\n\n# \tif (abs(xDiff) < checkRange):\n# \t\tif xGen > (100. - changeAmount): #this would result in a greator than 100 command\n# \t\t\txScreened = xGen - random.uniform(2,5)\n# \t\telse:\n# \t\t\txScreened = xGen + random.uniform(2,5)\n# \telse:\n# \t\txScreened = xGen\n\n# \tif (abs(yDiff) < checkRange):\n# \t\tif yGen > (100. 
- changeAmount): #this would result in a greator than 100 command\n# \t\t\tyScreened = yGen - random.uniform(2,5)\n# \t\telse:\n# \t\t\tyScreened = yGen + random.uniform(2,5)\n# \telse:\n# \t\tyScreened = yGen\n\n# \t# print ('\\tScreening command data -- xCmd: ' +str(xScreened) + '\\t yCmd: ' + str(yScreened))\n\n\n\n# \treturn [xScreened, yScreened]\n\n\ndef homeGrblController():\n\tprint(\"Homing System....\")\n\tdirect_cmd_publisher.publish('$h')\n\twait_for_action(True)\n\ttime.sleep(.5)\n\tdirect_cmd_publisher.publish('G92 X0 Y0')\n\tprint(\"Homing Complete - moving to robot 0 state\")\n\t# cmd_message = gcode_packager()\n\t#publish action\n\t# cmd_message.x_percentage = 0.\n\t# cmd_message.y_percentage = 0.\n\t# # time.sleep(1)\n\t# cmd_pub.publish(cmd_message)\n\t# wait_for_action(False)\n\ndef initGrblController():\n\tprint(\"Initializing Grbl System...\")\n\tdirect_cmd_publisher.publish('$X')\n\thomeGrblController()\n\ndef resetGrblController():\n\tprint(\"Resetting grbl controller...\")\n\tmsg = Empty()\n\tgrbl_reset_pub.publish(msg)\n\ttime.sleep(4) #wait for reset to take plae\n\tinitGrblController()\n\tprint(\"grbl controller reset!\")\n\ndef homeRobot():\n\tprint(\"Sending Robot Home\")\n\tdirect_cmd_publisher.publish('G0 X0 Y0')\n\twait_for_action()\n\tprint(\"Robot Home\")\n\t\n\n\nclass soft_learner():\n\tdef __init__(self):\n\t\t#init position variables\n\t\tself.testS = \"\" #test string for testing data passing\n\t\tself.xCmd = 0\n\t\tself.yCmd = 0\n\t\tself.xCmdPrev = 0\n\t\tself.yCmdPrev = 0\n\t\tself.xPos = 0\n\t\tself.yPos = 0\n\t\tself.xPosPrev = 0\n\t\tself.yPosPrev = 0\n\t\tself.xZero = 0\n\t\tself.yZero = 0\n\t\tself.xState = 0\n\t\tself.yState = 0\n\t\tself.xStatePrev = 0\n\t\tself.yStatePrev = 0\n\t\tself.xZero = 0 \n\t\tself.yZero = 0\n\n\n\t\tself.log_file = None\n\n\t\tself.state = np.array([0,0])\n\t\tself.statePrev = np.array([0,0])\n\n\t\tself.FirstCommand = True\n\n\t\tself.TotalStepCount = 0\n\t\tself.TotalEpisodeCount = 0\n\t\t#init steps and dt (time per step)\n\t\tself.n_steps = 0\n\t\tself.dt = TIME_PER_STEP #1=1second - play with this variable\n\t\t#initialize proper spaces and metadata \n\t\t#both the action and state space are bounded by 0-100 for %of actuation\n\t\t#mapping these to real world actiona dnnand sensors are handeled in another script\n\t\t# self.observation_space = spaces.Box(low=np.array([0.,0.]), high=np.array([100.,100.])) #obs space = continuous, \n\t\t\n\t\t#remapping observations -1 to 1 so debug \"convergence problem\"\n\t\tself.observation_space = spaces.Box(low=np.array([-1.,-1.]), high=np.array([1.,1.])) #obs space = continuous, \n\n\t\tself.action_space = spaces.Box(low=np.array([-1.,-1.]), high=np.array([1.,1.]))\n\t\t\n\t\tself.metadata = 0\n\n\t\t#send initial commands to grbl (home, set 0 all that)\n\n\tdef reset(self):\n\t\tglobal xZero, yZero, xPos_global, yPos_global\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.n_steps = 0\n\t\tself.reward = 0\n\n\t\tself.xCmd = 0\n\t\tself.yCmd = 0\n\t\tself.xCmdPrev = 0\n\t\tself.yCmdPrev = 0\n\t\tself.xPos = 0\n\t\tself.yPos = 0\n\t\tself.xPosPrev = 0\n\t\tself.yPosPrev = 0\n\t\tself.xZero = 0\n\t\tself.yZero = 0\n\t\tself.xState = 0\n\t\tself.yState = 0\n\t\tself.xStatePrev = 0\n\t\tself.yStatePrev = 0\n\n\t\tself.FirstCommand = True\n\n\t\t\n\t\t#reset grbl controller\n\t\tresetGrblController()\n\t\tprint('==== BEGINNING EPISODE ' + str(self.TotalEpisodeCount) + ' ====')\n\n\t\t#re calibrate and set x and y zero values for each new run for april tags data\n\t\tself.xZero = 
xPos_global\n\t\tself.yZero = yPos_global\n\n\t\txReturn = self.x/50-1\n\t\tyReturn = self.y/50-1\n\n\t\t#run calibration function here\n\t\treturn xReturn, yReturn\n\n\tdef step(self, generated_cmd_array):\n\t\t\n\t\tglobal xPos_global, yPos_global, xState_global, yState_global\n\t\t\n\t\tf = open(os.path.join(dirName, \"learn.txt\"), \"a\")\n\t\tself.log_file = f\n\n\t\txPos_startstep = xPos_global #try to take a 0 so every stpe knows where it starts and us it to find reward\n\t\tyPos_startstep = yPos_global\n\t\t\n\t\tprint('---------------------| Total Steps: ' + str(self.TotalStepCount) + ' | Episode: ' + str(self.TotalEpisodeCount) + ' | Episode Step: ' + str(self.n_steps) + ' |-----------------------')\n\t\tself.log_file.write('---------------------| Total Steps: ' + str(self.TotalStepCount) + ' | Episode: ' + str(self.TotalEpisodeCount) + ' | Episode Step: ' + str(self.n_steps) + ' |-----------------------\\n')\n\n\t\tgenerated_cmd_array = np.clip(generated_cmd_array, self.action_space.low, self.action_space.high)\n\n\t\t#declare message type\n\t\tcmd_message = gcode_packager()\n\t\t\n\t\t#preprocess generated commands to make sure they are sufficiently far enough away from last command to not freeze system and within 0-100\n\t\t# screened_cmd_array = screenCmdData(generated_cmd_array, self.xCmdPrev, self.yCmdPrev)\n\t\t# self.xCmd = screened_cmd_array[0]\n\t\t# self.yCmd = screened_cmd_array[1]\n\n\t\tself.xCmd = (generated_cmd_array[0]+1)*50\n\t\tself.yCmd = (generated_cmd_array[1]+1)*50\n\n\t\t# self.yCmd = generated_cmd_array[1]\n\t\tprint(\"\\tCommand Generated\\t| \\t xCmd: %6.3f \\t yCmd: %6.3f\" %(self.xCmd, self.yCmd))\n\t\tself.log_file.write(\"\\tCommand Generated\\t| \\t xCmd: %6.3f \\t yCmd: %6.3f\\n\" %(self.xCmd, self.yCmd))\n\t\t#publish action\n\t\tcmd_message.x_percentage = self.xCmd\n\t\tcmd_message.y_percentage = self.yCmd\n\t\t# time.sleep(1)\n\t\tcmd_pub.publish(cmd_message)\n\t\tprint(\"\\t--Command Sent --\\t|\")\n\t\t\n\t\t\n\n\t\t#wait for hardware to complete action\n\t\twait_for_action(False)\n\t\t# print(\"\\t--Action Complete--\\t|\")\n\t\t# print(\"\\t--Done waiting --\\t|\")\n\t\t\n\t\t#subscribe/read state\n\t\tself.state = [xState_global, yState_global]\n\t\t# print(\"\\t--state data got --\\t|\")\n\t\tself.xState = self.state[0]\n\t\t# print(\"\\t--state 0 set --\\t|\")\n\t\tself.yState = self.state[1]\n\t\t# print(\"\\t--state 1 set --\\t|\")\n\t\tprint(\"\\tState Information\\t| \\t xState: %6.3f \\t yState: %6.3f\" %(self.xState, self.yState))\n\t\tprint(\"\\t \\t| \\t xSPrev: %6.3f \\t ySPrev: %6.3f\" %(self.xStatePrev, self.yStatePrev))\n\t\tself.log_file.write(\"\\tState Information\\t| \\t xState: %6.3f \\t yState: %6.3f\\n\" %(self.xState, self.yState))\n\t\tself.log_file.write(\"\\t \\t| \\t xSPrev: %6.3f \\t ySPrev: %6.3f\\n\" %(self.xStatePrev, self.yStatePrev))\n\t\t#compute reward\n\t\t#use beginning of episode as zero\n\t\t# self.reward, self.xPos, self.yPos = rewardCalculation(xPos_global, yPos_global, self.xZero, self.yZero, self.xPosPrev, self.yPosPrev)\n\t\t#use each steps starting position to find reward\n\t\tself.reward = rewardCalculation(xPos_global, yPos_global, xPos_startstep, yPos_startstep)\n\t\t\n\n\t\tself.xPos = xPos_global\n\t\tself.yPos = yPos_global\n\n\t\tprint(\"\\tPosition Information\\t| \\t xPos: %6.3f \\t yPos: %6.3f\" %(self.xPos, self.yPos))\n\t\tprint(\"\\t \\t| \\txPosPrev: %6.3f \\tyPosPrev: %6.3f\" %(self.xPosPrev, self.yPosPrev))\n\t\tprint(\"\\t \\t| \\t xZero: %6.3f \\t yZero: %6.3f\" %(self.xZero, 
self.yZero))\n\t\tprint(\"\\tReward Information \\t| \\t Reward: %6.3f\" %(self.reward))\n\t\tself.log_file.write(\"\\tPosition Information\\t| \\t xPos: %6.3f \\t yPos: %6.3f\\n\" %(self.xPos, self.yPos))\n\t\tself.log_file.write(\"\\t \\t| \\txPosPrev: %6.3f \\tyPosPrev: %6.3f\\n\" %(self.xPosPrev, self.yPosPrev))\n\t\tself.log_file.write(\"\\t \\t| \\t xZero: %6.3f \\t yZero: %6.3f\\n\" %(self.xZero, self.yZero))\n\t\tself.log_file.write(\"\\tReward Information \\t| \\t Reward: %6.3f\\n\" %(self.reward))\n\n\t\t#assign all current data to previous data containers for next state\n\t\tself.xStatePrev = self.xState\n\t\tself.yStatePrev = self.yState\n\t\tself.xCmdPrev = self.xCmd\n\t\tself.yCmdPrev = self.yCmd\n\t\tself.xPosPrev = self.xPos\n\t\tself.yPosPrev = self.yPos\n\n\t\t#increment and finish step\n\t\tself.TotalStepCount = self.TotalStepCount + 1\n\t\t# step_count = step_count +1\n\t\t# print (\"step count: {}\", format(step_count))\n\t\t\n\t\tself.n_steps += 1\n\t\tif self.n_steps > NUM_STEPS_EPISODE:\n\t\t\tself.TotalEpisodeCount = self.TotalEpisodeCount + 1\n\t\t\tprint('====END OF EPISODE====')\n\t\t\tself.log_file.write('====END OF EPISODE====\\n')\n\t\tdone = self.n_steps > NUM_STEPS_EPISODE\n\n\t\tself.log_file.close()\n\t\t\n\t\tleft_data = open(os.path.join(dirName,\"left_data.txt\"), \"a\")\n\t\tleft_data.write(str(self.xCmd) + '\\n')\n\t\tleft_data.close()\n\t\tright_data = open(os.path.join(dirName,\"right_data.txt\"),\"a\")\n\t\tright_data.write(str(self.yCmd) + '\\n')\n\t\tright_data.close()\n\n\t\t#remap states from -1 to 1 - this was after much debugging\n\t\tself.state[0] = (self.state[0]/50)-1\n\t\tself.state[1] = (self.state[1]/50)-1\n\t\treturn self.state, self.reward, done, {}\n\n\nclass customCallback(BaseCallback):\n\t\"\"\"\n\tA custom callback that derives from ``BaseCallback``.\n\n\t:param verbose: (int) Verbosity level 0: not output 1: info 2: debug\n\t\"\"\"\n\n\tdef __init__(self, verbose=0):\n\t\tsuper(customCallback, self).__init__(verbose)\n\t\t# Those variables will be accessible in the callback\n\t\t# (they are defined in the base class)\n\t\t# The RL model\n\t\t# self.model = None # type: BaseRLModel\n\t\t# An alias for self.model.get_env(), the environment used for training\n\t\t# self.training_env = None # type: Union[gym.Env, VecEnv, None]\n\t\t# Number of time the callback was called\n\t\t# self.n_calls = 0 # type: int\n\t\t# self.num_timesteps = 0 # type: int\n\t\t# local and global variables\n\t\t# self.locals = None # type: Dict[str, Any]\n\t\t# self.globals = None # type: Dict[str, Any]\n\t\t# The logger object, used to report things in the terminal\n\t\t# self.logger = None # type: logger.Logger\n\t\t# # Sometimes, for event callback, it is useful\n\t\t# # to have access to the parent object\n\t\t# self.parent = None # type: Optional[BaseCallback]\n\t\tself.startTime = None\n\t\tself.endTime = None\n\n\tdef _on_training_start(self) -> None:\n\t\t\"\"\"\n\t\tThis method is called before the first rollout starts.\n\t\t\"\"\"\n\t\tself.startTime = time.time()\n\t\tprint(\"Begin training\")\n\t\tpass\n\n\tdef _on_rollout_start(self) -> None:\n\t\t\"\"\"\n\t\tA rollout is the collection of environment interaction\n\t\tusing the current policy.\n\t\tThis event is triggered before collecting new samples.\n\n\t\t\"\"\"\n\n\t\tprint(\"\\t--Rollout Strt --\\t|\")\n\t\tpass\n\n\tdef _on_step(self) -> bool:\n\t\t\"\"\"\n\t\tThis method will be called by the model after each call to `env.step()`.\n\n\t\tFor child callback (of an `EventCallback`), this 
will be called\n\t\twhen the event is triggered.\n\n\t\t:return: (bool) If the callback returns False, training is aborted early.\n\t\t\"\"\"\n\t\tglobal SIGMA, LEARNING_RATE\n\t\tif (self.num_timesteps % 1000) ==0:\n\t\t\t# import pdb; pdb.set_trace()\n\t\t\tt = time.time()\n\t\t\ttime_elapsed = t-self.startTime #seconds\n\t\t\tself.model.save(\"td3_model_int_test\")\n\t\t\tSIGMA = SIGMA*.9\n\t\t\t# LEARNING_RATE = LEARNING_RATE*.9\n\t\t\tprint(\"---------\" + str(self.num_timesteps) +\" steps complete | SIGMA = \" + str(SIGMA) + \" | Learning Rate: \" + str(LEARNING_RATE) + \"|----------\")\n\t\t\tprint(\"---------------Time Elapsed: \" + str(time_elapsed) + \" seconds\")\n\t\t\tf = open(os.path.join(dirName, \"learn.txt\"), \"a\")\n\t\t\tf.write(\"---------\" + str(self.num_timesteps) +\" steps complete | SIGMA = \" + str(SIGMA) + \" | Learning Rate: \" + str(LEARNING_RATE) + \"|----------\\n\")\n\t\t\tf.write(\"--------- Time Elapsed: \" + str(time_elapsed) + \" seconds -----------\\n\")\n\t\t\tf.close()\n\t\t\t\n\t\t\tself.model.action_noise = NormalActionNoise(0,SIGMA) #annealed noise\n\t\t\t# self.model.learning_rate = LEARNING_RATE\n\t\t\t# td3_noise = OrnsteinUhlenbeckActionNoise(np.zeros(a_dim), sigma*np.ones(a_dim)) \n\t\t\t\n\n\t\tprint(\"\\t--Step Done --\\t|\")\n\t\tif yPos_global > 200:\n\t\t\tinput(\"Please reset the robot to start and press enter key to continue..\")\n\n\t\treturn True\n\n\tdef _on_rollout_end(self) -> None:\n\t\t\"\"\"\n\t\tThis event is triggered before updating the policy.\n\t\t\"\"\"\n\t\tprint(\"\\t--Updte Ploicy --\\t|\")\n\t\tf = open(os.path.join(dirName, \"learn.txt\"), \"a\")\n\t\tf.write(\"\\t--Updte Ploicy --\\t|\\n\")\n\t\tf.close()\n\t\tpass\n\n\tdef _on_training_end(self) -> None:\n\t\t\"\"\"\n\t\tThis event is triggered before exiting the `learn()` method.\n\t\t\"\"\"\n\t\tself.endTime = time.time()\n\t\ttime_elapsed = (self.endTime - self.startTime)/60 #minutes\n\t\tavg = self.num_timesteps/time_elapsed\n\t\tprint(\"\\t--Train Complt --\\t|\")\n\t\tprint(\"\\t elapsed time: \" + str(time_elapsed) + \" min\\tavg tiem/step: \" + str(avg) + \" sec\")\n\n\t\tf = open(os.path.join(dirName, \"learn.txt\"), \"a\")\n\t\tf.write(\"\\t--Train Complt --\\t|\\n\")\n\t\tf.write(\"\\t--Elapsed time --\\t| \" + str(time_elapsed) + \" min\\tavg Time/Step: \" + str(avg) + \" sec\\n\")\n\t\tf.close()\n\t\tpass\n\n# Use deterministic actions for evaluation\n\n\n\nif __name__ == '__main__':\n\t\n\t#run suscriber nodes\n\tRL_subscribers()\n\tprint(\"Starting...\")\n\n\ttime.sleep(3) #give ros time to set up\n\n\t#init environmnet\n\tenv = soft_learner()\n\tenv = DummyVecEnv([lambda: env])\n\t\n\tprint('done')\n\n\tmodel = TD3.load(model_dir)\n\t\n\t# Evaluate the agentm\n\t# mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10)\n\n\tobs = env.reset()\n\tfor i in range(1000):\n\t\taction, _states = model.predict(obs)\n\t\tobs, rewards, dones, info = env.step(action)\n\n\t# a_dim = env.action_space.shape[0]\n\t# # td3_noise = OrnsteinUhlenbeckActionNoise(np.zeros(a_dim), .9*np.ones(a_dim)) \n\t# td3_noise = NormalActionNoise(0,SIGMA)\n\t# td3_env = DummyVecEnv([lambda: env])\n\t# # td3_env = env\n\t\n\t# checkpoint_on_event = CheckpointCallback(save_freq=1000, save_path= \"./logs/model_checkpoints\",\n # name_prefix='rl_model')\n\t# event_callback = EveryNTimesteps(n_steps=500, callback=checkpoint_on_event)\n\n\t# eval_callback = EvalCallback(td3_env, best_model_save_path='./logs/',\n # log_path='./logs/', eval_freq=100,\n # 
deterministic=True, render=False)\n\n\t\n\n\t# # td3_model.learning_starts = 100\n\t\n\t# custom_callback = customCallback(verbose=0)\n\t# callback = CallbackList([custom_callback, checkpoint_on_event])\n\n\t# td3_model = TD3(Td3MlpPolicy, td3_env,\n\t# \t\t\t\tgamma = GAMMA,\n\t# \t\t\t\tlearning_rate = LEARNING_RATE,\n\t# \t\t\t\tbuffer_size = BUFFER_SIZE,\n\t# \t\t\t\tlearning_starts = LEARNING_STARTS,\n\t# \t\t\t\ttrain_freq = TRAIN_FREQ,\n\t# \t\t\t\tgradient_steps = GRADIENT_STEPS,\n\t# \t\t\t\tbatch_size = BATCH_SIZE,\n\t# \t\t\t\ttau = TAU,\n\t# \t\t\t\tpolicy_delay = POLICY_DELAY,\n\t# \t\t\t\taction_noise = td3_noise,\n\t# \t\t\t\ttarget_policy_noise = TARGET_POLICY_NOISE,\n\t# \t\t\t\ttarget_noise_clip = TARGET_NOISE_CLIP,\n\t# \t\t\t\trandom_exploration = RANDOM_EXPLORATION,\n\t# \t\t\t\tverbose = VERBOSE,\n\t# \t\t\t\ttensorboard_log = TENSORBOARD_LOG,\n\t# \t\t\t\t_init_setup_model = _INIT_SETUP_MODEL,\n\t# \t\t\t\tpolicy_kwargs = POLICY_KWARGS,\n\t# \t\t\t\tfull_tensorboard_log = FULL_TENSORBOARD_LOG,\n\t# \t\t\t\tseed = SEED,\n\t# \t\t\t\tn_cpu_tf_sess = N_CPU_TF_SESS)\n\n\t# #every x episodes fun the model for y amount of episodes and evaluate it\n\t\n\t# td3_model.learn(total_timesteps = TOTAL_STEPS, callback=callback)\n\t# td3_model.save(\"td3_model\")\n\n\n\t# print(\"learning complete\")\n\n\n\n\n\n","sub_path":"src/rl_robotics_framework/src/run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":20717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"369802227","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\npub_columns_filter.py -- add needed columns, remove unused columns\n\"\"\"\n\n__author__ = \"Alex Loiacono and Nicholas Rejack\"\n__copyright__ = \"Copyright 2016 (c) Alex Loiacono and Nicholas Rejack\"\n__license__ = \"New BSD License\"\n__version__ = \"0.01\"\n\nfrom vivopump import read_csv_fp, write_csv_fp, get_parms, vivo_query\nimport sys\nimport utils\nimport time\n\ndef get_vivo_academic_articles(parms):\n \"\"\"\n Query VIVO and return a list of all the academic articles.\n @see uf_examples/publications/filters/pub_match_filter.py\n @see https://wiki.duraspace.org/display/VIVO/VIVO-ISF+1.6+relationship+diagrams%3A+Authorship\n\n :param: parms: vivo_query params\n :return: dictionary of uri keyed by DOI\n \"\"\"\n query = \"\"\"\n SELECT\n ?uri ?doi\n WHERE {\n ?uri a vivo:InformationResource .\n ?uri bibo:doi ?doi .\n }\n \"\"\"\n results = vivo_query(query, parms)\n bindings = results['results']['bindings']\n doi_list = [b['doi']['value'] for b in bindings]\n uri_list = [b['uri']['value'] for b in bindings]\n return dict(zip(doi_list, uri_list))\n\ndate = time.strftime(\"%Y_%m_%d\")\n\nfile_name = 'vivo_author_list.csv'\nutils.print_err(\"Using static disambiguation file: {}\".format(file_name))\n\ndisamb_file = open('data_out/disambiguation_'+date+'.txt', 'w+')\n\nauthors_missing_pubs_file = open('data_out/authors_missing_pubs_'+date+'.txt', 'w+')\n\nauthors_missing_pubs_dict = {}\n\n# get dictionaries of authors keyed by name parts\nvivo_auth_disambig_data = utils.get_vivo_disambiguation_data_from_csv(\n file_name)\n\nparms = get_parms()\ndata_in = read_csv_fp(sys.stdin)\nutils.print_err(\"{} rows in the input\".format(len(data_in)))\n\ndata_out = {}\n# get dictionary of pub uri keyed by doi\nvivo_pubs = get_vivo_academic_articles(parms)\n\nutils.print_err('{} publications found in VIVO'.format(len(vivo_pubs)))\n# print >>sys.stderr, vivo_pubs\n\nrow_out = 1\n\ndisamb_dict = []\n\nfor row, data in data_in.items():\n\n if data['doi'] not in vivo_pubs:\n #data_out[row]['pub_uri'] = ''\n authors_missing_pubs_dict[row] = data\n continue\n\n data_out[row] = data\n\n utils.print_err(\"data is: \\n{}\".format(data))\n utils.print_err(\"row_out: {} ||| row: {}\".format(row_out,row))\n\n data_out[row]['pub_uri'] = vivo_pubs[data['doi']]\n\n if data['uf'] == 'false':\n # Always put in the non-UF author as new\n #row_out += 1\n data_out[row] = data\n data_out[row]['uri'] = ''\n #utils.print_err(\"UF entry is false {}\".format(row_index))\n else:\n author_uris = utils.get_author_disambiguation_data(\n vivo_auth_disambig_data,\n data['last'],\n data['first'],\n data['middle'])\n\n count = len(author_uris)\n utils.print_err(\"author_uris: {}\".format(author_uris))\n if count == 0:\n # There is no match in the current VIVO ==> add a new UF author\n #row_out += 1\n data_out[row] = data\n data_out[row]['uri'] = ''\n elif count == 1:\n data_out[row]['uri'] = author_uris[0]\n else:\n utils.print_err(\"Disamb: {}\".format(author_uris))\n data_out[row]['uri'] = author_uris[0]\n disamb_dict.append(\"Paper: {} -- written by {} has uris : \\n{}\\n\\n\".format(data['pub_uri'], data['display_name'], author_uris))\n row_out += 1\n\nutils.print_err('{} rows in the output'.format(len(data_out)))\n\nfor line in disamb_dict:\n disamb_file.write(line)\n\ndisamb_file.close()\n\nwrite_csv_fp(authors_missing_pubs_file, authors_missing_pubs_dict)\nwrite_csv_fp(sys.stdout, 
data_out)","sub_path":"uf_examples/publications/filters/author_pubs_match_filter.py","file_name":"author_pubs_match_filter.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"332945721","text":"import hashlib\nimport logging\n\nimport utils\nfrom settings import CAPTURE_DEVICE, SSD_FPN_OPENVINO_CLASSES_PATH, \\\n SSD_FPN_OPENVINO_INFO_PATH, SSD_FPN_OPENVINO_WEIGHTS_PATH, SSD_FPN_OPENVINO_CONFIG_PATH, \\\n SSD_FPN_OPENVINO_DIMENSIONS_PATH, SSD_FPN_OPENVINO_CLASSES_PATH_CPU, \\\n SSD_FPN_OPENVINO_INFO_PATH_CPU, SSD_FPN_OPENVINO_WEIGHTS_PATH_CPU, SSD_FPN_OPENVINO_CONFIG_PATH_CPU, \\\n SSD_FPN_OPENVINO_DIMENSIONS_PATH_CPU, PARALLEL_IMAGES\n\nlogger = logging.getLogger(__name__)\n\n\nclass ModelService:\n\n def __init__(self, model_arch, run_on_cpu=False):\n if model_arch == 'ssd_fpn_openvino':\n from model.ssd_openvino_detector import BinDetectorOpenVino\n\n if run_on_cpu:\n logger.info(\"Running on CPU\")\n self.classes = utils.parse_classes_file(SSD_FPN_OPENVINO_CLASSES_PATH_CPU)\n model_info = utils.parse_info_file(SSD_FPN_OPENVINO_INFO_PATH_CPU)\n weights_path = SSD_FPN_OPENVINO_WEIGHTS_PATH_CPU\n config_path = SSD_FPN_OPENVINO_CONFIG_PATH_CPU\n dimension_path = SSD_FPN_OPENVINO_DIMENSIONS_PATH_CPU\n else:\n logger.info(\"running on VPU\")\n self.classes = utils.parse_classes_file(SSD_FPN_OPENVINO_CLASSES_PATH)\n model_info = utils.parse_info_file(SSD_FPN_OPENVINO_INFO_PATH)\n weights_path = SSD_FPN_OPENVINO_WEIGHTS_PATH\n config_path = SSD_FPN_OPENVINO_CONFIG_PATH\n dimension_path = SSD_FPN_OPENVINO_DIMENSIONS_PATH\n\n h = ModelService.get_model_hash(weights_path)\n if 'FRAMEWORK_VERSION' in model_info:\n ModelService.check_framework('openvino', model_info['FRAMEWORK_VERSION'], 'inference_engine')\n if 'SHA1' in model_info:\n ModelService.check_hash(h, model_info['SHA1'])\n self.bifocal = model_info['BIFOCAL']\n self.model = BinDetectorOpenVino(config_path,\n weights_path,\n num_requests=PARALLEL_IMAGES * 2 if self.bifocal else PARALLEL_IMAGES,\n classes=self.classes,\n model_version=str(model_info['MODEL_VERSION']),\n threshold=model_info['THRESHOLD'],\n box_area_limit=model_info['BOX_AREA_LIMIT'],\n estimate_distance=True,\n dimensions_json=dimension_path,\n capture_device=CAPTURE_DEVICE,\n resize_h=349, resize_w=349,\n run_on_cpu=run_on_cpu)\n\n else:\n raise ValueError(\n \"Invalid model type identifier: \" + model_arch +\n \"Available formats are 'ssd_fpn_openvino'\"\n )\n logger.info(model_info)\n logger.info(\"Detector SHA1 %s\" % h)\n\n @staticmethod\n def add_object_coordinates(results, locations):\n def is_nan(n):\n try:\n float(n)\n return True\n except ValueError:\n return False\n\n for result, location in zip(results, locations):\n for r in result:\n if is_nan(location['lat']) or is_nan(location['long']) or is_nan(location['bearing']):\n lat, long = 0, 0 # we no longer support strings on this field, float only\n else:\n # adjust bearing of object relative to bearing of the truck\n bearing_obj = location['bearing'] + r['angle']\n if bearing_obj > 360:\n bearing_obj -= 360\n if bearing_obj < 0:\n bearing_obj += 360\n\n lat, long = utils.calculate_dpos(latitude=location['lat'], longitude=location['long'],\n head=bearing_obj,\n dist=r['distance'])\n\n r['latitude'], r['longitude'] = lat, long\n\n # Runs the provided image through the model, and returns an (image, result_dict) tuple\n def apply_model(self, images, locations):\n results = self.model.predict_on_image(images, bifocal=self.bifocal)\n self.add_object_coordinates(results, locations)\n return results\n\n # check the framework for correct version, if submodule name provided the version is checked for the submodule\n @staticmethod\n def check_framework(framework_name, 
required_version, submodule=None):\n        framework = __import__(framework_name)\n        if submodule:\n            framework = framework.__dict__[submodule]\n        if framework.__version__ != required_version:\n            raise ValueError(\n                \"Invalid framework version for {}. {} found, but {} is required\".format(\n                    framework_name, framework.__version__, required_version))\n\n    @staticmethod\n    def check_hash(computed_hash, info_hash):\n        if computed_hash != info_hash:\n            raise ValueError(\n                \"Invalid model binary. Hash check failed. Expected {} but {} was found\".format(\n                    info_hash, computed_hash))\n\n    @staticmethod\n    def get_model_hash(model_path):\n        BUF_SIZE = 65536  # read in 64 KiB chunks to bound memory use\n\n        sha1 = hashlib.sha1()\n\n        with open(model_path, 'rb') as f:\n            while True:\n                data = f.read(BUF_SIZE)\n                if not data:\n                    break\n                sha1.update(data)\n\n        return str(sha1.hexdigest())\n","sub_path":"pothole/map_scripts/model_service.py","file_name":"model_service.py","file_ext":"py","file_size_in_byte":5754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
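+A note on ModelService.get_model_hash above: digesting the weights file in 64 KiB chunks keeps memory flat no matter how large the binary is. A standalone sketch of the same chunked-SHA1 pattern (the function name, file name, and expected digest are illustrative, not from the source):
+
+import hashlib
+
+def file_sha1(path, buf_size=65536):
+    # feed the digest incrementally so a multi-GB weights file
+    # never has to be loaded into memory at once
+    sha1 = hashlib.sha1()
+    with open(path, 'rb') as f:
+        while True:
+            chunk = f.read(buf_size)
+            if not chunk:
+                break
+            sha1.update(chunk)
+    return sha1.hexdigest()
+
+# e.g. compare against the SHA1 recorded in the model's info file:
+# assert file_sha1('frozen_model.bin') == model_info['SHA1']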
+{"seq_id":"20765037","text":"class Node:\n def __init__(self, data, next1):\n self.data = data\n self.next = next1\n\n\nclass Linkedlist:\n def __init__(self):\n self.head = None\n self.size = 0\n\n def length(self):\n return self.size\n\n def is_empty(self):\n return self.size == 0\n\n def insert_at_the_beginning(self, data):\n self.insert_with_index(0, data)\n\n def insert_at_the_ending(self, data):\n self.insert_with_index(self.size, data)\n\n def insert_with_index(self, index, data):\n if index > self.size or index < 0:\n print(\"check given\", index, \"index value and enter again\")\n return False\n if index == 0:\n self.head = Node(data, self.head)\n else:\n current = self.head\n for i in range(index - 1):\n current = current.next\n current.next = Node(data, current.next)\n self.size += 1\n\n def peek_top(self):\n return self.peek_index(0)\n\n def peek_bottom(self):\n return self.peek_index(self.size - 1)\n\n def peek_index(self, index):\n if index >= self.size or index < 0:\n print(\"check given\", index, \"index value and enter again\")\n return False\n current = self.head\n for i in range(index):\n current = current.next\n return current.data\n\n def peek_element(self, data):\n current = self.head\n while current.data != data:\n if current.next is None:\n print(\"element\", data, \"not found\")\n return False\n current = current.next\n print(\"element\", data, \"is found\")\n return True\n\n def delete_top_element(self):\n return self.delete_with_index(0)\n\n def delete_bottom_element(self):\n return self.delete_with_index(self.size - 1)\n\n def delete_with_index(self, index):\n if index >= self.size or index < 0:\n print(\"check given\", index, \"index value and enter again\")\n return False\n self.size -= 1\n if index == 0:\n temp = self.head\n self.head = self.head.next\n return temp.data\n current = self.head\n for i in range(index - 1):\n current = current.next\n temp = current.next\n current.next = current.next.next\n return temp.data\n\n def delete_with_value(self, data):\n current = self.head\n previous = current\n while current.data != data:\n if current.next is None:\n print(\"element\", data, \"not found\")\n return False\n previous = current\n current = current.next\n temp = previous.next\n previous.next = current.next\n print(\"element\", data, \"is found and deleted\")\n self.size -= 1\n return temp.data\n\n def print_val(self):\n current = self.head\n while current:\n print(current.data, \"\\b--->\", end=\"\")\n current = current.next\n print()\n\n\nlinked_list = Linkedlist()\n\n\ndef trail1():\n linked_list.insert_at_the_beginning(45)\n linked_list.insert_at_the_beginning(65)\n linked_list.insert_at_the_beginning(34)\n linked_list.insert_at_the_beginning(56)\n linked_list.insert_at_the_beginning(78)\n linked_list.insert_at_the_beginning(98)\n linked_list.insert_at_the_beginning(63)\n linked_list.insert_at_the_beginning(31)\n linked_list.print_val()\n\n\ndef trail2():\n linked_list.insert_at_the_beginning(78)\n linked_list.insert_at_the_ending(67778)\n linked_list.insert_at_the_ending(899)\n linked_list.insert_at_the_ending(99)\n linked_list.print_val()\n trail1()\n\n\ndef trail3():\n linked_list.insert_at_the_beginning(34)\n linked_list.insert_at_the_beginning(56)\n linked_list.insert_at_the_beginning(78)\n linked_list.insert_at_the_beginning(31)\n linked_list.insert_at_the_ending(12)\n linked_list.insert_at_the_ending(14)\n linked_list.insert_at_the_ending(56)\n linked_list.insert_with_index(90, 345)\n linked_list.insert_with_index(5, 23)\n 
print(linked_list.peek_index(2))\n print(linked_list.peek_bottom())\n print(linked_list.peek_top())\n linked_list.peek_element(16)\n linked_list.peek_element(33)\n linked_list.insert_at_the_beginning(128)\n linked_list.insert_at_the_beginning(784)\n linked_list.insert_at_the_beginning(314)\n linked_list.print_val()\n print(linked_list.delete_with_index(5))\n linked_list.print_val()\n print(linked_list.delete_top_element())\n linked_list.print_val()\n print(linked_list.delete_bottom_element())\n linked_list.print_val()\n linked_list.delete_with_value(12)\n linked_list.print_val()\n # trail2()\n\n # this is siva\n # signing off\n\n\nif __name__ == \"__main__\":\n trail3()","sub_path":"code-lab/DSA - Singly Linked List API.py","file_name":"DSA - Singly Linked List API.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
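+A short usage sketch of the Linkedlist API above; note that out-of-range indices print a warning and return False rather than raising (expected output in the comments is approximate):
+
+ll = Linkedlist()
+ll.insert_at_the_beginning(2)    # list: 2
+ll.insert_at_the_ending(3)       # list: 2--->3
+ll.insert_with_index(1, 9)       # list: 2--->9--->3
+print(ll.peek_index(1))          # 9
+ll.insert_with_index(5, 1)       # index 5 out of range: warns and returns False
+print(ll.delete_with_index(0))   # 2
+ll.print_val()                   # 9--->3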
+{"seq_id":"132829464","text":"#!/usr/bin/python3\n\n# This has routines which process the gadgets collected in get_text.\n# Only gadgets with length 2 are taken and classified into 8 categories. \n\nimport sys\n\nfrom general import *\n\n# This routine will collect gadgets with N assembly instructions in it(excluding ret).\n# If N = 1, then the gadget will be of the form \"Inst; ret\"\ndef getLNGadgets(GadgetList, n) : \n\n nGadgetsList = list()\n\n for gadget in GadgetList:\n\n # Only ret instruction, don't do anything. \n if len(gadget) == 1: \n continue\n \n # If gadget length is 2, just append.\n elif len(gadget) == 2 : \n nGadgetsList.append(gadget)\n \n # If gadget length is > 2, get only 2 instructions and append.\n elif len(gadget) > 2 : \n \n newgadget = list()\n newgadget.append(gadget[-2])\n newgadget.append(gadget[-1])\n nGadgetsList.append(newgadget)\n \n return nGadgetsList\n\n# Takes in a list of 2 strings and returns it.\ndef getStrippedOperands(operands) : \n\n a = \"\"\n b = \"\"\n operands[0].lstrip()\n operands[0].rstrip()\n operands[1].lstrip()\n operands[1].rstrip()\n\n for char in operands[0]:\n if(char != ' '):\n a+= char\n \n for char in operands[1]:\n if(char != ' '):\n b+= char\n\n return [a, b]\n\n# Takes in one string and strips it. \ndef getStrippedOperand(operand) : \n\n a = \"\"\n operand.rstrip()\n operand.lstrip()\n\n for char in operand:\n if(char != ' '):\n a+= char\n\n return a\n\n\n# This categorises all the 1-instruction gadgets present. \n# Returns a list of lists - there are 8 main lists. \n# Each of those 8 lists has gadgets of it's category. \n# Refer general.py to know what those categories are.\ndef categorize(TwoInstGadgets): \n\n ALLGADGETS = [[] for x in range(TOTAL_CATEGORIES)]\n print(\"Length of ALLGADGETS = \", len(ALLGADGETS))\n\n x = 0\n while x < len(TwoInstGadgets) : \n \n gadget = TwoInstGadgets[x]\n inst = gadget[0]\n \n # TODO: Add the derivatives of \"mov\"\n if inst.mnemonic == \"mov\" : \n \n operands = inst.op_str.split(',')\n operands = getStrippedOperands(operands)\n\n if (operand[0] in REGISTERS) and (operands[1] in REGISTERS) : \n ALLGADGETS[MOVREGG].append(gadget)\n\n elif operand[0] in REGISTERS and operands[1].isnumeric() : \n ALLGADGETS[LOADCONSTG].append(gadget)\n\n elif operands[0] in REGISTERS and not(operands[1].isnumeric()) : \n ALLGADGETS[LOADMEMG].append(gadget)\n\n elif operands[0] not in REGISTERS and not(operands[1].isnumeric()): \n ALLGADGETS[STOREMEMG].append(gadget)\n\n # \n elif inst.mnemonic == \"pop\" : \n \n operand = inst.op_str\n operand = getStrippedOperand(operand)\n\n if operand in REGISTERS : \n ALLGADGETS[LOADCONSTG].append(gadget)\n \n else : \n ALLGADGETS[STOREMEMG].append(gadget)\n\n # TODO: Add \"div\"\n elif inst.mnemonic == \"add\" or inst.mnemonic == \"sub\" or inst.mnemonic == \"mul\": \n \n operands = inst.op_str.split(',')\n operands = getStrippedOperands(operands)\n\n # Original condition: operands[0] in REGISTERS) and ((operands[1] in REGISTERS) or (operands[1].isnumeric())\n if (operands[0] in REGISTERS) and (operands[1].isnumeric()) : \n \n ALLGADGETS[ARITHMETICG].append(gadget)\n \n else : \n print(\"Found a add / sub / mul instruction playing with memory\")\n print(\"As of now, not doing anything with memory-arithmetic instructions\", end = '\\n\\n')\n \n\n elif inst.mnemonic == \"inc\" or inst.mnemonic == \"dec\": \n\n operand = inst.op_str\n operand = getStrippedOperand(operand)\n\n if operand in REGISTERS : \n ALLGADGETS[ARITHMETICG].append(gadget)\n \n else: \n print(\"Found 
an inc / dec instruction playing with memory\")\n                print(\"As of now, not doing anything with memory-arithmetic instructions\", end = '\\n\\n')\n\n\n        elif inst.mnemonic == \"xor\": \n            \n            operands = inst.op_str.split(',')\n            operands = getStrippedOperands(operands)\n\n            if (operands[0] in REGISTERS) and (operands[1] in REGISTERS) :\n\n                if (operands[0] == operands[1]) : \n                    ALLGADGETS[LOADCONSTG].append(gadget)\n                \n                else : \n                    ALLGADGETS[ARITHMETICG].append(gadget)\n            \n            else : \n                print(\"Found an xor instruction playing with memory\")\n                print(\"As of now, not doing anything with memory-arithmetic instruction\", end = '\\n\\n')\n\n        elif inst.mnemonic == \"and\" : \n\n            operands = inst.op_str.split(',')\n            operands = getStrippedOperands(operands)\n\n            # TODO: if int(operands[1]) == 0xffffffffffffffff : \n            # ALLGADGETS[LOADCONSTG].append(gadget)\n            # This is like loading (-1) into operands[0]\n\n            if (operands[0] in REGISTERS) and (operands[1] in REGISTERS) :\n                ALLGADGETS[ARITHMETICG].append(gadget)\n\n            else : \n                print(\"Found an and instruction playing with memory\")\n                print(\"As of now, not doing anything with memory-arithmetic instruction\", end = '\\n\\n')\n\n\n        elif inst.mnemonic == \"or\" : \n\n            operands = inst.op_str.split(',')\n            operands = getStrippedOperands(operands)\n\n            if (operands[0] in REGISTERS) and (operands[1] in REGISTERS) :\n                ALLGADGETS[ARITHMETICG].append(gadget)\n\n            else : \n                print(\"Found an or instruction playing with memory\")\n                print(\"As of now, not doing anything with memory-arithmetic instruction\", end = '\\n\\n')\n\n        \n        # Covering the special instructions without which we would have no job to do :P\n        elif inst.mnemonic == \"int\" or inst.mnemonic == \"syscall\" : \n            ALLGADGETS[SPECIAL_INST].append(gadget)\n\n        else : \n            \n            print(\"Found a gadget that has not been categorized\")\n            print(\"Need help in adding these!\", end = '\\n\\n')\n\n        # Keep the loop going!\n        x = x + 1\n\n\n    # At this point, ALLGADGETS has duplicate gadgets also. \n    \n    # This will remove all duplicate gadgets\n    UniqueGadgetsList = getSetOfGadgets(ALLGADGETS)\n\n    return UniqueGadgetsList\n\n# This routine removes all repeating gadgets. 
\n# Example: \n    # Suppose there is \"xor rax, rax; ret\" at 0x1234, 0x2345, 0x3456\n    # This keeps only one instance and removes all others\n\ndef getSetOfGadgets(ListofLists) : \n\n    # This function should be fixed first.\n\n    # x = 0\n    # while x < len(ListofLists) : \n\n    #     y = 0\n    #     while y < len(ListofLists[x]) : \n\n    #         gadget = ListofLists[x][y]\n    #         # ALLGADGETS[x].append(gadget)\n    #         z = 0\n    #         for z in ListofLists[x][y] : \n\n    #             if (gadget == z) or (gadget[0].address == z[0].address) or ((gadget[0].mnemonic == z[0].mnemonic) and (gadget[0].op_str == z[0].op_str)) :\n    #                 ListofLists[x].remove(z)\n\n    #             z = z + 1\n\n    #         y = y + 1\n\n    #     x = x + 1 \n\n    return ListofLists\n    \n\n\n# From the categorized gadgets, this routine will return a list of gadgets belonging to the queried category and containing target register.\ndef queryGadgets(GadgetList, category, targetReg):\n\n    # Basic error handling!\n    if category < 0 or category > 7 : \n        print(\"Error: category not present\")\n        print(\"Exited in categorize.queryGadgets\")\n        sys.exit()\n\n    L = GadgetList[category]\n\n    ReturnList = list()\n\n    x = 0\n    while x < len(L) : \n        \n        gadget = L[x]\n        inst = gadget[0]\n\n        operands = inst.op_str.split(',')\n        if len(operands) == 2: \n            operands = getStrippedOperands(operands)\n\n            if operands[0] == targetReg : \n                ReturnList.append(gadget)\n        \n        # Keep the loop going!\n        x = x + 1\n    \n    return ReturnList\n\n# Returns a list of int gadgets if it is found.\n# If not found, it returns an empty list\ndef checkIfIntPresent(GadgetList) : \n\n    specialList = GadgetList[SPECIAL_INST]\n    intList = list()\n\n    present = 0\n\n    x = 0\n    while x < len(specialList) : \n        \n        gadget = specialList[x]\n        inst = gadget[0]\n        if inst.mnemonic == \"int\" and inst.op_str == \"0x80\": \n            intList.append(gadget)\n        \n        x = x + 1\n    \n    return intList\n\n\n# Returns a list of syscall gadgets if it is found.\n# If not found, it returns an empty list\ndef checkIfSyscallPresent(GadgetList) : \n\n    specialList = GadgetList[SPECIAL_INST]\n    syscallList = list()\n\n    present = 0\n\n    x = 0\n    while x < len(specialList) : \n        \n        gadget = specialList[x]\n        inst = gadget[0]\n        if inst.mnemonic == \"syscall\": \n            syscallList.append(gadget)\n        \n        x = x + 1\n    \n    return syscallList\n    \n    \n    ","sub_path":"categorize.py","file_name":"categorize.py","file_ext":"py","file_size_in_byte":9284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
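+getSetOfGadgets above is a stub: the commented-out loop removes elements from the lists it is iterating over, which is presumably why it was disabled. A sketch of the stated behaviour, keeping the first gadget per unique (mnemonic, operands) pair in each category; the function name is illustrative and this is not the author's implementation:
+
+def get_unique_gadgets(list_of_lists):
+    unique = []
+    for category in list_of_lists:
+        seen = set()
+        kept = []
+        for gadget in category:
+            # a 2-instruction gadget is identified by its first instruction;
+            # the trailing 'ret' is common to all of them
+            key = (gadget[0].mnemonic, gadget[0].op_str)
+            if key not in seen:
+                seen.add(key)
+                kept.append(gadget)
+        unique.append(kept)
+    return unique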
+{"seq_id":"148889495","text":"#!/usr/bin/env python\r\n\r\nimport matplotlib\r\n#import matplotlib.pyplot as plt\r\nimport h5py\r\nimport numpy\r\nimport os, sys, inspect\r\nimport warnings\r\nimport matplotlib.ticker as ticker\r\n\r\n#show_rN = True\r\nshow_rN = False\r\n\r\n#makePDF = True\r\nmakePDF = False\r\nfor arg in sys.argv:\r\n if arg.lower()=='pdf':\r\n makePDF = True\r\n\r\nif makePDF:\r\n matplotlib.use('PDF')\r\nelse:\r\n matplotlib.use('qt5agg')\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nprint(\"This is \"+ inspect.getfile(inspect.currentframe()))\r\n\r\n#########\r\n##INPUT##\r\n#########\r\n\r\nquantityToPlot = \"BHat\"\r\n\r\nfilename = 'sfincsOutput.h5'\r\n\r\nFigSize = (12,10)\r\n\r\nfont = {'size':25}\r\nmatplotlib.rc('font', **font)\r\nmatplotlib.rc('lines',markeredgewidth=0,markersize=3,linewidth=2.5)\r\nmatplotlib.rc('axes',linewidth=1.5)\r\n\r\n#matplotlib.rcParams['mathtext.default'] = 'it'\r\n#matplotlib.rcParams['text.usetex'] = True\r\n\r\nzFactor = 1 ##T\r\n###W7-X##\r\n#xAxisTicks = [r'$0$', r'$\\pi/10$', r'$2\\pi/10$', r'$3\\pi/10$', r'$4\\pi/10$']\r\n###LHD\r\n##xAxisTicks = [r'$0$', r'$\\pi/20$', r'$2\\pi/20$', r'$3\\pi/20$', r'$4\\pi/20$']\r\n#\r\n#yAxisTicks = [r'$0$', r'$\\pi/2$', r'$\\pi$', r'$3\\pi/2$', r'$2\\pi$']\r\n\r\n\r\nfig = plt.figure(figsize=FigSize)\r\nfig.patch.set_facecolor('white')\r\nnumRows = 1\r\nnumCols = 1\r\n#iteration = 0\r\nnumContours = 100\r\n#ContourLevels = [2.7, 2.8, 2.9, 3.0, 3.1, 3.2]\r\nnumLevels = 5\r\n\r\nColorMap = 'rainbow'\r\n\r\n#############\r\n##END INPUT##\r\n#############\r\n\r\n#def fmt_cbar(x, pos):\r\n# if x == 0.0:\r\n# #return r'${}$'.format(x)\r\n# return r'{}'.format(x)\r\n# a, b = '{:.1e}'.format(x).split('e')\r\n# b = int(b)\r\n# #return r'${} \\cdot 10^{{{}}}$'.format(a, b)\r\n# return r'${} \\cdot 10^{{{}}}$'.format(a, b)\r\n\r\ndef fmt_xy_axis(x, pos):\r\n #return r'${}$'.format(x)\r\n #return r'${}$'.format('{:1.2f}'.format(x))\r\n return r'{}'.format('{:1.2f}'.format(x))\r\n\r\n#for i in range(6):\r\nprint (\"Processing file \",filename)\r\nf = h5py.File(filename,'r')\r\ntheta = f[\"theta\"][()]\r\nzeta = f[\"zeta\"][()]\r\nBHat = f[quantityToPlot][()]\r\nrN = f[\"rN\"][()]\r\nf.close()\r\n\r\nprint (\"theta max: \" + str(numpy.amax(theta)))\r\nprint (\"zeta max: \" + str(numpy.amax(zeta)))\r\n\r\nzMinData = zFactor*numpy.amin(BHat[:,:])\r\nzMaxData = zFactor*numpy.amax(BHat[:,:])\r\nprint (\"zMin = \" + str(zMinData))\r\nprint (\"zMax = \" + str(zMaxData))\r\n\r\n\r\ndelta = (numpy.amax(BHat) - numpy.amin(BHat)) / numLevels\r\nContourLevels = numpy.arange(numpy.amin(BHat), numpy.amax(BHat) + delta/2.0, delta)\r\nContourLevels = zFactor*ContourLevels\r\n \r\nax = plt.subplot(numRows,numCols,1)\r\n #plt.contourf(zeta,theta,1000*numpy.fliplr(BHat[:,:,iteration].transpose()),numContours)\r\nBPlot = plt.contourf(zeta,theta,zFactor*BHat.transpose(),numContours, cmap=plt.get_cmap(ColorMap))\r\nBPlot2 = plt.contour(BPlot,levels=ContourLevels, colors='k', hold='on')\r\n#BPlot2 = plt.contour(BPlot,levels=BPlot.levels[::2], colors='k', hold='on')\r\n#plt.xlabel(r'$\\zeta$' + \" \" + r'$\\mathrm{[rad]}$')\r\n#plt.ylabel(r'$\\theta$'+ \" \" + r'$\\mathrm{[rad]}$')\r\nplt.xlabel(r'zeta' + \" \" + r'[rad]')\r\nplt.ylabel(r'theta'+ \" \" + r'[rad]')\r\n#plt.zlabel(r'$B$'+ ' 
[T]')\r\nplt.xticks([0,max(zeta)/4,max(zeta)/2,3*max(zeta)/4,max(zeta)])\r\nplt.yticks([0,max(theta)/4,max(theta)/2,3*max(theta)/4,max(theta)])\r\n#plt.gca().axes.xaxis.set_ticklabels(xAxisTicks)\r\n#plt.gca().axes.yaxis.set_ticklabels(yAxisTicks)\r\n\r\n#plt.gca().axes.xaxis.set_label_coords(0.5,-0.09)\r\n#plt.gca().axes.yaxis.set_label_coords(-0.09,0.5)\r\nplt.gca().axes.xaxis.set_label_coords(0.5,-0.05)\r\nplt.gca().axes.yaxis.set_label_coords(-0.09,0.5)\r\n\r\n#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\r\n\r\nif show_rN:\r\n plt.title('rN = '+str(rN))\r\n\r\n#cbar = plt.colorbar(BPlot, label=r'$B$'+ ' [T]', ticks=ContourLevels)\r\n#cbar = plt.colorbar(BPlot, label=r'$\\Phi_1$'+ ' [V]', ticks=BPlot.levels[::2])\r\n#cbar.add_lines(BPlot2)\r\n#cbar = plt.colorbar(BPlot, format=ticker.FuncFormatter(fmt_xy_axis), ticks=ContourLevels)\r\ncbar = plt.colorbar(BPlot, ticks=ContourLevels)\r\n#cbar.ax.set_ylabel(r'$B$'+ \" \" + r'$\\mathrm{[T]}$', rotation=0, labelpad=10)\r\ncbar.ax.set_ylabel(r'B'+ \" \" + r'[T]', rotation=0, labelpad=10)\r\n\r\n#with warnings.catch_warnings():\r\n# warnings.simplefilter(\"always\")\r\n#plt.clabel(BPlot2, fmt='%2.1f', colors='k', fontsize=14)\r\n#plt.clabel(BPlot2, fmt=ticker.FuncFormatter(fmt_xy_axis), colors='k', fontsize=18, inline=False)\r\nplt.clabel(BPlot2, colors='k', fontsize=18, inline=False)\r\n\r\n#plt.subplots_adjust(wspace=0.27)\r\n\r\nprint (BHat.shape)\r\n\r\nif makePDF:\r\n print (\"Saving PDF\")\r\n\r\n if len(sys.argv)>2 : #Use the substituted name as file name\r\n print (\"Writing plot to \" + os.getcwd() + \"/\" + sys.argv[2] + \".pdf.\")\r\n plt.savefig(sys.argv[2] + \".pdf\", orientation = 'landscape', papertype='letter')\r\n else :\r\n head, tail = os.path.split(inspect.getfile(inspect.currentframe()))\r\n print (\"Writing plot to \" + os.getcwd() + \"/\" + tail + \".pdf.\")\r\n plt.savefig(tail+'.pdf', orientation = 'landscape', papertype='letter')\r\nelse:\r\n plt.show()\r\n","sub_path":"tools/Albert/version3/plot_tools/plotB_Python3.py","file_name":"plotB_Python3.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
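+The level computation in the plotting script above builds numLevels+1 contour boundaries with arange plus a half-delta endpoint fudge; numpy.linspace expresses the same thing directly and sidesteps the floating-point endpoint worry. A sketch under the same inputs (the function name is illustrative):
+
+import numpy
+
+def contour_levels(data, num_levels, z_factor=1.0):
+    # num_levels + 1 evenly spaced boundaries from min(data) to max(data), inclusive
+    return z_factor * numpy.linspace(numpy.amin(data), numpy.amax(data), num_levels + 1)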
+{"seq_id":"98286061","text":"import os\nfrom csv import writer\nimport csv\nfrom datetime import datetime\nimport sounddevice as sd\nimport scipy.io.wavfile as wav\nfrom scipy.io.wavfile import write\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom time import sleep, perf_counter\nfrom threading import Thread\n\nprint(tf.version.VERSION)\n\n# Load the trained tensorflow Model\n# Define the class array\nmodel = tf.keras.models.load_model('saved_model/model')\nclass_names = ['motorbike', 'city', 'multiple-cars', 'single-cars']\n\n\n# Method that returns the current date & time to sort the collected data\ndef get_date_time():\n now = datetime.now()\n return now.strftime(\"%d-%m---%H-%M-%S\")\n\n\n# Define all Folders\n# wav_dir: collected audio files\n# png_dir: spectrograms as png of the audio file\n# classified_dir: spectrograms with label as name\n# csv_file: csv file to organize the collected data\nwav_dir = 'snippets/wav/' + get_date_time() + '/'\npng_dir = 'snippets/png/' + get_date_time() + '/'\nclassified_dir = 'snippets/classified/' + get_date_time() + '/'\ncsv_file = 'snippets/snippets.csv'\n\n\ndef get_current_couter(csvfilename):\n with open(csvfilename, \"r\", encoding=\"utf-8\", errors=\"ignore\") as scraped:\n line = scraped.readlines()[-1]\n return int(line[:1])\n\n\n# Snippet counter to give each audio file an ID\nsnippet_counter = get_current_couter(csv_file)\n\n\n# Create the needed folders to save the collected data\nos.mkdir(wav_dir)\nos.mkdir(png_dir)\nos.mkdir(classified_dir)\n\n\n# Get the current timestamp that is stored in the csv file\ndef get_timestamp():\n now = datetime.now()\n return now.strftime(\"%H:%M:%S %d-%m-%y\")\n\n\n# Write new collected data to the csv\n# Structure: TODO\ndef append_to_csv(list):\n with open(csv_file, 'a+', newline='') as write_obj:\n csv_writer = writer(write_obj)\n csv_writer.writerow(list)\n\n\n# Rename the collected file, so that the label is in the filename\ndef rename_file(file, label):\n name = os.path.splitext(os.path.basename(file))[0]\n updated_name = classified_dir + name + '_' + label + '.png'\n os.rename(file, updated_name)\n return updated_name\n\n\n# Record a 2 seconds wav file with 44100Hz\ndef record(file):\n fs = 44100\n seconds = 2\n print('recording...')\n recording = sd.rec(int(seconds * fs), samplerate=fs, channels=1)\n sd.wait()\n write(file, fs, recording)\n\n\n# Convert the recorded wav file to spectrogram with matplotlib\ndef wav_to_spectrogram(audio_path, save_path, dimensions=(128, 128), noverlap=16, cmap='gray_r'):\n sample_rate, samples = wav.read(audio_path)\n fig = plt.figure()\n fig.set_size_inches(\n (dimensions[0]/fig.get_dpi(), dimensions[1]/fig.get_dpi()))\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.specgram(samples, Fs=2, noverlap=noverlap)\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n fig.savefig(save_path, bbox_inches=\"tight\", pad_inches=0)\n plt.close(fig)\n\n\n# Get the prediction of a wav audio file that was recorded\ndef get_prediction(file, name_wav):\n img = keras.preprocessing.image.load_img(file, target_size=(128, 128))\n img_array = keras.preprocessing.image.img_to_array(img)\n img_array = tf.expand_dims(img_array, 0)\n\n predictions = model.predict(img_array)\n score = tf.nn.softmax(predictions[0])\n\n print('-----------------------------------------------------------')\n print('')\n print(\n \"snippet {}: {} [{:.2f}]\"\n 
.format(snippet_counter, class_names[np.argmax(score)], 100 * np.max(score))\n    )\n    print('')\n    label = class_names[np.argmax(score)]\n    updated_name = rename_file(file, label)\n\n    csv_list = [snippet_counter, get_timestamp(), name_wav,\n                updated_name, label, 0]\n    append_to_csv(csv_list)\n\n\n# Recording Task start of the script\n# 1. Record 2 second audio file\n# 2. Convert it to a spectrogram\n# 3. Get the AI prediction and save the label\ndef task():\n    print('Starting record task...')\n\n    while True:\n        global snippet_counter\n        snippet_counter += 1\n\n        name_wav = wav_dir + 'snippet_' + str(snippet_counter) + '.wav'\n        name_png = png_dir + 'snippet_' + str(snippet_counter) + '.png'\n\n        record(name_wav)\n        wav_to_spectrogram(name_wav, name_png)\n        print(f'snippet {snippet_counter} done')\n\n        # pass the callable and its arguments separately; calling\n        # get_prediction(...) inline would run it synchronously and hand\n        # its return value (None) to the Thread\n        Thread(target=get_prediction, args=(name_png, name_wav)).start()\n\n\n# Start a new thread that records the audio data\ndef main():\n    Thread(target=task).start()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"realtime-classification.py","file_name":"realtime-classification.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
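+One caveat in the recorder above: get_current_couter reads only the first character of the last CSV line (int(line[:1])), so the counter goes wrong as soon as snippet IDs reach 10. A hedged replacement that parses the whole first field; the corrected spelling and the empty-file fallback of 0 are assumptions, and the call site would need updating to match:
+
+def get_current_counter(csv_filename):
+    with open(csv_filename, "r", encoding="utf-8", errors="ignore") as scraped:
+        lines = scraped.readlines()
+        if not lines:
+            return 0  # assumed starting value for an empty file
+        # take the full first CSV field of the last line, not just its first character
+        return int(lines[-1].split(",")[0])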
+{"seq_id":"532741565","text":"import torch\nimport torch.nn as nn\nimport torch.utils.data as Data\nimport torchvision\nfrom torchvision import transforms, utils\nfrom torch.utils.data import Dataset, DataLoader\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport Class_OS.o1_获得当前工作目录\n\n# 参数列表\nEPOCH = 30\nBATCH_SIZE = 512\nLR = 0.01\nIMG_H = 30\nIMG_W = 15\npath = Class_OS.o1_获得当前工作目录.main()\n\n\n# 加载数据\nclass ReaderData(Dataset):\n def __init__(self, path):\n with open(path + \"data/ocrData.txt\", 'rt') as f:\n self.a = f.read()\n\n def __getitem__(self, index):\n im = Image.open(path + \"data/\" + str(index) + \".jpg\").convert('L')\n im = np.array(im).reshape(IMG_H, IMG_W).astype(np.float32)\n im = im / 255.0 * 2.0 - 1.0\n #im = torch.from_numpy(im)\n return im\n\n def __len__(self):\n return len(self.a)\n\n\n# 数据读取\ntrain_data = ReaderData(path=path)\ndata_loader = DataLoader(train_data, batch_size=512)\n\n# 定义网络\n\"\"\"\n抄莫烦的自解码网络\n\"\"\"\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Linear(IMG_H * IMG_W, 256),\n )\n # 解压\n self.decoder = nn.Sequential(\n nn.Linear(256, IMG_H * IMG_W),\n nn.Sigmoid(),\n )\n self.enZ = nn.Sequential(\n nn.Linear(256, 3), # 压缩成3个特征值方便画图\n )\n\n def forward(self, x):\n encoded = self.encoder(x)\n enZ = self.enZ(encoded)\n decoded = self.decoder(encoded)\n\n return enZ, decoded\n\n\n# 定义训练参数\nnet = Net()\noptimizer = torch.optim.Adam(net.parameters(), lr=LR)\nloss_func = nn.MSELoss()\n\nfor epoch in range(EPOCH):\n '''\n xList = [] # 记录绘图数据\n yList = []\n zList = []\n '''\n for step, x in enumerate(data_loader):\n b_x = x.view(-1, IMG_H * IMG_W)\n b_y = x.view(-1, IMG_H * IMG_W)\n\n enZ, decoded = net(b_x)\n\n loss = loss_func(decoded, b_y) # mean square error\n optimizer.zero_grad() # clear gradients for this training step\n loss.backward() # backpropagation, compute gradients\n optimizer.step() # apply gradients\n\n print(\"EPOCH:\", epoch + 1, \"LOSS:\", loss.data)\n\n # 准确率图\n fig = plt.figure()\n plt.plot(step, loss.data,color='red', linewidth=1.0)\n plt.show()\n '''\n # x, y, z 的数据值\n X = enZ.data[:, 0].numpy()\n Y = enZ.data[:, 1].numpy()\n Z = enZ.data[:, 2].numpy()\n xList.append(X)\n yList.append(Y)\n zList.append(Z)\n '''\n '''\n # 制图\n fig = plt.figure()\n ax = Axes3D(fig) # 3D 图\n xList=np.array(xList)\n yList=np.array(yList)\n zList=np.array(zList)\n ax.plot_surface(xList, yList, zList, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))\n plt.show()\n '''\n\ntorch.save(net, path + 'save/net.pkl')\n","sub_path":"Class_Pytorch/p1_imgauto/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"237013413","text":"from decimal import Decimal\n\nfrom django import http\nfrom django.contrib.messages import constants, get_messages\nfrom django.shortcuts import get_object_or_404, render\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import ListView, DetailView\n\nfrom l10n.utils import moneyfmt\nfrom livesettings.functions import config_value\nfrom product.models import Category, Product\nfrom product.modules.configurable.models import ConfigurableProduct\nfrom product.signals import index_prerender\nfrom product.utils import find_best_auto_discount, display_featured, find_product_template, optionids_from_post\nfrom satchmo_utils.satchmo_json import json_encode\nfrom satchmo_utils.numbers import RoundedDecimalError, round_decimal\nfrom satchmo_utils.views import bad_or_missing\nimport logging\n\nlog = logging.getLogger('product.views')\n\n\nclass CategoryIndexView(ListView):\n model = Category\n template_name = \"product/category_index.html\"\n context_object_name = \"categorylist\"\n\n def get_queryset(self):\n return self.model.objects.root_categories()\n\ncategory_index = CategoryIndexView.as_view()\n \n# def category_index(request, template=\"product/category_index.html\", root_only=True):\n# \"\"\"Display all categories.\n\n# Parameters:\n# - root_only: If true, then only show root categories.\n# \"\"\"\n# cats = Category.objects.root_categories()\n# return render(request, template, { 'categorylist' : cats })\n\n\nclass CategoryView(DetailView):\n model = Category\n template_name = \"product/category.html\"\n context_object_name = \"category\"\n\n def get_queryset(self):\n return self.model.objects.by_site()\n \n def get_context_data(self, **kwargs):\n context = super(CategoryView, self).get_context_data(**kwargs)\n products = list(self.object.active_products())\n context['child_categories'] = self.object.get_all_children()\n context['sale'] = find_best_auto_discount(products)\n context['products'] = products\n index_prerender.send(Product, request=self.request, context=context, category=self.object, object_list=products)\n return context\n\ncategory_view = CategoryView.as_view()\n \n# def category_view(request, slug, parent_slugs='', template='product/category.html'):\n# \"\"\"Display the category, its child categories, and its products.\n\n# Parameters:\n# - slug: slug of category\n# - parent_slugs: ignored\n# \"\"\"\n# try:\n# category = Category.objects.get_by_site(slug=slug)\n# products = list(category.active_products())\n# sale = find_best_auto_discount(products)\n\n# except Category.DoesNotExist:\n# return bad_or_missing(request, _('The category you have requested does not exist.'))\n\n# child_categories = category.get_all_children()\n\n# ctx = {\n# 'category': category,\n# 'child_categories': child_categories,\n# 'sale' : sale,\n# 'products' : products,\n# }\n# index_prerender.send(Product, request=request, context=ctx, category=category, object_list=products)\n# return render(request, template, ctx)\n\n\ndef get_configurable_product_options(request, id):\n \"\"\"Used by admin views\"\"\"\n cp = get_object_or_404(ConfigurableProduct, product__id=id)\n options = ''\n for og in cp.option_group.all():\n for opt in og.option_set.all():\n options += '' % (opt.id, str(opt))\n if not options:\n return '' % cp.product.slug\n return http.HttpResponse(options, content_type=\"text/html\")\n\n\nclass ProductView(DetailView):\n model = Product\n context_object_name = \"current_product\"\n slug_url_kwarg = 'product_slug'\n default_view_tax = 
None\n selected_options = ()\n\n def get_template_names(self):\n template = find_product_template(self.product, producttypes=self.product.get_subtypes())\n return [template.template.name]\n\n def get_queryset(self):\n return self.model.objects.active_by_site()\n \n def get_context_data(self, **kwargs):\n self.product = self.get_product_variation()\n default_view_tax = self.get_default_view_tax()\n context = super(ProductView, self).get_context_data(**kwargs)\n context['product'] = self.product\n context['sale'] = find_best_auto_discount(self.product)\n context['error_message'] = self.get_error_message()\n context['default_view_tax'] = default_view_tax\n context = self.product.add_template_context(context=context, request=self.request,\n selected_options=self.selected_options,\n default_view_tax=default_view_tax)\n return context\n\n def get_error_message(self):\n errors = [m for m in get_messages(self.request) if m.level == constants.ERROR] \n try:\n return errors[0]\n except IndexError:\n pass\n\n def get_default_view_tax(self):\n return self.default_view_tax or config_value('TAX', 'DEFAULT_VIEW_TAX')\n\n def get_product_variation(self):\n product = self.object\n if 'ProductVariation' in self.object.get_subtypes():\n self.selected_options = product.productvariation.unique_option_ids\n product = product.productvariation.parent.product\n return product\n\nget_product = ProductView.as_view()\n \n# def get_product(request, product_slug=None, selected_options=(),\n# default_view_tax=None):\n# \"\"\"Basic product view\"\"\"\n\n# errors = [m for m in get_messages(request) if m.level == constants.ERROR]\n\n# try:\n# product = Product.objects.get_by_site(active=True, slug=product_slug)\n# except Product.DoesNotExist:\n# return bad_or_missing(request, _('The product you have requested does not exist.'))\n\n# if default_view_tax is None:\n# default_view_tax = config_value('TAX', 'DEFAULT_VIEW_TAX')\n\n# subtype_names = product.get_subtypes()\n\n# # Save product id for xheaders, in case we display a ConfigurableProduct\n# product_id = product.id\n\n# # Clone product object in order to have current product variations in context (extra_context)\n# current_product = product\n\n# if 'ProductVariation' in subtype_names:\n# selected_options = product.productvariation.unique_option_ids\n# #Display the ConfigurableProduct that this ProductVariation belongs to.\n# product = product.productvariation.parent.product\n# subtype_names = product.get_subtypes()\n\n# best_discount = find_best_auto_discount(product)\n\n# if errors:\n# error_message = errors[0]\n# else:\n# error_message = None\n\n# extra_context = {\n# 'product': product,\n# 'current_product' : current_product,\n# 'default_view_tax': default_view_tax,\n# 'sale': best_discount,\n# 'error_message' : error_message,\n# }\n\n# # Get the template context from the Product.\n# extra_context = product.add_template_context(context=extra_context,\n# request=request, selected_options=selected_options,\n# default_view_tax=default_view_tax)\n# template = find_product_template(product, producttypes=subtype_names)\n# response = render(request, template.template.name, extra_context)\n# try:\n# from django.core.xheaders import populate_xheaders\n# populate_xheaders(request, response, Product, product_id)\n# except ImportError:\n# pass\n# return response\n\n\ndef get_price(request, product_slug):\n \"\"\"Get base price for a product, returning the answer encoded as JSON.\"\"\"\n quantity = Decimal('1')\n\n try:\n product = Product.objects.get_by_site(active=True, 
slug=product_slug)\n except Product.DoesNotExist:\n return http.HttpResponseNotFound(json_encode(('', _(\"not available\"))), content_type=\"text/javascript\")\n\n prod_slug = product.slug\n\n if request.method == \"POST\" and 'quantity' in request.POST:\n try:\n quantity = round_decimal(request.POST['quantity'], places=2, roundfactor=.25)\n except RoundedDecimalError:\n quantity = Decimal('1.0')\n log.warn(\"Could not parse a decimal from '%s', returning '1.0'\", request.POST['quantity'])\n\n if 'ConfigurableProduct' in product.get_subtypes():\n cp = product.configurableproduct\n chosen_options = optionids_from_post(cp, request.POST)\n pvp = cp.get_product_from_options(chosen_options)\n\n if not pvp:\n return http.HttpResponse(json_encode(('', _(\"not available\"))), content_type=\"text/javascript\")\n prod_slug = pvp.slug\n price = moneyfmt(pvp.get_qty_price(quantity))\n else:\n price = moneyfmt(product.get_qty_price(quantity))\n\n if not price:\n return http.HttpResponse(json_encode(('', _(\"not available\"))), content_type=\"text/javascript\")\n\n return http.HttpResponse(json_encode((prod_slug, price)), content_type=\"text/javascript\")\n\n\ndef get_price_detail(request, product_slug):\n \"\"\"Get all price details for a product, returning the response encoded as JSON.\"\"\"\n results = {\n \"success\" : False,\n \"message\" : _(\"not available\")\n }\n price = None\n\n if request.method==\"POST\":\n reqdata = request.POST\n else:\n reqdata = request.GET\n\n try:\n product = Product.objects.get_by_site(active=True, slug=product_slug)\n found = True\n\n prod_slug = product.slug\n\n if 'quantity' in reqdata:\n try:\n quantity = round_decimal(reqdata['quantity'], places=2, roundfactor=.25)\n except RoundedDecimalError:\n quantity = Decimal('1.0')\n log.warn(\"Could not parse a decimal from '%s', returning '1.0'\", reqdata['quantity'])\n else:\n quantity = Decimal('1.0')\n\n if 'ConfigurableProduct' in product.get_subtypes():\n cp = product.configurableproduct\n chosen_options = optionids_from_post(cp, reqdata)\n product = cp.get_product_from_options(chosen_options)\n\n if product:\n price = product.get_qty_price(quantity)\n\n results['slug'] = product.slug\n results['price'] = float(price)\n results['success'] = True\n results['message'] = \"\"\n\n except Product.DoesNotExist:\n found = False\n\n data = json_encode(results)\n if found:\n return http.HttpResponse(data, content_type=\"text/javascript\")\n else:\n return http.HttpResponseNotFound(data, content_type=\"text/javascript\")\n","sub_path":"satchmo/satchmo/apps/product/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
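+A dataset artifact worth flagging in get_configurable_product_options above: the HTML appears to have been stripped from the string literals, leaving expressions like '' % (opt.id, str(opt)) that raise TypeError at runtime, and a bare string return. The original almost certainly formatted <option> markup; a plausible reconstruction, with the exact tag text and the _fixed suffix being assumptions:
+
+def get_configurable_product_options_fixed(request, id):
+    """Used by admin views"""
+    cp = get_object_or_404(ConfigurableProduct, product__id=id)
+    options = ''
+    for og in cp.option_group.all():
+        for opt in og.option_set.all():
+            # assumed markup; the source literals were stripped in this dataset
+            options += '<option value="%s">%s</option>' % (opt.id, str(opt))
+    if not options:
+        options = '<option>No options for %s</option>' % cp.product.slug
+    return http.HttpResponse(options, content_type="text/html")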
+{"seq_id":"641446571","text":"from typing import Callable, Dict, List, Optional, Set, Tuple, Type\n\nfrom datamodel_code_generator import PythonVersion, snooper_to_methods\nfrom datamodel_code_generator.format import format_code\nfrom datamodel_code_generator.imports import IMPORT_ANNOTATIONS\nfrom datamodel_code_generator.model.enum import Enum\nfrom datamodel_code_generator.parser.base import (\n JsonSchemaObject,\n Parser,\n dump_templates,\n get_singular_name,\n sort_data_models,\n)\nfrom datamodel_code_generator.types import DataType\nfrom prance import BaseParser\n\nfrom ..model.base import DataModel, DataModelField\n\n\n@snooper_to_methods(max_variable_length=None)\nclass OpenAPIParser(Parser):\n def __init__(\n self,\n data_model_type: Type[DataModel],\n data_model_root_type: Type[DataModel],\n data_model_field_type: Type[DataModelField] = DataModelField,\n filename: Optional[str] = None,\n base_class: Optional[str] = None,\n target_python_version: PythonVersion = PythonVersion.PY_37,\n text: Optional[str] = None,\n result: Optional[List[DataModel]] = None,\n dump_resolve_reference_action: Optional[Callable[[List[str]], str]] = None,\n ):\n self.base_parser = (\n BaseParser(filename, text, backend='openapi-spec-validator')\n if filename or text\n else None\n )\n\n super().__init__(\n data_model_type,\n data_model_root_type,\n data_model_field_type,\n filename,\n base_class,\n target_python_version,\n text,\n result,\n dump_resolve_reference_action,\n )\n\n def parse_any_of(self, name: str, obj: JsonSchemaObject) -> List[DataType]:\n any_of_data_types: List[DataType] = []\n for any_of_item in obj.anyOf:\n if any_of_item.ref: # $ref\n any_of_data_types.append(\n self.data_type(\n type=any_of_item.ref_object_name,\n ref=True,\n version_compatible=True,\n )\n )\n else:\n singular_name = get_singular_name(name)\n self.parse_object(singular_name, any_of_item)\n any_of_data_types.append(\n self.data_type(\n type=singular_name, ref=True, version_compatible=True\n )\n )\n return any_of_data_types\n\n def parse_all_of(self, name: str, obj: JsonSchemaObject) -> List[DataType]:\n fields: List[DataModelField] = []\n base_classes: List[DataType] = []\n for all_of_item in obj.allOf:\n if all_of_item.ref: # $ref\n base_classes.append(\n self.data_type(\n type=all_of_item.ref_object_name,\n ref=True,\n version_compatible=True,\n )\n )\n\n else:\n fields_ = self.parse_object_fields(all_of_item)\n fields.extend(fields_)\n\n data_model_type = self.data_model_type(\n name,\n fields=fields,\n base_classes=[b.type for b in base_classes],\n auto_import=False,\n custom_base_class=self.base_class,\n )\n self.append_result(data_model_type)\n\n return [self.data_type(type=name, ref=True, version_compatible=True)]\n\n def parse_object_fields(self, obj: JsonSchemaObject) -> List[DataModelField]:\n requires: Set[str] = set(obj.required or [])\n fields: List[DataModelField] = []\n\n for field_name, filed in obj.properties.items(): # type: ignore\n is_list = False\n field_types: List[DataType]\n if filed.ref:\n field_types = [\n self.data_type(\n type=filed.ref_object_name, ref=True, version_compatible=True\n )\n ]\n elif filed.is_array:\n class_name = self.get_class_name(field_name)\n array_fields, array_field_classes = self.parse_array_fields(\n class_name, filed\n )\n field_types = array_fields[0].data_types\n is_list = True\n elif filed.is_object:\n class_name = self.get_class_name(field_name)\n self.parse_object(class_name, filed)\n field_types = [\n self.data_type(type=class_name, ref=True, 
version_compatible=True)\n ]\n elif filed.enum:\n enum = self.parse_enum(field_name, filed)\n field_types = [\n self.data_type(type=enum.name, ref=True, version_compatible=True)\n ]\n elif filed.anyOf:\n field_types = self.parse_any_of(field_name, filed)\n elif filed.allOf:\n field_types = self.parse_all_of(field_name, filed)\n else:\n data_type = self.get_data_type(filed)\n field_types = [data_type]\n required: bool = field_name in requires\n fields.append(\n self.data_model_field_type(\n name=field_name,\n data_types=field_types,\n required=required,\n is_list=is_list,\n )\n )\n return fields\n\n def parse_object(self, name: str, obj: JsonSchemaObject) -> None:\n fields = self.parse_object_fields(obj)\n data_model_type = self.data_model_type(\n name, fields=fields, custom_base_class=self.base_class\n )\n self.append_result(data_model_type)\n\n def parse_array_fields(\n self, name: str, obj: JsonSchemaObject\n ) -> Tuple[List[DataModelField], List[DataType]]:\n if isinstance(obj.items, JsonSchemaObject):\n items: List[JsonSchemaObject] = [obj.items]\n else:\n items = obj.items # type: ignore\n item_obj_data_types: List[DataType] = []\n is_union: bool = False\n for item in items:\n if item.ref:\n item_obj_data_types.append(\n self.data_type(\n type=item.ref_object_name, ref=True, version_compatible=True\n )\n )\n elif isinstance(item, JsonSchemaObject) and item.properties:\n singular_name = get_singular_name(name)\n self.parse_object(singular_name, item)\n item_obj_data_types.append(\n self.data_type(\n type=singular_name, ref=True, version_compatible=True\n )\n )\n elif item.anyOf:\n item_obj_data_types.extend(self.parse_any_of(name, item))\n is_union = True\n elif item.allOf:\n singular_name = get_singular_name(name)\n item_obj_data_types.extend(self.parse_all_of(singular_name, item))\n else:\n item_obj_data_types.append(self.get_data_type(item))\n\n field = self.data_model_field_type(\n data_types=item_obj_data_types,\n required=True,\n is_list=True,\n is_union=is_union,\n )\n return [field], item_obj_data_types\n\n def parse_array(self, name: str, obj: JsonSchemaObject) -> None:\n fields, item_obj_names = self.parse_array_fields(name, obj)\n data_model_root = self.data_model_root_type(\n name, fields, custom_base_class=self.base_class\n )\n\n self.append_result(data_model_root)\n\n def parse_root_type(self, name: str, obj: JsonSchemaObject) -> None:\n if obj.type:\n types: List[DataType] = [self.get_data_type(obj)]\n elif obj.anyOf:\n types = self.parse_any_of(name, obj)\n else:\n types = [\n self.data_type(\n type=obj.ref_object_name, ref=True, version_compatible=True\n )\n ]\n\n data_model_root_type = self.data_model_root_type(\n name,\n [self.data_model_field_type(data_types=types, required=not obj.nullable)],\n custom_base_class=self.base_class,\n )\n self.append_result(data_model_root_type)\n\n def parse_enum(self, name: str, obj: JsonSchemaObject) -> DataModel:\n enum_fields = []\n\n for enum_part in obj.enum: # type: ignore\n if obj.type == 'string':\n default = f\"'{enum_part}'\"\n field_name = enum_part\n else:\n default = enum_part\n field_name = f'{obj.type}_{enum_part}'\n enum_fields.append(\n self.data_model_field_type(name=field_name, default=default)\n )\n\n enum = Enum(self.get_class_name(name), fields=enum_fields)\n self.append_result(enum)\n return enum\n\n def parse(\n self, with_import: Optional[bool] = True, format_: Optional[bool] = True\n ) -> str:\n for obj_name, raw_obj in self.base_parser.specification['components'][\n 'schemas'\n ].items(): # type: str, 
Dict\n obj = JsonSchemaObject.parse_obj(raw_obj)\n if obj.is_object:\n self.parse_object(obj_name, obj)\n elif obj.is_array:\n self.parse_array(obj_name, obj)\n elif obj.enum:\n self.parse_enum(obj_name, obj)\n elif obj.allOf:\n self.parse_all_of(obj_name, obj)\n else:\n self.parse_root_type(obj_name, obj)\n\n result: str = ''\n if with_import:\n if self.target_python_version == PythonVersion.PY_37:\n self.imports.append(IMPORT_ANNOTATIONS)\n result += f'{self.imports.dump()}\\n\\n\\n'\n\n _, sorted_data_models, require_update_action_models = sort_data_models(\n self.results\n )\n\n result += dump_templates(list(sorted_data_models.values()))\n if self.dump_resolve_reference_action:\n result += f'\\n\\n{self.dump_resolve_reference_action(require_update_action_models)}'\n\n if format_:\n result = format_code(result, self.target_python_version)\n\n return result\n","sub_path":"datamodel_code_generator/parser/openapi.py","file_name":"openapi.py","file_ext":"py","file_size_in_byte":10352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
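+For reference, the parse() dispatcher at the end of the parser above routes each components/schemas entry by shape. A minimal schemas mapping exercising every branch (names and structure are illustrative, not from the source):
+
+schemas = {
+    'Pet': {  # is_object -> parse_object
+        'type': 'object',
+        'required': ['name'],
+        'properties': {'name': {'type': 'string'}},
+    },
+    'Pets': {  # is_array -> parse_array
+        'type': 'array',
+        'items': {'$ref': '#/components/schemas/Pet'},
+    },
+    'Color': {  # enum -> parse_enum
+        'type': 'string',
+        'enum': ['red', 'green'],
+    },
+    'Dog': {  # allOf -> parse_all_of
+        'allOf': [{'$ref': '#/components/schemas/Pet'}],
+    },
+    'PetId': {  # fallback -> parse_root_type
+        'type': 'integer',
+    },
+}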
+{"seq_id":"151090469","text":"import asyncio\nfrom asyncio import CancelledError\nimport json\nimport logging.config\nimport os\nimport pathlib\nfrom json import JSONDecodeError\n\nimport yaml\nimport time\nimport aioredis\nfrom aioredis import Redis, Channel\nimport dotenv\n\nfrom quart import Quart\nfrom quart_session import Session as QuartSession\n\n\nwith open(pathlib.Path(__file__).parent / \"logging.conf.yaml\") as logging_file:\n logging.config.dictConfig(yaml.load(logging_file, Loader=yaml.FullLoader))\n\nlogger = logging.getLogger(__name__)\n\napp = Quart(\"xyz\")\n\ndotenv.load_dotenv(verbose=True)\n\napp.secret_key = os.getenv(\"FLASK_SECRET\")\nredis_url = os.getenv(\"REDISTOGO_URL\")\napp.config[\"SESSION_TYPE\"] = \"redis\"\n\napp.config[\"request_number\"] = 1\n\n\n@app.route(\"/\")\nasync def _hello() -> dict:\n request_number = app.config[\"request_number\"]\n app.config[\"request_number\"] = request_number + 1\n request = {\n \"response\": {\"request_number\": request_number},\n \"response_channel\": f\"myreplychannel{request_number}\",\n }\n response = await redis_request_reply(\n redis=app.config[\"SESSION_REDIS\"],\n request=json.dumps(request),\n request_channel=\"mychannel\",\n reply_channel=f\"myreplychannel{request_number}\",\n )\n\n return {\"message\": \"Hello, World!\", \"response\": response}\n\n\n@app.before_serving\nasync def start_redis_listener():\n app.config[\"SESSION_REDIS\"] = await aioredis.create_redis_pool(redis_url)\n QuartSession(app)\n\n loop = asyncio.get_event_loop()\n\n async def task(redis: Redis):\n channel: [Channel, None] = None\n try:\n channel, *_ = await redis.subscribe(\"mychannel\")\n\n async for message in channel.iter(encoding=\"utf-8\"):\n try:\n message = json.loads(message)\n except JSONDecodeError:\n pass\n logger.debug(f\"request from: {str(channel.name)}: {message}\")\n # add some delay\n time.sleep(0.5)\n if isinstance(message, dict):\n response_channel, response = (\n message[\"response_channel\"],\n message[\"response\"],\n )\n logger.debug(\n f\"sending response: {response} to channel {response_channel}\"\n )\n received_count = await redis.publish_json(\n response_channel, response\n )\n logger.debug(f\"received by {received_count} consumers\")\n\n else:\n logger.debug(\"message isn't a dict\")\n\n except CancelledError:\n pass\n finally:\n if channel:\n await redis.unsubscribe(channel.name)\n\n loop.create_task(task(app.config[\"SESSION_REDIS\"]))\n\n\nasync def redis_request_reply(redis, request, request_channel, reply_channel):\n \"\"\"\n Sends a message to a Redis channel, then waits for a singular reply\n\n :param redis: the redis to use\n :param request: the message to send - if a dict, then it is first JSON serialised\n :param request_channel: the channel to send the request to\n :param reply_channel: the channel on which to wait for a reply\n :return: the response received, JSON decoded unless it's not valid JSON\n \"\"\"\n channel: Channel\n channel, *_ = await redis.subscribe(reply_channel)\n try:\n if isinstance(request, dict):\n request = json.dumps(request)\n received_count = await redis.publish(request_channel, request)\n if not received_count:\n logger.warning(\n f'message \"{request}\" sent to channel {request_channel} was not received by any subscriber'\n )\n # TODO should we bail at this point?\n # probably - or put in some retries\n else:\n logger.debug(\n f'message \"{request}\" was received by {received_count} consumers'\n )\n\n async def one_message():\n logger.debug(f\"waiting for one message on 
channel {channel.name}...\")\n            async for message in channel.iter(encoding=\"utf-8\"):\n                logger.debug(f\"received response {message}\")\n                return json.loads(message)\n\n        return await asyncio.wait_for(one_message(), timeout=5)\n    finally:\n        if channel:\n            await redis.unsubscribe(channel.name)  # unsubscribe is a coroutine; without await it never actually runs\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host=\"0.0.0.0\")\n","sub_path":"xyz.py","file_name":"xyz.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
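+A usage sketch of redis_request_reply above from another coroutine, including the timeout it can surface (channel names and the function name ask are illustrative):
+
+import asyncio
+
+async def ask(redis):
+    try:
+        reply = await redis_request_reply(
+            redis=redis,
+            request={'response': {'ok': True}, 'response_channel': 'reply-1'},
+            request_channel='mychannel',
+            reply_channel='reply-1',
+        )
+        print('got', reply)
+    except asyncio.TimeoutError:
+        # wait_for(..., timeout=5) inside redis_request_reply gave up
+        print('no reply within 5 seconds')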
+{"seq_id":"351950325","text":"\"\"\"\nGeoMACH design variable class\nJohn Hwang, July 2014\n\"\"\"\n# pylint: disable=E1101\nfrom __future__ import division\nimport numpy\n\nfrom GeoMACH.PGM.core.PGMobject import PGMobject\n\n\nclass PGMdv(PGMobject):\n\n def __init__(self, shape, val=None, lower=None, upper=None, scale=None):\n super(PGMdv, self).__init__()\n\n self._shape = shape\n self.val = val\n self.lower = lower\n self.upper = upper\n self.scale = scale\n","sub_path":"GeoMACH/PGM/core/PGMdv.py","file_name":"PGMdv.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"445739498","text":"import selenium.webdriver\nfrom selenium.webdriver.common.by import By\n\nclass SelPy:\n\n driver=None\n def __init__(self):\n try:\n dr=selenium.webdriver.Chrome(\"driver/chromedriver.exe\")\n self.driver=dr\n except:\n print(\"Error while starting chrome driver\")\n def navigate(self,url):\n self.driver.delete_all_cookies()\n self.driver.get(url)\n self.driver.find_element(By.XPATH, '//button[text()=\"OK\"]').click()\n \n \n def curURL(self):\n return self.driver.current_url\n","sub_path":"selpy/selpy.py","file_name":"selpy.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"298986501","text":"from rest_framework import serializers\nfrom specials.models import Special\n\nclass SpecialSerializer(serializers.ModelSerializer):\n class Meta:\n model = Special\n fields = ['id','title','description','locations','reoccuring_weekend','start_date','start_time','end_date','end_time','linenos','language','style']\n\n def create(self, validated_data):\n \"\"\"\n Create and return a new `Special` instance, given the validated data.\n \"\"\"\n return Special.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Special` instance, given the validated data.\n \"\"\"\n instance.title = validated_data.get('title', instance.title)\n instance.description = validated_data.get('description', instance.description)\n instance.locations = validated_data.get('locations', instance.locations)\n instance.reoccuring_weekend = validated_data.get('reoccuring_weekend', instance.reoccuring_weekend)\n instance.start_date = validated_data.get('start_date', instance.start_date)\n instance.start_time = validated_data.get('start_time', instance.start_time)\n instance.end_date = validated_data.get('end_date', instance.end_date)\n instance.end_time = validated_data.get('end_time', instance.end_time)\n instance.save()\n return instance","sub_path":"specials/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"78257663","text":"print(\"Welcome to my game!\")\n\nenemyhealth = 100\nwhile enemyhealth > 0:\n attack = input(\"What is your attack? \")\n if attack == \"heavy\":\n enemyhealth = enemyhealth - 50\n if attack == \"medium\":\n enemyhealth = enemyhealth - 25\nelse:\n print(\"You Won\")\n\n\n\n\n\n\n\n\n \n","sub_path":"My python game 1.py","file_name":"My python game 1.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"25050079","text":"import sys\nfrom pathlib import Path\nfrom LanguageHelpers import isValidLanguage, genClassByLanguage\n\nif len(sys.argv) > 1:\n\n if sys.argv[1][0] == '-':\n if sys.argv[1] == \"--help\":\n print(\"Give this command a language and a filename. See --options or -o for language options\")\n print(\"Like this:\", sys.argv[0], \"c ClassName\")\n print(\"Or this:\", sys.argv[0], \"c++ helpers/HelperClass\")\n elif sys.argv[1] == \"--options\" or sys.argv[1] == \"-o\":\n print(\"Language options are c, c++, cpp, and cxx\")\n else:\n print(\"Invalid argument, see --help for help\")\n else:\n if (isValidLanguage(sys.argv[1])):\n if len(sys.argv) > 2:\n for i in range(2, len(sys.argv)):\n # Guarantees the path does not contain any special characters or special dir paths\n genClassByLanguage(sys.argv[1], str(Path(sys.argv[i]).resolve()))\n print()\n else:\n print(\"You must provide a class name or class path\")\n\n else:\n print(\"Invalid language given, see --options for language options\")\n \nelse:\n print(\"Use --help for help, or --options for language options\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"251253345","text":"#!/usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n\r\n\"\"\"A tool to produce various statistics from files of BNB RDF data.\"\"\"\r\n\r\nimport bnb_rdf_stats\r\nimport sys\r\n\r\n__author__ = 'Victoria Morris'\r\n__license__ = 'MIT License'\r\n__version__ = '1.0.0'\r\n__status__ = '4 - Beta Development'\r\n\r\nbnb_rdf_stats.main(sys.argv[1:])\r\n","sub_path":"bin/bnb_rdf_stats.py","file_name":"bnb_rdf_stats.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"458536151","text":"# Game merge version 1.25\n# Including works of Wessel, Furkan, Onno, Niels and Ziggy\n# Importing code done by Ziggy\n\nimport pygame\nimport random\nimport time\n\n#pygame init ---\npygame.init()\n\n#database functies\nimport psycopg2\n\ndef interact_with_database(command):\n # Connect and set up cursor\n connection = psycopg2.connect(\"dbname=postgres user=postgres password=E2f3446d78xd\")\n cursor = connection.cursor()\n\n # Execute the command\n cursor.execute(command)\n connection.commit()\n\n # Save results\n results = None\n try:\n results = cursor.fetchall()\n except psycopg2.ProgrammingError:\n # Nothing to fetch\n pass\n\n # Close connection\n cursor.close()\n connection.close()\n\n return results\n\n\n# Uploads a score into the hiscore table\ndef upload_score(name, score):\n interact_with_database(\"UPDATE player SET score = {} WHERE name = '{}'\"\n .format(score, name))\n\n# Downloads score data from database\ndef download_scores():\n return interact_with_database(\"SELECT * FROM player\")\n\n\n# Downloads the top score from database\ndef download_top_score():\n result = interact_with_database(\"SELECT * FROM player ORDER BY score DESC\")\n return result\n\n\n\n#colors ---\nwhite = (255,255,255)\nblack = (0,0,0)\nyellow = (255,255,0)\nbright_yellow = (200,200,0)\nblue = (0,0,255)\nbright_blue = (0,0,200)\nred = (200,0,0)\nbright_red = (255,0,0)\ngreen = (0,200,0)\nbright_green = (0,255,0)\nbackground_color = (51, 204, 255)\n\n#board colors ---\ncolors = (red,green,yellow,blue)\ntowercolors = (blue,yellow,green,red)\n\n#text font definitions ---\nsmallText = pygame.font.Font(\"freesansbold.ttf\", 40)\nsmallText2 = pygame.font.Font('freesansbold.ttf', 15)\nsmallText3 = pygame.font.Font(\"freesansbold.ttf\", 50)\nsmallText4 = pygame.font.Font(\"freesansbold.ttf\", 20)\nlargeText = pygame.font.Font('freesansbold.ttf', 90)\nbuttonText = pygame.font.SysFont(\"monospace\", 20)\nhighscoreText = pygame.font.Font('freesansbold.ttf', 20)\n\n#background image definitions ---\nbackground_img = pygame.image.load('euromast-rotterdam.png')\n\n#dice img loading and other settings ---\ndice1_img = pygame.image.load('dice1.png')\ndice2_img = pygame.image.load('dice2.png')\ndice3_img = pygame.image.load('dice3.png')\ndice4_img = pygame.image.load('dice4.png')\ndice5_img = pygame.image.load('dice5.png')\ndice6_img = pygame.image.load('dice6.png')\n\ndice_img = [dice1_img, dice2_img, dice3_img, dice4_img, dice5_img, dice6_img]\n\n# list dice (not used)\n'''dice1 = 1\ndice2 = 2\ndice3 = 3\ndice4 = 4\ndice5 = 5\ndice6 = 6\ndice_list = [dice1,dice2,dice3,dice4,dice5,dice6]'''\n\ndice_choosed = 0\ndice_display = 0\n\n#game settings\ndisplay_width = 1024\ndisplay_height = 650\ndisplay_height_button = 75\ndisplay_x_menu = ((display_width / 2) + (display_width / 3))\ndisplay_x_menu_dice = ((display_width / 2) + (display_width / 3) + ((display_width / 3)/7))\ndisplay_y_menu_dice = ((display_height/4) + (display_height / 6))\ndisplay_height_menu = display_height\ndisplay_width_menu = display_width / 3\n\n#board settings\n#rijen zie Setup and Turn Layout.docx\nrowsx = 8\nrowsy = 16\n\nbeginwidth = (display_width/10)\nblocksizex = (display_width/2)/rowsx\nblocksizey = display_height/rowsy\nrowstower = 5 # hoeveel blokken krijgt de top van de toren?\nprint(\"blocksize is: width=\",blocksizex, \"height=\",blocksizey)\n\n#display window\ngameDisplay = pygame.display.set_mode((display_width,display_height))\npygame.display.set_caption(\"De Euromast\")\nclock = 
pygame.time.Clock()\n\n# gamestate ---\ngamestate = 0\n\n#buttonstate ---\nclickedbutton = 0\n\n# state 0 is start of game, state 1 is game state\n\n# quit function ---\ndef game_quit():\n pygame.quit()\n quit()\n\n# text display function ---\ndef text_objects(text, font):\n textSurface = font.render(text, True, black)\n return textSurface, textSurface.get_rect()\n\n#title display\ndef title_display(text):\n TextSurf, TextRect = text_objects(text, largeText)\n TextRect.center = ((display_width / 2), (display_height / 4))\n gameDisplay.blit(TextSurf, TextRect)\n\n# button draw function ---\ndef button(msg, x, y, w, h, ic, ac, action=None):\n global clickedbutton\n\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n pygame.draw.rect(gameDisplay, ac,(x,y,w,h))\n\n if click[0] == 1 and action != None:\n clickedbutton = 1\n action()\n pygame.display.flip()\n else:\n pygame.draw.rect(gameDisplay, ic,(x,y,w,h))\n\n textSurf, textRect = text_objects(msg, buttonText)\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\n gameDisplay.blit(textSurf, textRect)\n\n#dirty trick to roll dices\ndef dicebutton(msg, x, y, w, h, ic, ac, action=None):\n\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n pygame.draw.rect(gameDisplay, ac,(x,y,w,h))\n\n if click[0] == 1 and action != None:\n action()\n else:\n pygame.draw.rect(gameDisplay, ic,(x,y,w,h))\n\n textSurf, textRect = text_objects(msg, buttonText)\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\n gameDisplay.blit(textSurf, textRect)\n\n\n#start menu buttons\ndef beginbutton(msg, posx, posy, sizex, sizey, ic, ac, action=None):\n global clickedbutton\n\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if posx + sizex > mouse[0] > posx and posy + sizey > mouse[1] > posy:\n pygame.draw.rect(gameDisplay, ac, (posx, posy, sizex, sizey))\n\n if click[0] == 1 and action != None:\n action()\n return True\n else:\n pygame.draw.rect(gameDisplay, ic, (posx, posy, sizex, sizey))\n textSurf, textRect = text_objects(msg, smallText3)\n textRect.center = ((posx + (sizex / 2)), (posy + (sizey / 2)))\n gameDisplay.blit(textSurf, textRect)\n\n# a button delay function so you dont click another underlying button, put in every function above the while code\ndef clickedButtonDelay():\n global clickedbutton\n if(clickedbutton == 1):\n pygame.time.delay(160)\n clickedbutton = 0\n\n# board draw function ---\ndef drawgameboard(display_width, display_height, towercolors, colors, beginwidth = (display_width/10), blocksizex = ((display_width/2)/rowsx), blocksizey = (display_height/rowsy), rowsx = 8, rowsy = 16, rowstower = 5):\n color = 0\n towercolor = 0\n for i in range(rowsx):\n for j in range(rowsy):\n if (j > (rowstower - 1)): # toren (dikker) 2 rij blokken\n pygame.draw.rect(gameDisplay, colors[color], (beginwidth + (i * blocksizex), j * blocksizey, blocksizex, blocksizey), 2)\n # cat[color] #this is how you see the body category (prints when block is red: \"Entertainment\")\n else:\n if (i % 2 == 0): # toren 1 rij blokken\n pygame.draw.rect(gameDisplay, towercolors[towercolor], (beginwidth + (i * blocksizex) + (blocksizex / 2), j * blocksizey, blocksizex, blocksizey), 2)\n # cat[towercolor] #this is how you see the body category (prints when block is yellow: \"History\")\n\n #adding colors to each row including the towers top\n if (i % 2 == 1):\n if (color < (len(colors) - 1)):\n color += 1\n else:\n color = 0\n\n if 
(towercolor < (len(towercolors) - 1)):\n towercolor += 1\n else:\n towercolor = 0\n\n#player functions ---\nclass Player:\n def __init__(self, name, score):\n self.Name = name\n self.Score = score\n\n def score_increase(self):\n self.Score += 1\n\nplayer_1 = Player(\"player 1\", 0)\nplayer_2 = Player(\"player 2\", 0)\nplayer_3 = Player(\"player 3\", 0)\nplayer_4 = Player(\"player 4\", 0)\n\nplayers = download_top_score()\n\n# dice functions ---\ndef dice_roll():\n global dice_choosed\n choice = 0\n click = pygame.mouse.get_pressed()\n if (click != 0):\n choice = random.randint(1, 6)\n if (choice >= 0):\n gameDisplay.blit(pygame.transform.scale(dice_img[choice - 1], (75, 75)),(display_x_menu_dice, display_y_menu_dice))\n dice_choosed = choice\n\n pygame.time.delay(40)\n return dice_choosed\n\ndef final_dice(): # final dice stays visible\n global dice_choosed\n global dice_display\n if (dice_choosed > 0):\n dice_display = dice_choosed # dice_display is de waarde die altijd blijft dice_choosed NIET!\n\n dice_choosed = 0\n\n if (dice_display > 0):\n gameDisplay.blit(pygame.transform.scale(dice_img[dice_display - 1], (75, 75)),(display_x_menu_dice, display_y_menu_dice))\n\n# game instructions draw function ---\ndef game_instructions():\n clickedButtonDelay()\n print(\"Start instructions\")\n\n pygame.draw.rect(gameDisplay, white, (display_width / 18.5, display_height / 8, ((display_width / 2) + (display_width / 2.5)), ((display_height / 2) + (display_height / 4))))\n TextSurf, TextRect = text_objects(\"Instructions\", smallText)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 10)))\n gameDisplay.blit(TextSurf, TextRect)\n\n #regel 1\n TextSurf, TextRect = text_objects(\"1. Elke Player kiest een eigen categorie. Let op! 1 speler per categorie.\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 5.5)))\n gameDisplay.blit(TextSurf, TextRect)\n\n\n #regel 2\n TextSurf, TextRect = text_objects(\"2 Player 1 begint het spel. Vervolgens is Player 2 enz.\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 4.3)))\n gameDisplay.blit(TextSurf, TextRect)\n\n\n #regel 3\n TextSurf, TextRect = text_objects(\"3 De Player die aan de beurt is krijgt een vraag die hij/zij moet beantwoorden\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 3.5)))\n gameDisplay.blit(TextSurf, TextRect)\n\n TextSurf, TextRect = text_objects(\"door middel van klikken op het antwoord.\",smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 3.0)))\n gameDisplay.blit(TextSurf, TextRect)\n\n\n #regel 4\n TextSurf, TextRect = text_objects(\"4 Als de Player de vraag goed heeft beantwoord mag hij/zij een\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 2.6)))\n gameDisplay.blit(TextSurf, TextRect)\n\n TextSurf, TextRect = text_objects(\"richting kiezen en vervolgens dobbelen.\",smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 2.3)))\n gameDisplay.blit(TextSurf, TextRect)\n\n #regel 5\n TextSurf, TextRect = text_objects(\"5 De player verplaatst het aantal stappen dat is gedobbelt.\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 12) + (display_height / 2.0)))\n gameDisplay.blit(TextSurf, TextRect)\n\n #regel 6\n TextSurf, TextRect = text_objects(\"6 Einde Beurt. 
Volgende Player.\", smallText4)\n TextRect.center = ((display_width / 2), ((display_height / 12) + (display_height / 1.8)))\n gameDisplay.blit(TextSurf, TextRect)\n\n while game_instructions:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n button(\"Start\", ((display_width / 20) * 12), ((display_height / 8.5) * 6),((display_width / 6) + (display_width / 6)), 75, green, bright_green, start_game)\n button(\"Main Menu\",(((display_width / 20)* 2) - (display_width/40)), ((display_height / 8.5) * 6),((display_width / 6) + (display_width / 6)), 75, green, bright_green, startmenu_game)\n\n pygame.display.update()\n\ndef start_game():\n print(\"Start playing game\")\n game_loop()\n\ndef startmenu_game():\n print(\"Start startmenu\")\n startmenu_loop()\n\n\n# pause draw function ---\ndef pause_menu():\n #mouse = pygame.mouse.get_pos()\n clickedButtonDelay()\n pygame.draw.rect(gameDisplay, white, (display_width / 6, display_height / 8, ((display_width / 2) + (display_width /6)), ((display_height / 2) + (display_height /4))))\n smallText = pygame.font.Font(\"freesansbold.ttf\", 40)\n TextSurf, TextRect = text_objects(\"Paused\", smallText)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 10)))\n gameDisplay.blit(TextSurf, TextRect)\n\n print(\"Start pausemenu\")\n\n while pause_menu:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n button(\"Continue\", ((display_width / 6) + (display_width / 6)), ((display_height/20) * 5), ((display_width / 6) + (display_width / 6)), 75, green, bright_green, pause_menu_quit)\n button(\"Options\", ((display_width / 6) + (display_width / 6)), ((display_height/20) * 8), ((display_width / 6) + (display_width / 6)), 75, blue, bright_blue, pause_menu_options)\n button(\"Restart\", ((display_width / 6) + (display_width / 6)), ((display_height/20) * 11), ((display_width / 6) + (display_width / 6)), 75, yellow, bright_yellow, startmenu_game)\n button(\"Quit\", ((display_width / 6) + (display_width / 6)), ((display_height/20) * 14), ((display_width / 6) + (display_width / 6)), 75,red, bright_red, game_quit)\n pygame.display.update()\n\ndef pause_options():\n #mouse = pygame.mouse.get_pos()\n clickedButtonDelay()\n pygame.draw.rect(gameDisplay, white, (\n display_width / 6, display_height / 8, ((display_width / 2) + (display_width / 6)),\n ((display_height / 2) + (display_height / 4))))\n TextSurf, TextRect = text_objects(\"Options\", smallText)\n TextRect.center = ((display_width / 2), ((display_height / 10) + (display_height / 10)))\n gameDisplay.blit(TextSurf, TextRect)\n\n print(\"Start pause options\")\n\n while pause_menu:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n button(\"Continue\", ((display_width / 6) + (display_width / 6)), ((display_height / 20) * 6),((display_width / 6) + (display_width / 6)), 75, green, bright_green, pause_menu_quit)\n\n pygame.display.update()\n\ndef pause_menu_quit():\n game_loop()\n\ndef pause_menu_options():\n pause_options()\n\n# temporary fix\ndef font_temp():\n return pygame.font.Font(None, 30)\n#score functions ---\ndef current_scores(player):\n n = 0\n highscoreTitleSurf, highscoreTitleRect = text_objects(\"Scores\", highscoreText)\n highscoreTitleRect.center = ((display_width / 1.1), (display_height / 1.4))\n gameDisplay.blit(highscoreTitleSurf, highscoreTitleRect)\n # deze for loop gaat door de spelerslijst heen, en plaatst naam en score op het scherm.\n 
for a in player:\n score_text = font_temp().render(\"{}: {}\".format(a[0], a[1]),1, (0, 0, 0))\n gameDisplay.blit(score_text, (display_width - 140, (display_height - (160 + n))))\n n += -25\n\n# startmenu state function ---\ndef startmenu_loop():\n clickedButtonDelay()\n running = True\n\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n gameDisplay.fill(background_color)\n gameDisplay.blit(background_img, (-250, -40))\n\n title_display(\"De Euromast\")\n\n # START button\n beginbutton(\"Start\", (display_width / 4), ((display_height / 4) + (display_height / 4)), (display_width / 2), (display_height_button), green, bright_green, game_instructions)\n\n # EXIT button\n beginbutton(\"Quit\", (display_width / 4), ((display_height / 4) + (display_height / 4) + (display_height / 4)), (display_width / 2), (display_height_button), red, bright_red, game_quit)\n\n pygame.display.update()\n\n\n# playing state function ---\ndef game_loop():\n clickedButtonDelay()\n #loop for window + window.QUIT\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n\n #print(event)\n #background color\n gameDisplay.fill(background_color)\n\n # drawgameboard(width, height, towercolors, colors) # here we draw the gameboard with all the default values\n # or\n drawgameboard(display_width, display_height, towercolors, colors, beginwidth, blocksizex, blocksizey, rowsx, rowsy,rowstower) # here we draw the gameboard WITH non-deafult values\n\n #right menu dice\n pygame.draw.rect(gameDisplay, bright_green, (display_x_menu, 0, (display_width_menu),(display_height_menu)))\n # on window color positon (x) menu position (y) menu menu width menu height\n\n # get mouse pos\n mouse = pygame.mouse.get_pos()\n\n # final dice shows latest dice rolled\n final_dice()\n # PAUSE button\n button(\"Pause\", (display_width + 1 - (display_width_menu / 4)), 0, (display_width_menu / 4), 50, white, white, pause_menu)\n\n #Roll Button\n dice_button = dicebutton(\"Roll!\", (display_x_menu), (display_height / 6), (display_width_menu / 2),display_height_button, bright_red, red, dice_roll)\n\n # dit laat de huidige scores zien rechtsonder het scherm\n current_scores(players)\n # dit zorgt ervoor dat als spelers winnen, ze punten krijgen.\n addScore()\n #test score\n if pygame.mouse.get_pressed()[0]:\n player_1.score_increase()\n\n\n # window update\n pygame.display.update()\n clock.tick(15)\n\n# deze functie is een win conditie: als de speler de laatste tegel behaald.\ndef addScore():\n if player_1.startY > ((display_height * 0.95) + 15 * .063):\n player_1.Score += 100\n return player_1.Score\n elif player_2.startY > ((display_height * 0.95) + 15 * .063):\n player_2.Score += 100\n return player_2.Score\n elif player_3.startY > ((display_height * 0.95) + 15 * .063):\n player_3.Score += 100\n return player_3.Score\n elif player_4.startY > ((display_height * 0.95) + 15 * .063):\n player_4.Score += 100\n return player_4.Score\n\n# winnersmenu function ---\ndef winnersmenu_game():\n clickedButtonDelay()\n print(\"Finished game, winnersmenu here\")\n\n#gamestate calling ---\n#start game by state (default 0)\nif gamestate == 0:\n startmenu_loop()\nelif gamestate == 1:\n game_loop()","sub_path":"merging_24-1-2017/game + database functies furkan.py","file_name":"game + database functies furkan.py","file_ext":"py","file_size_in_byte":18363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"328509652","text":"import muda\nfrom muda.deformers import BackgroundNoise\nimport jams\n\nfrom pydub.generators import WhiteNoise\n\n#%%\nduration = 5000\nwn = WhiteNoise().to_audio_segment(duration=duration)\nwn.export('noise.wav', format='wav')\n\n#%%\nj_ori = muda.load_jam_audio(jams.JAMS(), 'original.wav')\ndeformer = BackgroundNoise(files='noise.wav', n_samples=1, weight_max=0.05, weight_min=0.01)\njam_out = deformer.transform(j_ori)\n\nfor i, jam_out in enumerate(jam_out):\n muda.save('output_{:02d}.wav'.format(i),\n 'output_{:02d}.jams'.format(i),\n jam_out)","sub_path":"MudaTest.py","file_name":"MudaTest.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"509769792","text":"from __future__ import print_function\nimport sys, os, atexit\nimport pprint as pp\nfrom json import JSONEncoder, loads, dumps\nfrom flask import request, send_from_directory, make_response, Response, Blueprint\n# from flask.json import json_encoder, json_decoder\nfrom flask import json\nfrom werkzeug import secure_filename, FileStorage\nfrom urllib3.exceptions import MaxRetryError\n# from PDB2PQR_web import app\n# import storage_utils\nfrom . import storage_utils\nfrom io import BytesIO\n\nstorage_app = Blueprint('storage_app', __name__)\n\n''' \n Below is the endpoint to interact with the storage container.\n Ideally, this will run within its own container via main.py\n'''\n\nMINIO_URL = os.environ.get('MINIO_URL', 'localhost:9000')\nMINIO_CACHE_DIR = os.environ.get('STORAGE_CACHE_DIR', '/apbs-rest/.minio_cache')\nMINIO_ACCESS_KEY = os.environ.get('MINIO_ACCESS_KEY')\nMINIO_SECRET_KEY = os.environ.get('MINIO_SECRET_KEY')\nJOB_BUCKET_NAME = os.environ.get('MINIO_JOB_BUCKET', 'jobs')\n\nminioClient = storage_utils.get_minio_client(MINIO_URL, MINIO_ACCESS_KEY, MINIO_SECRET_KEY)\nstorageClient = storage_utils.StorageClient(MINIO_URL, MINIO_CACHE_DIR, MINIO_ACCESS_KEY, MINIO_SECRET_KEY)\natexit.register(storageClient.clear_cache)\n\n@storage_app.route('/', methods=['GET'])\n@storage_app.route('/check', methods=['GET'])\ndef is_Alive():\n return '', 200\n\n@storage_app.route('/api/storage//', methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS'])\n@storage_app.route('/api/storage/', methods=['DELETE'])\ndef storage_service(job_id, file_name=None):\n # def storage_service(job_id, file_name=None):\n \"\"\"Endpoint serving as the gateway to storage bucket\"\"\"\n \n if file_name:\n object_name = os.path.join(job_id, file_name)\n # print('%s %s' % (request.method, object_name))\n\n if request.method == 'GET':\n return_json = False\n if 'json' in request.args.keys():\n if request.args['json'].lower() == 'true':\n return_json = True\n\n if not return_json:\n '''send_file_from_directory'''\n # file_path_in_cache = storageClient.fget_object(JOB_BUCKET_NAME, object_name)\n # file_dir = os.path.dirname(file_path_in_cache)\n # return send_from_directory(file_dir, file_path_in_cache.split('/')[-1])\n\n try:\n file_path_in_cache = storageClient.fget_object(JOB_BUCKET_NAME, object_name)\n file_dir = os.path.dirname(file_path_in_cache)\n return send_from_directory(file_dir, file_path_in_cache.split('/')[-1])\n except MaxRetryError:\n return 'Error in retrieving file\\n', 500\n except:\n return 'File %s does not exist\\n' % file_name, 404\n else:\n try:\n file_str = storageClient.get_object(JOB_BUCKET_NAME, object_name)\n file_str_json = { object_name: file_str.decode('utf-8') }\n # response = make_response(JSONEncoder().encode(file_str_json))\n response = make_response( dumps(file_str_json) )\n response.headers['Content-Type'] = 'application/json'\n http_response_code = 200\n # return response, http_response_code\n except MaxRetryError:\n json_string = {object_name: None}\n response = make_response(dumps(json_string))\n response.headers['Content-Type'] = 'application/json'\n http_response_code = 500\n # return response, 500\n except Exception as e:\n # import traceback\n # json_string = {object_name: None, 'error': str(e), 'traceback': traceback.format_exc()}\n json_string = {object_name: None}\n response = make_response(dumps(json_string))\n response.headers['Content-Type'] = 'application/json'\n http_response_code = 500\n # return response, 500\n finally:\n return response, 
http_response_code\n\n elif request.method == 'PUT':\n try:\n payload = loads(request.data)\n except:\n payload = request.data\n\n elif request.method == 'POST':\n EXTENSION_WHITELIST = set(['pqr', 'pdb', 'in', 'p'])\n # pp.pprint(dict(request.files))\n # pp.pprint(request.form['job_id'])\n\n # pp.pprint(request.files.keys())\n print('request.files keys:')\n for key in request.files.keys():\n print(' ', key)\n try:\n file_data = request.files['file_data']\n # print(type(file_data), flush=True)\n except:\n # file_data = BytesIO(request.data)\n # print(request.data.decode('utf-8'))\n\n file_data = FileStorage(\n stream=BytesIO(request.data),\n filename=file_name,\n )\n # print(type(file_data))\n\n if file_data.filename:\n file_name = secure_filename(file_data.filename)\n if file_data.filename and file_name:\n storageClient.put_object(JOB_BUCKET_NAME, object_name, file_data)\n # if file_data.filename and allowed_file(file_name, EXTENSION_WHITELIST):\n # # print('uploading to bucket')\n # storageClient.put_object(JOB_BUCKET_NAME, object_name, file_data)\n # elif not allowed_file(file_name, EXTENSION_WHITELIST):\n # return 'Unsupported media type', 415\n\n # time.sleep(1)\n return 'Success', 201\n\n elif request.method == 'DELETE':\n object_list = []\n if file_name is None:\n # get list of objects with prefix\n # for each object, delete from bucket\n job_objects = storageClient.list_objects(JOB_BUCKET_NAME, prefix=job_id+'/')\n for obj in job_objects:\n object_list.append(obj.object_name)\n\n else:\n # delete single object from bucket\n object_list.append(object_name)\n\n storageClient.remove_objects(JOB_BUCKET_NAME, object_list)\n\n return 'Success', 204\n\n elif request.method == 'OPTIONS':\n options = ['GET', 'PUT', 'POST', 'DELETE']\n response = make_response()\n response = storage_utils.get_request_options(response, options)\n http_response_code = 204\n \n return response, http_response_code","sub_path":"src/storage/service/storage_service.py","file_name":"storage_service.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"512733634","text":"class Solution:\n def asteroidCollision(self, asteroids: List[int]) -> List[int]:\n stack = []\n i = 0\n \n # astroid + ile stack'e at\n # değilse collision var mı\n # varsa durum ne\n while i < len(asteroids):\n if asteroids[i] > 0:\n stack.append(asteroids[i])\n else:\n while len(stack) > 0 and stack[-1] > 0 and stack[-1] < abs(asteroids[i]):\n stack.pop()\n \n if len(stack) <= 0 or stack[-1] < 0:\n stack.append(asteroids[i])\n elif stack[-1] == abs(asteroids[i]):\n stack.pop()\n i += 1\n \n return stack\n","sub_path":"asteroidCollision.py","file_name":"asteroidCollision.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"108269562","text":"import os\nimport sys\nimport subprocess as sp\n\nfrom genomepy.plugin import Plugin\nfrom genomepy.utils import mkdir_p, cmd_ok, run_index_cmd\n\nclass Hisat2Plugin(Plugin):\n def after_genome_download(self, genome):\n if not cmd_ok(\"hisat2-build\"):\n return\n\n # Create index dir\n index_dir = genome.props[\"hisat2\"][\"index_dir\"]\n index_name = genome.props[\"hisat2\"][\"index_name\"] \n mkdir_p(index_dir)\n\n # Create index\n cmd = \"hisat2-build {} {}\".format(genome.filename, index_name)\n run_index_cmd(\"hisat2\", cmd)\n \n def get_properties(self, genome):\n props = {\n \"index_dir\": os.path.join(\n os.path.dirname(genome.filename), \"index\", \"hisat2\"\n ),\n \"index_name\": os.path.join(\n os.path.dirname(genome.filename), \"index\", \"hisat2\", genome.name\n ),\n }\n return props\n","sub_path":"genomepy/plugins/hisat2.py","file_name":"hisat2.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"64746175","text":"\"\"\" Field module. \"\"\"\n\n# ISC License\n#\n# Copyright (c) 2020, Paul Wilhelm, M. Sc. \n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nimport numpy as np\nfrom numba import jit, prange, set_num_threads\nfrom magneticalc.Assert_Dialog import Assert_Dialog\nfrom magneticalc.BiotSavart_CUDA import BiotSavart_CUDA\nfrom magneticalc.BiotSavart_JIT import BiotSavart_JIT\nfrom magneticalc.Debug import Debug\nfrom magneticalc.Theme import Theme\n\n\nclass Field:\n \"\"\" Field class. \"\"\"\n\n def __init__(self, backend: int, _type: int, distance_limit: float, length_scale: float):\n \"\"\"\n Initializes an empty field.\n\n @param backend: Backend index (0: JIT; 1: JIT + CUDA)\n @param _type: Field type to display (0: A-field; 1: B-field)\n @param distance_limit: Distance limit (mitigating divisions by zero)\n @param length_scale: Length scale (m)\n \"\"\"\n Debug(self, \": Init\")\n\n self._backend = backend\n self._type = _type\n self._distance_limit = distance_limit\n self._length_scale = length_scale\n\n self._total_limited = None\n self._vectors = None\n\n def is_valid(self) -> bool:\n \"\"\"\n Indicates valid data for display.\n\n @return: True if data is valid for display, False otherwise\n \"\"\"\n return \\\n self._total_limited is not None and \\\n self._vectors is not None\n\n def invalidate(self):\n \"\"\"\n Resets data, hiding from display.\n \"\"\"\n Debug(self, \".invalidate()\", color=(128, 0, 0))\n\n self._total_limited = None\n self._vectors = None\n\n def get_type(self) -> int:\n \"\"\"\n Gets field type.\n\n @return: Field type (0: A-field; 1: B-field)\n \"\"\"\n return self._type\n\n def get_units(self) -> str:\n \"\"\"\n Gets field units.\n\n @return: Field units\n \"\"\"\n return [\n \"Tm\", # A-field: Tesla · meter\n \"T\" # B-field: Tesla\n ][self._type]\n\n def get_vectors(self):\n \"\"\"\n Gets field vectors. 
(The selected field type determined which field was calculated.)\n\n @return: Ordered list of 3D vectors (field vectors & corresponding sampling volume points have the same indices)\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated field\")\n\n return self._vectors\n\n def get_total_limited(self) -> int:\n \"\"\"\n Gets total number of distance limited points.\n\n @return: Total number of distance limited points\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated field\")\n\n return self._total_limited\n\n # ------------------------------------------------------------------------------------------------------------------\n\n def recalculate(self, wire, sampling_volume, progress_callback, num_cores: int) -> bool:\n \"\"\"\n Recalculates field vectors.\n\n @param wire: Wire\n @param sampling_volume: Sampling volume\n @param progress_callback: Progress callback\n @param num_cores: Number of cores to use for multiprocessing\n @return: True if successful, False if interrupted (CUDA backend currently not interruptable)\n \"\"\"\n\n # Default to JIT backend if CUDA backend is selected but not available\n if self._backend == 1:\n if not BiotSavart_CUDA.is_available():\n Debug(\n self,\n f\".recalculate(): WARNING: CUDA backend not available, defaulting to JIT backend\",\n color=Theme.WarningColor,\n force=True\n )\n self._backend = 0\n\n if self._backend == 0:\n\n # Initialize Biot-Savart JIT backend\n biot_savart = BiotSavart_JIT(\n self._type,\n self._distance_limit,\n self._length_scale,\n wire.get_dc(),\n wire.get_elements(),\n sampling_volume.get_points(),\n sampling_volume.get_permeabilities(),\n progress_callback\n )\n\n # Fetch result using Biot-Savart JIT backend\n set_num_threads(num_cores)\n tup = biot_savart.get_result()\n\n elif self._backend == 1:\n\n # Initialize Biot-Savart CUDA backend\n biot_savart = BiotSavart_CUDA(\n self._type,\n self._distance_limit,\n self._length_scale,\n wire.get_dc(),\n wire.get_elements(),\n sampling_volume.get_points(),\n sampling_volume.get_permeabilities(),\n progress_callback\n )\n\n # Fetch result using Biot-Savart JIT backend\n set_num_threads(num_cores)\n tup = biot_savart.get_result()\n\n else:\n\n Debug(self, f\".recalculate(): No such backend: {self._backend}\", color=Theme.WarningColor, force=True)\n return False\n\n # Handle interrupt\n if tup is None:\n return False\n\n self._total_limited = tup[0]\n self._vectors = tup[1]\n\n # Prints the sampling volume points, current elements and field vectors; may be used for debugging:\n \"\"\"\n def print_array(array): return \"np.array([\" + \",\".join([f\"[{p[0]},{p[1]},{p[2]}]\" for p in array]) + \"])\"\n\n element_centers = [element[0] for element in wire.get_elements()]\n element_directions = [element[1] for element in wire.get_elements()]\n\n import sys\n import numpy\n numpy.set_printoptions(threshold=sys.maxsize)\n\n print(\"sampling_volume_points =\", print_array(sampling_volume.get_points()))\n print(\"element_centers =\", print_array(element_centers))\n print(\"element_directions =\", print_array(element_directions))\n print(\"vectors =\", print_array(self._vectors))\n \"\"\"\n\n return True\n\n # ------------------------------------------------------------------------------------------------------------------\n\n @staticmethod\n @jit(nopython=True, parallel=True)\n def get_arrows(\n sampling_volume_points,\n field_vectors,\n line_pairs,\n head_points,\n arrow_scale: float,\n magnitude_limit: float\n ):\n \"\"\"\n Returns the field arrow parameters needed by 
L{VispyCanvas}.\n\n @param sampling_volume_points: Sampling volume points\n @param field_vectors: Field vectors\n @param line_pairs: Arrow line pairs (ordered list of arrow start/stop 3D points)\n @param head_points: Arrow head points (ordered list of arrow stop 3D points)\n @param arrow_scale: Arrow scale\n @param magnitude_limit: Magnitude limit (mitigating divisions by zero)\n \"\"\"\n for i in prange(len(field_vectors)):\n\n # Calculate field vector magnitude (mitigating divisions by zero)\n field_vector_length = np.sqrt(\n field_vectors[i][0] ** 2 + field_vectors[i][1] ** 2 + field_vectors[i][2] ** 2\n )\n if field_vector_length < magnitude_limit:\n field_vector_length = magnitude_limit\n\n # Calculate normalized field direction\n field_direction_norm = field_vectors[i] / field_vector_length\n\n # Calculate arrow start & end coordinates\n p_start = sampling_volume_points[i] + field_direction_norm / 2 / 2 * arrow_scale\n p_end = sampling_volume_points[i] - field_direction_norm / 2 / 2 * arrow_scale\n\n # Populate arrow line & head coordinates\n line_pairs[2 * i + 0] = p_start\n line_pairs[2 * i + 1] = p_end\n head_points[i] = p_end\n\n return line_pairs, head_points\n","sub_path":"magneticalc/Field.py","file_name":"Field.py","file_ext":"py","file_size_in_byte":8507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"40492549","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# @author: Manuel Guenther \n# @date: Wed May 1 11:33:00 CEST 2013\n#\n# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, version 3 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nfrom __future__ import print_function\n\n# import required bob modules\nimport bob.db.atnt\nimport bob.io.base\nimport bob.io.image\nimport bob.ip.base\nimport bob.ip.gabor\nimport bob.measure\n\nimport os, sys\nimport numpy, math\nimport matplotlib\nmatplotlib.use('pdf')\n# enable LaTeX interpreter\nmatplotlib.rc('text', usetex=True)\nmatplotlib.rc('font', family='serif')\nmatplotlib.rc('lines', linewidth = 4)\nfrom matplotlib import pyplot\n\nfrom .utils import atnt_database_directory, load_images\n\n\n# define Gabor wavelet transform class globally since it is reused for all images\ngabor_wavelet_transform = bob.ip.gabor.Transform(k_max = 0.25 * math.pi)\n# pre-allocate Gabor wavelet transform image in the desired size\ntrafo_image = numpy.ndarray((gabor_wavelet_transform.number_of_wavelets, 112, 92), numpy.complex128)\n\ndef extract_feature(image, extractor):\n \"\"\"Extracts the Gabor graphs from the given image\"\"\"\n\n # perform Gabor wavelet transform on the image\n gabor_wavelet_transform.transform(image, trafo_image)\n\n # extract the Gabor graphs from the feature image\n gabor_graph = extractor.extract(trafo_image)\n\n # return the extracted graph\n return gabor_graph\n\n\n# define a certain Gabor jet similarity function that should be used\nSIMILARITY_FUNCTION = bob.ip.gabor.Similarity('PhaseDiffPlusCanberra', gabor_wavelet_transform)\n\ndef main():\n \"\"\"This function will perform Gabor graph comparison test on the AT&T database.\"\"\"\n\n # Check the existence of the AT&T database and download it if not\n # Also check if the AT&T database directory is overwritten by the command line\n image_directory = atnt_database_directory(sys.argv[1] if len(sys.argv) > 1 else None)\n\n # use the bob.db interface to retrieve information about the Database\n db = bob.db.atnt.Database(original_directory = image_directory)\n\n # The protocol does not exist for AT&T, but to be able to exchange the database, we define it here.\n protocol = None\n\n # The group is 'dev', throughout\n group = 'dev'\n\n # The images of the AT&T database are already cropped, so we don't need to specify a face cropper.\n face_cropper = None\n # For other databases you might want to use:\n# face_cropper = bob.ip.base.FaceEyesNorm(crop_size = (80,64), right_eye = (16,15), left_eye = (16,48))\n\n # After image cropping, we apply an image preprocessor\n preprocessor = bob.ip.base.TanTriggs()\n \n # The image resolution of the (cropped) images, which might change with the database\n image_resolution = (112, 92)\n\n #####################################################################\n ### Training\n\n # for Gabor graphs, no training is required.\n print(\"Creating Gabor graph machine\")\n # create a machine that will produce Gabor 
graphs with inter-node distance (4,4)\n graph_extractor = bob.ip.gabor.Graph(first=(8,6), last=(image_resolution[0]-8, image_resolution[1]-6), step=(4,4))\n\n #####################################################################\n ### extract Gabor graph features for all model and probe images\n\n #####################################################################\n ### extract eigenface features of model and probe images\n\n model_ids = db.model_ids(groups = group)\n print(\"Extracting %d models\" % len(model_ids))\n # generate models for each model ID\n models = {}\n for model_id in model_ids:\n # load enroll images for the current model ID\n enroll_images = load_images(db, db.enroll_files(protocol = protocol, groups = group, model_id = model_id), face_cropper, preprocessor)\n # extract features for all enroll images and store all of them\n models[model_id] = [extract_feature(enroll_image, graph_extractor) for enroll_image in enroll_images]\n\n probe_files = db.probe_files(protocol = protocol, groups = group)\n print(\"Extracting %d probes\" % len(probe_files))\n probe_images = load_images(db, probe_files, face_cropper, preprocessor)\n # extract probe features and store them by probe ID (which is the File.id)\n probes = {}\n for i in range(len(probe_files)):\n probe_id = probe_files[i].id\n probes[probe_id] = extract_feature(probe_images[i], graph_extractor)\n\n #####################################################################\n ### compute scores, we here choose a simple Euclidean distance measure\n positive_scores = []\n negative_scores = []\n\n print(\"Computing scores\")\n\n # iterate through models and probes and compute scores\n model_count = 1\n for model_id, model in models.items():\n # provide status information\n print(\"\\rModel\", model_count, \"of\", len(models), end='')\n sys.stdout.flush()\n model_count += 1\n\n # the client ID that is attached to the model\n model_client_id = db.get_client_id_from_model_id(model_id)\n # get the probe files, which should be compared with this model\n model_probe_files = db.probe_files(protocol = protocol, groups = group, model_id = model_id)\n for probe_file in model_probe_files:\n # get the according probe feature using the File.id of the probe file\n probe_feature = probes[probe_file.id]\n # compute local scores for each model gabor jet and each probe jet\n score = 0.\n for gabor_jet_index in range(len(probe_feature)):\n scores = []\n # compute the similarity to all model jets\n for model_feature_index in range(len(model)):\n scores.append(SIMILARITY_FUNCTION(model[model_feature_index][gabor_jet_index], probe_feature[gabor_jet_index]))\n # .. 
and take the most similar one\n score += max(scores)\n # the final score is computed as the average over all positions, taking the most similar model jet\n score /= len(probe_feature)\n\n # check if this is a positive score\n if model_client_id == probe_file.client_id:\n positive_scores.append(score)\n else:\n negative_scores.append(score)\n\n print(\"\\nEvaluation\")\n # convert list of scores to numpy arrays\n positives = numpy.array(positive_scores)\n negatives = numpy.array(negative_scores)\n\n # compute equal error rate\n threshold = bob.measure.eer_threshold(negatives, positives)\n FAR, FRR = bob.measure.farfrr(negatives, positives, threshold)\n\n print(\"Result: FAR\", FAR, \"and FRR\", FRR, \"at threshold\", threshold)\n\n # plot ROC curve\n bob.measure.plot.roc(negatives, positives, CAR=True)\n pyplot.xlabel(\"False Acceptance Rate (\\%)\")\n pyplot.ylabel(\"Correct Acceptance Rate (\\%)\")\n pyplot.title(\"ROC Curve for Gabor phase based AT\\&T Verification Experiment\")\n pyplot.grid()\n pyplot.axis([0.1, 100, 0, 100]) #xmin, xmax, ymin, ymax\n\n # save plot to file\n pyplot.savefig(\"gabor_graph.pdf\")\n print(\"Saved figure 'gabor_graph.pdf'\")\n\n # show ROC curve.\n # enable it if you like. This will open a window and display the ROC curve\n# pyplot.show()\n\n","sub_path":"bob/example/faceverify/gabor_graph.py","file_name":"gabor_graph.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"16272779","text":"'''\njson_to_csv.py\n'''\n\nimport json\nimport io\nimport csv\n\nfrom newdoc2vec.settings import DATASET_LOCATION, DATASET_NAME\n\n#Settings\nBASE_LOC = DATASET_LOCATION+'/json/'+DATASET_NAME+'/'\nOUT_LOC = DATASET_LOCATION\n# OUT_LOC = '../'+DATASET_LOCATION\n\nREPORT_FILE = 'reports'\nREPORT_ATTRBS = ['opening','current_status','current_resolution']\nATTRBS = ['product','component','severity','op_sys','short_desc','resolution']\n\nT_ATTRBS = REPORT_ATTRBS+['resolved_at','resolution_time']+ATTRBS\nN_AN_ATTRBS = ['short_desc','resolution']\n\n\n\ndata = {}\nanom_atrb_map = {}\n\nnorm_file = open(OUT_LOC+'normal.csv','w',newline='',encoding='utf-8')\nanom_file = open(OUT_LOC+'anom.csv','w',newline='',encoding='utf-8')\ncorr_anom_file = open(OUT_LOC+'corr_anom.csv','w',newline='',encoding='utf-8') #correct values for anomalour\n\nnorm_writer = csv.DictWriter(norm_file,quoting=csv.QUOTE_ALL, fieldnames=T_ATTRBS)\nanom_writer = csv.DictWriter(anom_file,quoting=csv.QUOTE_ALL, fieldnames=T_ATTRBS)\ncorr_anom_writer = csv.DictWriter(corr_anom_file,quoting=csv.QUOTE_ALL, fieldnames=T_ATTRBS)\n\nnorm_writer.writeheader()\nanom_writer.writeheader()\ncorr_anom_writer.writeheader()\n\n## Helpers ##\ndef save_bug(bug_id,anomaly):\n '''\n Convert, Filter & Save Bug-id\n :param bug_id:\n :param anomaly:\n :return: Nrows written if bug is saved else False\n '''\n\n row = {}\n corr_row = {}\n for atrb in ATTRBS:\n if anomaly and atrb not in N_AN_ATTRBS:\n row[atrb] = data[atrb][bug_id][0]['what']\n else:\n row[atrb] = data[atrb][bug_id][-1]['what']\n corr_row[atrb] = data[atrb][bug_id][-1]['what']\n\n for atrb in REPORT_ATTRBS:\n corr_row[atrb] = row[atrb] = data[REPORT_FILE][bug_id][atrb]\n corr_row['resolved_at'] = row['resolved_at'] = data['resolution'][bug_id][-1]['when']\n corr_row['resolution_time']= row['resolution_time'] = row['resolved_at'] - row['opening'] if row['resolved_at'] else None\n\n #Filter & Convert rows\n row = filt_conv_component(conv_op_sys_other(filt_current_status(filt_not_eclipse(row))))\n corr_row = filt_conv_component(conv_op_sys_other(filt_current_status(filt_not_eclipse(corr_row))))\n if not row or not corr_row:\n return False\n\n if anomaly:\n corr_anom_writer.writerow(corr_row)\n return anom_writer.writerow(row)\n return norm_writer.writerow(row)\n\n## Convert ##\ndef conv_op_sys_other(row):\n if not row:\n return False\n CONV_OP_SYS = ['windows 95','linux QT','symbianos s60',\n 'windows server 2008','windows mobile 5.0',\n 'windows mobile 2003','windows me',\n 'Other']\n if row['op_sys'].lower() in CONV_OP_SYS:\n row['op_sys'] = 'other'\n return row\n\ndef filt_conv_component(row):\n if not row:\n return False\n if row['component'] == 'PMC':\n return False\n if row['component'] == 'Incubator':\n row['component'] = 'Incubators'\n return row\n\n## Filters ##\ndef filt_current_status(row):\n if not row:\n return False\n if row['current_status'] == 'NEW':\n return False\n return row\n\ndef filt_not_eclipse(row):\n if not row:\n return False\n if row['current_resolution'] == 'NOT_ECLIPSE':\n return False\n return row\n\n## Main ##\n#Populating Bug Details\nwith io.open(BASE_LOC+REPORT_FILE+'.json','r',encoding='utf-8') as data_file:\n data[REPORT_FILE] = json.load(data_file)[REPORT_FILE]\n data_file.close()\n\n#Populating Attributes\nfor atrb in ATTRBS:\n with io.open(BASE_LOC+atrb+'.json','r',encoding='utf-8') as data_file:\n data[atrb] = json.load(data_file)[atrb]\n data_file.close()\n\n#Saving Data\nn_bugs = 0\nanom_bugs = 0\nfor bug_id in 
data['short_desc'].keys():\n anomaly = False\n for atrb in ATTRBS:\n if atrb not in N_AN_ATTRBS and len(data[atrb][bug_id]) > 1:\n anomaly = True\n break\n\n if save_bug(bug_id, anomaly):\n n_bugs += 1\n if anomaly:\n anom_bugs += 1\n\nprint(\"Total Bugs\", n_bugs)\nprint(\"Anom Bugs\",anom_bugs)\nnorm_file.close()\nanom_file.close()\ncorr_anom_file.close()\n","sub_path":"tools/json_to_csv.py","file_name":"json_to_csv.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"402645434","text":"import argparse\nfrom nilearn import surface\nimport pickle as pkl\nimport numpy as np\nimport os.path as op\nimport os\nimport pandas as pd\nfrom scipy import interpolate\nfrom itertools import product\nfrom tqdm import tqdm\nfrom skimage.filters import gabor_kernel\nfrom skimage.io import imsave\nimport scipy.ndimage as ndi\nimport sharedmem\n\n\ndef main(derivatives,\n subject,\n session):\n\n grid_resolution = .35\n max_frequency = 1.\n min_frequency = 1/10.\n n_frequencies = 50\n n_orientations = 16\n scale_factor = (min_frequency/max_frequency)**(-1/(n_frequencies-1))\n frequencies_mm = scale_factor**-np.arange(0, n_frequencies) * max_frequency\n orientations = np.linspace(0, np.pi, n_orientations, endpoint=False)\n\n frequencies_pix = frequencies_mm * grid_resolution\n\n for hemi in ['lh', 'rh']:\n\n xy = pd.read_pickle(op.join(derivatives, 'coordinate_patches', 'sub-{subject}',\n 'anat', 'sub-{subject}_hemi-{hemi}_coordinatepatch.pkl').format(**locals()))\n\n depths = np.round(np.linspace(0, 1, 8), 3)[1:-1]\n\n pb = tqdm(total=len(frequencies_pix) * len(orientations) * len(depths))\n\n mask_name = 'V1'+hemi[:1]\n\n results = []\n\n for depth in depths:\n zmap = op.join(derivatives, 'sampled_giis', 'sub-{subject}', 'ses-{session}',\n 'func',\n 'sub-{subject}_ses-{session}_left_over_right_desc-zmap-depth-{depth}_hemi-{hemi}.gii'). \\\n format(**locals())\n zmap_v = surface.load_surf_data(zmap)\n zmap = pd.DataFrame(zmap_v, columns=['z_value'])\n df = zmap.merge(xy, left_index=True, right_index=True)\n df = df.loc[(df.z == 0) & df[mask_name]]\n\n x_grid, y_grid = np.meshgrid(np.arange(df['x'].min(), df['x'].max(), grid_resolution),\n np.arange(df['y'].min(), df['y'].max(), grid_resolution))\n\n\n data = interpolate.griddata(df[['x', 'y']],\n df['z_value'],\n np.vstack((x_grid.ravel(), y_grid.ravel())).T,\n fill_value=0,\n method='linear').reshape(x_grid.shape)\n\n results_dir = op.join(derivatives, 'zmap_spatfreq',\n 'sub-{subject}',\n 'ses-{session}',\n 'func').format(**locals())\n\n if not op.exists(results_dir):\n os.makedirs(results_dir)\n\n print('Writing zmap...')\n imsave(op.join(results_dir, f'sub-{subject}_ses-{session}_hemi-{hemi}_depth-{depth}_desc-zmap2d_image.png'), data)\n\n pars = [(freq, ori) for freq, ori in product(frequencies_pix, orientations)]\n\n n_jobs = 16\n\n with sharedmem.MapReduce(np=n_jobs) as pool:\n def reduce(r):\n pb.update()\n return r\n\n def get_power(pars):\n freq, ori = pars\n kernel = gabor_kernel(freq, ori, n_stds=3)\n filtered_real = ndi.convolve(data, np.real(kernel), mode='wrap')\n filtered_imag = ndi.convolve(data, np.imag(kernel), mode='wrap')\n power = np.sqrt(filtered_real**2 + filtered_imag**2)\n power[data == 0] = np.nan\n\n power = pd.DataFrame([power.ravel()],\n index=pd.MultiIndex.from_tuples([(depth, freq/grid_resolution, ori)], names=['depth', 'frequency', 'orientation']))\n\n return power\n\n results += pool.map(get_power, pars, reduce)\n\n results = pd.concat(results, axis=0)\n\n results_vertex = interpolate.griddata(np.vstack((x_grid.ravel(),\n y_grid.ravel())).T,\n results.T,\n df[['x', 'y']])\n\n results_vertex = pd.DataFrame(results_vertex.T,\n index=results.index,\n columns=df.index)\n results_vertex.loc[:, df[df['z_value'] == 0].index] = np.nan\n\n\n results_vertex.to_pickle(op.join(results_dir,\n 'sub-{subject}_ses-{session}_hemi-{hemi}_energies.pkl').format(**locals()))\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"subject\", \n type=str,\n 
help=\"subject to process\")\n parser.add_argument(\"session\", \n type=str,\n help=\"subject to process\")\n parser.add_argument(\"--derivatives\", \n type=str,\n default='/derivatives',\n help=\"Folder where derivatives reside\")\n args = parser.parse_args()\n\n main(derivatives=args.derivatives,\n subject=args.subject,\n session=args.session)\n","sub_path":"analysis/surface_convolutions/gabor_filtering.py","file_name":"gabor_filtering.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"290924609","text":"from decimal import Decimal\n\nfrom cryptofeed.defines import BUY, LIQUIDATIONS, SELL, TRADES\nfrom cryptofeed.exchanges import Deribit\nfrom cryptofeed.standards import timestamp_normalize\n\n\nclass DeribitBlotter(Deribit):\n async def _trade(self, msg: dict, timestamp: float):\n \"\"\"\n {\n \"params\":\n {\n \"data\":\n [\n {\n \"trade_seq\": 933,\n \"trade_id\": \"9178\",\n \"timestamp\": 1550736299753,\n \"tick_direction\": 3,\n \"price\": 3948.69,\n \"instrument_name\": \"BTC-PERPETUAL\",\n \"index_price\": 3930.73,\n \"direction\": \"sell\",\n \"amount\": 10\n }\n ],\n \"channel\": \"trades.BTC-PERPETUAL.raw\"\n },\n \"method\": \"subscription\",\n \"jsonrpc\": \"2.0\"\n }\n \"\"\"\n for trade in msg[\"params\"][\"data\"]:\n price = Decimal(trade[\"price\"])\n volume = Decimal(trade[\"amount\"])\n notional = volume / price\n await self.callback(\n TRADES,\n feed=self.id,\n uid=trade[\"trade_id\"],\n symbol=trade[\"instrument_name\"], # Do not normalize\n timestamp=timestamp_normalize(self.id, trade[\"timestamp\"]),\n price=price,\n volume=volume,\n notional=notional,\n tickRule=1 if trade[\"direction\"] == \"buy\" else -1,\n )\n if \"liquidation\" in trade:\n await self.callback(\n LIQUIDATIONS,\n feed=self.id,\n symbol=trade[\"instrument_name\"],\n side=BUY if trade[\"direction\"] == \"buy\" else SELL,\n leaves_qty=Decimal(trade[\"amount\"]),\n price=Decimal(trade[\"price\"]),\n order_id=trade[\"trade_id\"],\n timestamp=timestamp_normalize(self.id, trade[\"timestamp\"]),\n receipt_timestamp=timestamp,\n )\n","sub_path":"cryptoblotter/exchanges/deribit.py","file_name":"deribit.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"17403823","text":"#!/usr/bin/env python\n\n# Advent of Code 2019 - Day 1 - Puzzle 1\n\ndef calculate_fuel(mass):\n divthree = mass/3\n return divthree - 2\n\ntotal_fuel = 0\nwith open('p1_input.txt', 'r') as fin:\n for line in fin:\n total_fuel += calculate_fuel(int(line))\n\nprint(\"Total Fuel: {}\".format(total_fuel))\n","sub_path":"Day1/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"202928381","text":"#\n# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed \n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either \n# express or implied. See the License for the specific language governing \n# permissions and limitations under the License.\n#\n\n#\n# Data Protection\n# kms_cmk_rotation_enabled\n#\n\nimport json\nimport boto3\nimport sys\nimport time\nfrom datetime import datetime\n\nSTS_SESSION = ''\n\ndef get_sts_session(event, region_name=False):\n sts = boto3.client(\"sts\")\n RoleArn = event[\"executionRoleArn\"]\n if not region_name:\n region_name = event['configRuleArn'].split(\":\")[3]\n response = sts.assume_role(\n RoleArn=RoleArn,\n RoleSessionName='ComplianceAudit',\n DurationSeconds=900)\n sts_session = boto3.Session(\n aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken'],\n region_name=region_name,\n botocore_session=None,\n profile_name=None)\n return sts_session\n\ndef kms_cmk_rotation_enabled(event):\n configuration_item = {}\n\n regions = STS_SESSION.client(\"ec2\").describe_regions()['Regions']\n for region in regions:\n region_session = get_sts_session(event, region['RegionName'])\n kms_client = region_session.client('kms')\n keys = kms_client.list_keys()\n if len(keys['Keys']) == 0:\n continue\n else:\n for key in keys['Keys']:\n eval = {}\n eval[\"ComplianceResourceType\"] = \"AWS::KMS::Key\"\n eval[\"ComplianceResourceId\"] = key['KeyArn']\n if kms_client.describe_key(KeyId=key['KeyId'])[\"KeyMetadata\"][\"KeyManager\"] == \"AWS\":\n continue\n if kms_client.get_key_rotation_status(KeyId=key['KeyId'])['KeyRotationEnabled'] == True:\n response = {\n \"ComplianceType\": \"COMPLIANT\",\n \"Annotation\": \"The yearly rotation is activated for this key.\"\n }\n else:\n response = {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The yearly rotation is not activated for this key.\"\n }\n eval[\"ComplianceType\"]=response[\"ComplianceType\"]\n eval[\"Annotation\"]=response[\"Annotation\"]\n eval[\"OrderingTimestamp\"]=json.loads(event[\"invokingEvent\"])['notificationCreationTime']\n put_eval(eval, result_token) \n\ndef put_eval(eval, token):\n config = STS_SESSION.client(\"config\")\n config.put_evaluations(\n Evaluations=[\n {\n \"ComplianceResourceType\": eval[\"ComplianceResourceType\"],\n \"ComplianceResourceId\": eval[\"ComplianceResourceId\"],\n \"ComplianceType\": eval[\"ComplianceType\"],\n \"Annotation\": eval[\"Annotation\"],\n \"OrderingTimestamp\": eval[\"OrderingTimestamp\"]\n },\n ],\n ResultToken=token\n )\n \n# This is the handler that's invoked by Lambda\ndef lambda_handler(event, context):\n global STS_SESSION\n global result_token\n if \"resultToken\" in event:\n result_token = event[\"resultToken\"]\n\n rule_parameters={}\n if 'ruleParameters' in event:\n rule_parameters = json.loads(event['ruleParameters'])\n\n STS_SESSION = get_sts_session(event)\n \n 
kms_cmk_rotation_enabled(event)\n","sub_path":"compliance-account-rulesets-setup/rule-code/KMS_CMK_ROTATION_ENABLED.py","file_name":"KMS_CMK_ROTATION_ENABLED.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"487589164","text":"import os\nimport requests\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nimport argparse\nimport re\n\ndef argument_choice():\n \"\"\"\n Function dedicated to being able to take the input from the user, whether be from the command line or from the python program directly.\n \"\"\"\n parse = argparse.ArgumentParser(description=\"Script that allows a user to download all the possible chapters in a manga from Mangadex.org.\")\n parse.add_argument('url', help=\"Enter the Mangadex URL\", nargs=\"?\", const=None, default=None)\n arguments = parse.parse_args()\n\n if arguments.url != None: #If the input is from command line\n answer = arguments.url\n else: #If the input is via python itself\n py_input = input(\"Enter the full URL from Mangadex: \")\n answer = py_input\n \n if re.match(r\"(https:)//(mangadex).(com|org)/(chapter)/(\\d{6})/(\\d)\", answer) is not None: #There is a valid URL\n return answer\n else:\n print(\"The page is not valid for scraping, please make sure the text is from mangadex's URL.\")\n try:\n input(\"Press Enter to continue\")\n except SyntaxError:\n pass\n exit()\n\ndef browser_shutdown(web_browser): # Closing Everything\n web_browser.close()\n web_browser.quit()\n\nmangadex_url = argument_choice() # URL source for the manga \n\ndriver = webdriver.Firefox() # Utilizing Firefox as the main driver\ndriver.get(mangadex_url) # Opening the URL of the manga\nstart_time = time.perf_counter() # Timer to find out how long this takes\n\n# Fixing the settings to make it Long Scroll\ndriver.find_element_by_id('settings-button').click() # Goes to the Settings\ndriver.find_element_by_xpath('/html/body/div[1]/div[4]/div/div/div[2]/div[1]/div[4]/div/div/button[3]').click() # Clicks the Long Page\ndriver.find_element_by_xpath('/html/body/div[1]/div[4]/div/div/div[1]/button').click() # Closes the settings\n\nchapter_num = 1\nmulti_page_flag = True\n\nwhile multi_page_flag == True:\n try:\n time.sleep(2)\n \n # Page Loading and Jumping\n total_manga_pages = int(driver.find_element_by_class_name('total-pages').text) # Get the total amount of pages.\n current_manga_pages = int(driver.find_element_by_class_name('current-page').text) # Get the current page from the jump\n while current_manga_pages < total_manga_pages: # Keeps scrolling down till it satisfies the condition\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight)\")\n time.sleep(1.5)\n current_manga_pages = int(driver.find_element_by_class_name('current-page').text) # Get the current page from the jump\n print(f\"Loaded {current_manga_pages} pages out of {total_manga_pages}.\\n\")\n time.sleep(1)\n\n # The Beautiful Soup Scraping Section\n html_source = driver.page_source\n soup = BeautifulSoup(html_source, 'html.parser')\n\n # Laddering Down Pt 1: For the images\n body = soup.body\n content = body.find('div', id='content')\n main_row = content.find('div', class_='reader-main col row no-gutters flex-column flex-nowrap noselect')\n manga_scraping_zone = main_row.find('div', class_='reader-images col-auto row no-gutters flex-nowrap m-auto text-center cursor-pointer directional')\n\n # Laddering Down Pt 2: For the title\n content_2 = content.find('div', class_='container reader-controls-container p-0')\n content_3 = content_2.find('div', class_='reader-controls-wrapper bg-reader-controls row no-gutters flex-nowrap')\n content_4 = content_3.find('div', class_='reader-controls col row no-gutters flex-column flex-nowrap')\n content_5 = content_4.find('div', 
class_='reader-controls-title col-auto text-center p-2')\n content_6 = content_5.find('div', style='font-size:1.25em')\n title = content_6.find('a', class_='manga-link')['title']\n print(title)\n\n # Folder Creating for the Images to be saved\n character_source = (title)\n try:\n os.mkdir(character_source) # Makes the directory of the title, the directory is placed where the script was exectuted.\n if chapter_num == 1:\n print(\"Directory \" , character_source , \" has been created.\\n\")\n except FileExistsError:\n if chapter_num == 1:\n print(\"Directory \" , character_source , \" already exists.\\n\")\n\n try:\n os.mkdir(character_source + f\"\\\\Chapter_{chapter_num}\")\n print(f\"Chapter {chapter_num} has been created.\\n\")\n except FileExistsError:\n print(f\"Chapter {chapter_num} already exists.\\n\")\n\n for manga_images in manga_scraping_zone.find_all('div', class_='reader-image-wrapper col-auto my-auto justify-content-center align-items-center noselect nodrag row no-gutters'):\n x = manga_images.find('img', class_='noselect nodrag cursor-pointer')['src']\n print(x)\n filename = x.split('/')\n if os.path.exists(f\"{character_source}\\\\Chapter_{chapter_num}\\\\\" + filename[5]):\n print(\"File already Exist.\\n\")\n else:\n # The downloading segment.\n print(\"Downloading...\\n\")\n second_request = requests.get(x)\n with open(f\"{character_source}\\\\Chapter_{chapter_num}\\\\\" + filename[5], 'wb') as f:\n f.write(second_request.content)\n\n # Find if there are multiple pages for the manga\n driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div[2]/div[2]/a[2]').click()\n multi_page_flag = True\n chapter_num += 1\n except:\n if chapter_num == 1:\n print(\"An error occurred while attempting to access page, try again later or in another page\")\n multi_page_flag = False\n \n\n# End of the line.\nbrowser_shutdown(driver) # Closes the browser completely.\nend_time = time.perf_counter()\nif chapter_num != 1:\n print(f\"All possible images have successfully been downloaded in {round(end_time-start_time, 2)} seconds.\")\nend = input(\"Press Enter to end the program \\n\")","sub_path":"Mangadex Image Scrapper Test (Full Manga).py","file_name":"Mangadex Image Scrapper Test (Full Manga).py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"407581421","text":"#!/usr/bin/env python3\n\n#-----------------------------------\n# @nagayosi 2018.2.11\n#\n# how to use: python3 main.py --train --iter 1000 --test\n# if you only train: python3 main.py --train --iter 1000\n# if you only test: python3 main.py --test\n#-----------------------------------\n\nimport chainer\nfrom chainer.dataset import convert\nimport chainer.links as L\nimport chainer.functions as F\nfrom chainer.links import Convolution2D as Conv2D, Deconvolution2D as Deconv2D\n\nimport glob, argparse, random, os, time\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport _pickle as pickle\n\n\n## Training data directory path\nHOME = os.path.expanduser('~') + '/'\nTrain_file = 'pokemon_train.txt'\npokemon_file = 'pokemon.txt'\nwaza_file = 'pokemon_waza.txt'\nseikaku_file = 'pokemon_seikaku.txt'\n\n\n## Config containing hyper-parameters\ncf = {\n 'Iteration': 1000,\n 'Minibatch': 10,\n 'LearningRate': 0.01,\n 'WeightDecay':0.0005,\n 'FineTuning': False,\n 'SaveModel': 'MyNet.npz'\n}\n\n## Network model\nclass MyNet(chainer.Chain):\n def __init__(self):\n super(MyNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, 1024, nobias=True)\n self.fc2 = L.Linear(None, 1024, nobias=True)\n self.out_doryokuchi = L.Linear(None, 6, nobias=True)\n self.out_seikaku = L.Linear(None, 25, nobias=True)\n self.out_waza = L.Linear(None, 668, nobias=True)\n\n def acti(self, x):\n return F.tanh(x)\n \n def __call__(self, x):\n fc1 = self.acti(self.fc1(x))\n fc2 = self.acti(self.fc2(fc1))\n out1 = F.sigmoid(self.out_doryokuchi(fc2))\n out2 = self.out_seikaku(fc2)\n out3 = F.sigmoid(self.out_waza(fc2))\n return out1, out2, out3\n\n\n## Image Load function\ndef load_data(shuffle=True):\n\n datas = [x.strip() for x in open(Train_file, 'r').readlines()]\n pokemon = [x.strip() for x in open(pokemon_file, 'r').readlines()]\n waza = [x.strip() for x in open(waza_file, 'r').readlines()]\n seikaku = [x.strip() for x in open(seikaku_file, 'r').readlines()]\n \n\n for i, line in enumerate(datas):\n item = line.split(',')\n if len(item) < 12:\n continue\n\n name = np.zeros(len(pokemon), dtype=np.float32)\n name[pokemon.index(item[0])] = 1.\n d = np.array(list(map(int, item[1:7]))) / 4. 
/ 63.\n s = seikaku.index(item[7])\n w = np.zeros(len(waza), dtype=np.float32)\n for j in item[8:]:\n w[waza.index(j)] = 1.\n \n x = np.array((name)).astype(np.float32)\n t = np.hstack((d, s, w)).astype(np.float32)\n\n if i == 0:\n data1 = x\n data2 = t\n else:\n data1 = np.vstack((data1, x))\n data2 = np.vstack((data2, t))\n\n if shuffle: \n inds = np.arange(len(datas))\n random.shuffle(inds)\n data1 = data1[inds]\n data2 = data2[inds]\n \n data = [data1, data2]\n\n return data\n\n\n## Fine-tuning function\ndef copy_model(src, dst):\n assert isinstance(src, chainer.Chain)\n assert isinstance(dst, chainer.Chain)\n for child in src.children():\n if child.name not in dst.__dict__: continue\n dst_child = dst[child.name]\n if type(child) != type(dst_child): continue\n if isinstance(child, chainer.Chain):\n copy_model(child, dst_child)\n if isinstance(child, chainer.Link):\n match = True\n for a, b in zip(child.namedparams(), dst_child.namedparams()):\n if a[0] != b[0]:\n match = False\n break\n if a[1].data.shape != b[1].data.shape:\n match = False\n break\n if not match:\n print('Ignore %s because of parameter mismatch' % child.name)\n continue\n for a, b in zip(child.namedparams(), dst_child.namedparams()):\n b[1].data = a[1].data\n print('Copy %s' % child.name)\n\n\ndef get_batch(data, batch, last):\n\n ins, gts = data\n\n data_num = len(ins)\n ind = last + batch\n\n if ind < data_num:\n in_data = ins[last : ind]\n gt = gts[last : ind]\n last = ind\n else:\n resi = ind - data_num\n in1, gt1 = ins[last:], gts[last:]\n\n inds = np.arange(len(ins))\n random.shuffle(inds)\n ins = ins[inds]\n gts = gts[inds]\n data = [ins, gts]\n\n in2, gt2 = ins[:resi], gts[:resi]\n in_data = np.vstack((in1, in2))\n gt = np.vstack((gt1, gt2))\n last = resi\n\n return in_data, gt, last, data\n\n\ndef parse(data):\n d = data[:, :6].astype(np.float32)\n s = data[:, 6].astype(np.int32)\n w = data[:, 7:].astype(np.float32)\n return d, s, w\n\n\n## Train function\ndef main_train(args):\n\n ## Prepare Images\n train = load_data()\n test = load_data()\n\n if len(train) < 1 or len(test) < 1:\n raise Exception('train num : {}, test num: {}'.format(len(train), len(test)))\n \n train_count = len(train)\n test_count = len(test)\n\n print('# train images: {}'.format(train_count))\n print('# test images: {}'.format(test_count))\n\n \n ## Prepare Network\n model = MyNet()\n if args.gpu_id >=0:\n model.to_gpu()\n\n if cf['FineTuning']:\n orig = pickle.load(open(\"../bvlc_alexnet.pkl\", \"rb\"))\n copy_model(orig, model)\n #serializers.load_npz(\"result/mynet_epoch_100.model\", model)\n\n ## Prepare Optimizer\n optimizer = chainer.optimizers.MomentumSGD(cf['LearningRate'])\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.WeightDecay(cf['WeightDecay']))\n\n\n ## Training start!!\n sum_accuracy_train = 0\n sum_loss_train = 0\n start = time.time()\n \n print('epoch train_loss train_accuracy test_loss test_accuracy Elapsed-Time')\n \n last = 0\n\n for i in range(cf['Iteration']):\n i += 1\n\n x, y, last, train = get_batch(train, cf['Minibatch'], last)\n d, s, w = parse(y)\n\n train_losses = []\n train_accuracies = []\n\n if args.gpu_id >= 0:\n x = chainer.cuda.to_gpu(x)\n d = chainer.cuda.to_gpu(d)\n s = chainer.cuda.to_gpu(s)\n w = chainer.cuda.to_gpu(w)\n\n x = chainer.Variable(x)\n t_d = chainer.Variable(d)\n t_s = chainer.Variable(s)\n t_w = chainer.Variable(w)\n\n y_d, y_s, y_w = model(x)\n\n loss_train1 = F.mean_squared_error(y_d, t_d)\n loss_train2 = F.softmax_cross_entropy(y_s, t_s)\n loss_train3 = 
F.mean_squared_error(y_w, t_w)\n\n model.cleargrads()\n loss_train1.backward()\n loss_train2.backward()\n loss_train3.backward()\n optimizer.update()\n\n train_losses.append(chainer.cuda.to_cpu(loss_train1.data))\n #accuracy_train.to_cpu()\n #train_accuracies.append(accuracy_train.data)\n train_accuracies.append(chainer.cuda.to_cpu(loss_train1.data))\n \n #sum_loss_train += float(model.loss.data) * len(t.data)\n #sum_accuracy_train += float(model.accuracy.data) * len(t.data)\n\n \"\"\"\n if train_iter.is_new_epoch and train_iter.epoch % 20 == 0:\n #print('epoch: ', train_iter.epoch)\n #print('train mean loss: {:.2f}, accuracy: {:.2f}'.format( sum_loss_train / train_count, sum_accuracy_train / train_count))\n # evaluation\n\n test_losses = []\n test_accuracies = []\n\n sum_accuracy_test = 0\n sum_loss_test = 0\n \n #model.predictor.train = False\n for batch in test_iter:\n x_array, t_array = convert.concat_examples(batch,args.gpu_id)\n x = chainer.Variable(x_array)\n t = chainer.Variable(t_array)\n\n y = model(x)\n\n loss_test = F.mean_squared_error(y, t)\n #accuracy_test = F.accuracy(y, t)\n \n test_losses.append(chainer.cuda.to_cpu(loss_test.data))\n #accuracy_test.to_cpu()\n #test_accuracies.append(accuracy_test.data)\n test_accuracies.append(chainer.cuda.to_cpu(loss_test.data))\n\n test_iter.reset()\n #model.predictor.train = True\n #print('test mean loss: {:.2f}, accuracy: {:.2f}'.format( sum_loss_test / test_count, sum_accuracy_test / test_count))\n \n print('{:>5} {:^10.4f} {:^14.4f} {:^9.4f} {:^13.4f} {:^12.2f}'.format(train_iter.epoch, np.mean(train_losses), np.mean(train_accuracies), np.mean(test_losses), np.mean(test_accuracies), time.time()-start))\n \"\"\"\n\n print('{:>5} {:^10.4f}'.format(i, np.mean(train_losses)))\n\n\n # Save the model and the optimizer\n print('\\nsave the model --> {}'.format(cf['SaveModel']))\n chainer.serializers.save_npz(cf['SaveModel'], model)\n model_name = cf['SaveModel'].split('.')[-2]\n print('save the optimizer --> {}'.format(model_name + '.state'))\n chainer.serializers.save_npz(model_name + '.state', optimizer)\n print()\n\n## Test function \ndef main_test(args):\n \n ## Prepare Network\n model = MyNet()\n chainer.serializers.load_npz(cf['SaveModel'], model)\n\n if args.gpu_id >= 0:\n model.to_gpu()\n\n ## Test data\n td = ['ガブリアス', 'ボーマンダ']\n\n pokemon = [x.strip() for x in open(pokemon_file, 'r').readlines()]\n waza = [x.strip() for x in open(waza_file, 'r').readlines()]\n seikaku = [x.strip() for x in open(seikaku_file, 'r').readlines()]\n\n\n ## Test start!!\n print('-- test --')\n \n for i in td:\n x = np.zeros(len(pokemon), dtype=np.float32)\n x[pokemon.index(i)] = 1.\n \n # Reshape 1-dimension to [minibatch, data]\n x = x[None, ...]\n \n if args.gpu_id >= 0:\n x = chainer.cuda.to_gpu(x, 0)\n\n y_d = model(x)[0].data[0]\n y_s = model(x)[1].data[0]\n y_w = model(x)[2].data[0]\n\n if args.gpu_id >= 0:\n y_d = chainer.cuda.to_cpu(y_d)\n y_s = chainer.cuda.to_cpu(y_s)\n y_w = chainer.cuda.to_cpu(y_w)\n \n print()\n print(i)\n\n ## Doryokuchi\n d_sum = y_d.sum()\n d = np.round(y_d / d_sum * 126.).astype(int) * 4\n\n print(' H | A | B | C | D | S |')\n print('{:>4d}|{:>4d}|{:>4d}|{:>4d}|{:>4d}|{:>4d}|'.format(d[0], d[1], d[2], d[3], d[4], d[5]))\n\n ## Seikaku\n print('seikaku: {}'.format(seikaku[y_s.argmax()]))\n\n ## Waza\n for j, w in enumerate(y_w.argsort()[::-1].astype(int)[:4]):\n j += 1\n print('waza{} : {}'.format(j, waza[w]))\n #print(waza[w] for w in y_w[0].argsort()[::-1].astype(int)[:4])\n\n\n\ndef print_config(args):\n print('-- 
config parameters --')\n print('GPU ID : {}'.format(args.gpu_id))\n print('Train file : {}'.format(Train_file))\n print('Train pokemon file : {}'.format(pokemon_file))\n print('Train seikaku file : {}'.format(seikaku_file))\n print('Train waza file : {}'.format(waza_file))\n \n for k, v in cf.items():\n print('{} : {}'.format(k, v))\n print('----\\n')\n\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Faster R-CNN demo')\n parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',default=-1, type=int)\n parser.add_argument('--cpu', dest='cpu_mode',help='Use CPU (overrides --gpu)',action='store_true')\n parser.add_argument('--train', dest='train', help='train', action='store_true')\n parser.add_argument('--test', dest='test', help='test', action='store_true')\n args = parser.parse_args()\n return args\n\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n print_config(args)\n\n if args.gpu_id >= 0:\n chainer.cuda.get_device(args.gpu_id).use()\n \n if args.train:\n main_train(args)\n if args.test:\n main_test(args)\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
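One detail of main_train above worth noting: the three task losses are backpropagated one at a time, relying on Chainer accumulating gradients after the single cleargrads() call. A sketch of the equivalent single-backward form, reusing the variable names from main_train (not a drop-in file):

# Equivalent to the three separate backward() calls above, since gradients
# accumulate identically either way after one cleargrads().
total_loss = loss_train1 + loss_train2 + loss_train3
model.cleargrads()
total_loss.backward()
optimizer.update()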
+{"seq_id":"636808707","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom crawlers.tickers import getTickers\nfrom crawlers.info import getInfo\n\ndef getFundamentus(table='fundamentalista'):\n base_url = 'https://www.fundamentus.com.br/detalhes.php?papel='\n\n table_index = {\n 'fundamentalista': 2,\n 'balanco': 3,\n 'demonstrativo': 4\n }\n tickers = list(getTickers())\n\n data = []\n \n for ticker in tickers:\n \n print(f'Requisitando dados {ticker} ...')\n\n try:\n url = base_url + ticker\n response = requests.get(url)\n\n soup = BeautifulSoup(response.content, 'html.parser')\n\n alltables = soup.find_all('table', 'w728')\n t = alltables[table_index[table]]\n\n label = t.find_all('td', {'class': 'label'})\n content = t.find_all('td', {'class': 'data'})\n except:\n continue\n\n ticker_data = {}\n for l, c in zip(label, content):\n data_label = l.find('span', {'class': 'txt'}).getText()\n data_content = c.getText()\n if fix_string(data_label):\n ticker_data[data_label] = string_to_float(data_content)\n ticker_data['ticker'] = ticker\n\n data.append(ticker_data)\n\n return data\n\ndef string_to_float(string):\n is_percentage = True if '%' in string else False\n replaced_string = string.replace('\\n', '') \\\n .replace('%', '').replace(' ', '').replace('.', '').replace(',', '.')\n if replaced_string == '-':\n return 0.0\n if is_percentage:\n return float(replaced_string) / 100\n return float(replaced_string)\n\ndef fix_string(string):\n return string.replace(' ', '').replace('\\n', '')","sub_path":"crawlers/fundamentus.py","file_name":"fundamentus.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"34233404","text":"# Ugh - he doesn't even have the decency to provide this\nimport numpy as np\nimport pylab as plt\nimport os\n\npath = '/Users/jaguirre/PyModules/ionFR/'\n#path = os.path.expanduser('~/radionopy/')\n\nUT, TEC, B, RM, dRM = np.loadtxt(os.path.join(path, 'IonRM.txt'), unpack=True)\n\nplt.clf()\nplt.errorbar(UT[0:24], RM[0:24], yerr=dRM[0:24], marker='o', ls='None')\nplt.ylim([0, 3])\nplt.xlim([0, 25])\nplt.savefig('test.pdf')\nplt.show()\n","sub_path":"plot_IonRM.py","file_name":"plot_IonRM.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"225971109","text":"\n# coding: utf-8\n\n# TODO List:\n# - Reset/Scramble\n# - Move\n# - More elegent translate action (maybe store in array)\n# - More elegant move\n# - Render\n\n# In[7]:\n\n\nimport numpy as np\nfrom PIL import Image\nimport copy\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport matplotlib.pyplot as plt\n\nclass RubiksEnv(gym.Env):\n \"\"\"\n See cartpole on Github\n Description:\n \n Source:\n \n Observation:\n \n Actions:\n \n Reward:\n The reward\n \n Starting State:\n \n \n Episode Termination:\n Episode terminates when either a cube is in the solved state (i.e. each side only has tiles of one colour) or when the step limit is reached.\n \"\"\"\n \n def __init__(self, size=3, metric='quarter', pomdp=False, solved_reward=1.0, unsolved_reward=0.0, seed=None):\n self.size = size\n \n #Allocate space for Rubik's Cube sides. Each side get's a corresponding integer.\n self.U = (0*np.ones((self.size,self.size))).astype(int)\n self.L = (1*np.ones((self.size,self.size))).astype(int)\n self.F = (2*np.ones((self.size,self.size))).astype(int)\n self.R = (3*np.ones((self.size,self.size))).astype(int)\n self.B = (4*np.ones((self.size,self.size))).astype(int)\n self.D = (5*np.ones((self.size,self.size))).astype(int)\n \n self.orientation = (0,1,3)\n \n self.metric = metric\n self.pomdp = pomdp\n \n if self.metric is 'quarter':\n if self.pomdp:\n self.action_space = spaces.Discrete(16)\n self.observation_space = spaces.Box(low=0, high=5, dtype=np.uint8, shape=(3, self.size, self.size))\n else:\n self.action_space = spaces.Discrete(12)\n self.observation_space = spaces.Box(low=0, high=5, dtype=np.uint8, shape=(6, self.size, self.size))\n else:\n if self.pomdp:\n self.action_space = spaces.Discrete(23)\n self.observation_space = spaces.Box(low=0, high=5, dtype=np.uint8, shape=(3, self.size, self.size))\n else:\n self.action_space = spaces.Discrete(18)\n self.observation_space = spaces.Box(low=0, high=5, dtype=np.uint8, shape=(6, self.size, self.size))\n \n self._action_set = [i for i in range(self.action_space.n)] \n \n self.solved_reward = solved_reward\n self.unsolved_reward = unsolved_reward\n \n self.seed(seed)\n\n self.ACTION_MEANING_QUARTER_METRIC = {\n 0 : \"U\",\n 1 : \"L\",\n 2 : \"F\",\n 3 : \"R\",\n 4 : \"B\",\n 5 : \"D\",\n 6 : \"U'\",\n 7 : \"L'\",\n 8 : \"F'\",\n 9 : \"R'\",\n 10 : \"B'\",\n 11 : \"D'\"\n }\n\n self.ACTION_MEANING_QUARTER_METRIC_POMDP = {\n 0 : \"U\",\n 1 : \"L\",\n 2 : \"F\",\n 3 : \"R\",\n 4 : \"B\",\n 5 : \"D\",\n 6 : \"U'\",\n 7 : \"L'\",\n 8 : \"F'\",\n 9 : \"R'\",\n 10 : \"B'\",\n 11 : \"D'\",\n 12 : \"North\",\n 13 : \"West\",\n 14 : \"South\",\n 15 : \"East\"\n }\n\n self.ACTION_MEANING_HALF_METRIC = {\n 0 : \"U\",\n 1 : \"L\",\n 2 : \"F\",\n 3 : \"R\",\n 4 : \"B\",\n 5 : \"D\",\n 6 : \"U'\",\n 7 : \"L'\",\n 8 : \"F'\",\n 9 : \"R'\",\n 10 : \"B'\",\n 11 : \"D'\",\n 12 : \"U2\",\n 13 : \"L2\",\n 14 : \"F2\",\n 15 : \"R2\",\n 16 : \"B2\",\n 17 : \"D2\"\n }\n\n self.ACTION_MEANING_HALF_METRIC_POMDP = {\n 0 : \"U\",\n 1 : \"L\",\n 2 : \"F\",\n 3 : \"R\",\n 4 : \"B\",\n 5 : \"D\",\n 6 : \"U'\",\n 7 : \"L'\",\n 8 : \"F'\",\n 9 : \"R'\",\n 10 : \"B'\",\n 11 : \"D'\",\n 12 : \"U2\",\n 13 : \"L2\",\n 14 : \"F2\",\n 15 : \"R2\",\n 16 : \"B2\",\n 17 : \"D2\",\n 18 : \"North\",\n 19 : \"West\",\n 20 : \"South\",\n 21 : \"East\",\n 22 : \"Antipode\"\n }\n \n def seed(self, seed=None):\n \"\"\"\"\"\"\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n \n def reset(self, steps = 20, orientation = False):\n 
\"\"\"\"\"\"\n self.U = (0*np.ones((self.size, self.size))).astype(int)\n self.L = (1*np.ones((self.size, self.size))).astype(int)\n self.F = (2*np.ones((self.size, self.size))).astype(int)\n self.R = (3*np.ones((self.size, self.size))).astype(int)\n self.B = (4*np.ones((self.size, self.size))).astype(int)\n self.D = (5*np.ones((self.size, self.size))).astype(int)\n \n for step in range(steps):\n action = self.np_random.choice(self._action_set)\n self.step(int(action))\n if self.solved():\n self.reset(steps, orientation)\n observation = self.get_observation()\n return observation\n\n def reset_to_action(self, actions, orientation = False):\n \"\"\"\"\"\"\n self.U = (0*np.ones((self.size, self.size))).astype(int)\n self.L = (1*np.ones((self.size, self.size))).astype(int)\n self.F = (2*np.ones((self.size, self.size))).astype(int)\n self.R = (3*np.ones((self.size, self.size))).astype(int)\n self.B = (4*np.ones((self.size, self.size))).astype(int)\n self.D = (5*np.ones((self.size, self.size))).astype(int)\n\n for action in actions:\n self.step(int(action))\n\n observation = self.get_observation()\n return observation\n\n def curriculum_reset(self, level=12*20 - 1, orientation = False):\n \"\"\"\"\"\"\n self.U = (0*np.ones((self.size, self.size))).astype(int)\n self.L = (1*np.ones((self.size, self.size))).astype(int)\n self.F = (2*np.ones((self.size, self.size))).astype(int)\n self.R = (3*np.ones((self.size, self.size))).astype(int)\n self.B = (4*np.ones((self.size, self.size))).astype(int)\n self.D = (5*np.ones((self.size, self.size))).astype(int)\n\n for step in range((level // self._n_actions)):\n action = self.np_random.choice(self._action_set)\n self.step(int(action))\n\n action = self.np_random.choice(self._action_set[:(level % self._n_actions) + 1])\n self.step(int(action))\n\n if self.solved():\n self.curriculum_reset(level, orientation)\n\n observation = self.get_observation()\n\n return observation\n \n def move(self, side, sign, times, orientation):\n \"\"\"\"\"\"\n if orientation is None:\n if side is 0:\n self.U = np.rot90(self.U, times*-sign)\n if times < 2:\n if sign > 0:\n Ftmp = copy.copy(self.F[0,:])\n self.F[0,:] = self.R[0,:]\n Ltmp = copy.copy(self.L[0,:])\n self.L[0,:] = Ftmp\n Btmp = copy.copy(self.B[0,:])\n self.B[0,:] = Ltmp\n self.R[0,:] = Btmp\n if sign < 0:\n Ftmp = copy.copy(self.F[0,:])\n self.F[0,:] = self.L[0,:]\n Rtmp = copy.copy(self.R[0,:])\n self.R[0,:] = Ftmp\n Btmp = copy.copy(self.B[0,:])\n self.B[0,:] = Rtmp\n self.L[0,:] = Btmp\n else:\n Ftmp = copy.copy(self.F[0,:])\n self.F[0,:] = self.B[0,:]\n self.B[0,:] = Ftmp\n Rtmp = copy.copy(self.R[0,:])\n self.R[0,:] = self.L[0,:]\n self.L[0,:] = Rtmp\n \n if side is 1:\n self.L = np.rot90(self.L,times*-sign)\n if times < 2:\n if sign > 0:\n Ftmp = copy.copy(self.F[:,0])\n self.F[:,0] = self.U[:,0]\n Dtmp = copy.copy(self.D[:,0][::-1])\n self.D[:,0] = Ftmp\n Btmp = copy.copy(self.B[:,-1][::-1])\n self.B[:,-1] = Dtmp\n self.U[:,0] = Btmp\n if sign < 0:\n Ftmp = copy.copy(self.F[:,0])\n self.F[:,0] = self.D[:,0]\n Utmp = copy.copy(self.U[:,0][::-1])\n self.U[:,0] = Ftmp\n Btmp = copy.copy(self.B[:,-1][::-1])\n self.B[:,-1] = Utmp\n self.D[:,0] = Btmp\n else:\n Ftmp = copy.copy(self.F[:,0][::-1])\n self.F[:,0] = self.B[:,-1][::-1]\n self.B[:,-1] = Ftmp\n Utmp = copy.copy(self.U[:,0])\n self.U[:,0] = self.D[:,0]\n self.D[:,0] = Utmp\n \n \n if side is 2:\n self.F = np.rot90(self.F,times*-sign)\n if times < 2:\n if sign > 0:\n Utmp = copy.copy(self.U[-1,:])\n self.U[-1,:] = self.L[:,-1][::-1]\n Rtmp = 
copy.copy(self.R[:,0][::-1])\n self.R[:,0] = Utmp\n Dtmp = copy.copy(self.D[0,:])\n self.D[0,:] = Rtmp\n self.L[:,-1] = Dtmp\n if sign < 0:\n Utmp = copy.copy(self.U[-1,:][::-1])\n self.U[-1,:] = self.R[:,0]\n Ltmp = copy.copy(self.L[:,-1])\n self.L[:,-1] = Utmp\n Dtmp = copy.copy(self.D[0,:][::-1])\n self.D[0,:] = Ltmp\n self.R[:,0] = Dtmp\n else:\n Utmp = copy.copy(self.U[-1,:][::-1])\n self.U[-1,:] = self.D[0,:][::-1]\n self.D[0,:] = Utmp\n Rtmp = copy.copy(self.R[:,0][::-1])\n self.R[:,0] = self.L[:,2]\n self.L[:,-1] = Rtmp\n \n if side is 3:\n self.R = np.rot90(self.R,times*-sign)\n if times < 2:\n if sign > 0:\n Utmp = copy.copy(self.U[:,-1][::-1])\n self.U[:,-1] = self.F[:,-1]\n Btmp = copy.copy(self.B[:,0][::-1])\n self.B[:,0] = Utmp\n Dtmp = copy.copy(self.D[:,-1])\n self.D[:,-1] = Btmp\n self.F[:,-1] = Dtmp\n if sign < 0:\n Utmp = copy.copy(self.U[:,-1]) \n self.U[:,-1] = self.B[:,0][::-1]\n Ftmp = copy.copy(self.F[:,-1])\n self.F[:,-1] = Utmp\n Dtmp = copy.copy(self.D[:,-1][::-1])\n self.D[:,-1] = Ftmp\n self.B[:,0] = Dtmp\n else:\n Utmp = copy.copy(self.U[:,-1])\n self.U[:,-1] = self.D[:,-1]\n self.D[:,-1] = Utmp\n Ftmp = copy.copy(self.F[:,-1][::-1])\n self.F[:,-1] = self.B[:,0][::-1]\n self.B[:,0] = Ftmp\n \n \n if side is 4:\n self.B = np.rot90(self.B,times*-sign)\n if times < 2:\n if sign > 0:\n Utmp = copy.copy(self.U[0,:][::-1])\n self.U[0,:] = self.R[:,-1]\n Ltmp = copy.copy(self.L[:,0])\n self.L[:,0] = Utmp\n Dtmp = copy.copy(self.D[-1,:][::-1])\n self.D[-1,:] = Ltmp\n self.R[:,-1] = Dtmp\n \n if sign < 0:\n Utmp = copy.copy(self.U[0,:])\n self.U[0,:] = self.L[:,0][::-1]\n Rtmp = copy.copy(self.R[:,-1][::-1])\n self.R[:,-1] = Utmp\n Dtmp = copy.copy(self.D[-1,:])\n self.D[-1,:] = Rtmp\n self.L[:,0] = Dtmp\n else:\n Utmp = copy.copy(self.U[0,:][::-1])\n self.U[0,:] = self.D[-1,:][::-1]\n self.D[-1,:] = Utmp\n Rtmp = copy.copy(self.R[:,-1][::-1])\n self.R[:,-1] = self.L[:,0][::-1]\n self.L[:,0] = Rtmp\n \n \n if side is 5:\n self.D = np.rot90(self.D,times*-sign)\n if times < 2:\n if sign > 0:\n Ftmp = copy.copy(self.F[-1,:])\n self.F[-1,:] = self.L[-1,:]\n Rtmp = copy.copy(self.R[-1,:])\n self.R[-1,:] = Ftmp\n Btmp = copy.copy(self.B[-1,:])\n self.B[-1,:] = Rtmp\n self.L[-1,:] = Btmp\n if sign < 0:\n Ftmp = copy.copy(self.F[-1,:])\n self.F[-1,:] = self.R[-1,:]\n Ltmp = copy.copy(self.L[-1,:])\n self.L[-1,:] = Ftmp\n Btmp = copy.copy(self.B[-1,:])\n self.B[-1,:] = Ltmp\n self.R[-1,:] = Btmp\n else:\n Ftmp = copy.copy(self.F[-1,:])\n self.F[-1,:] = self.B[-1,:]\n self.B[-1,:] = Ftmp\n Ltmp = copy.copy(self.L[-1,:])\n self.L[-1,:] = self.R[-1,:]\n self.R[-1,:] = Ltmp\n else:\n raise NotImplementedError('Orientation')\n \n def translate_action(self, action):\n \"\"\"\"\"\"\n #TODO encode this in ACTION_MEANING_QUARTER_METRIC\n side = None\n sign = None\n times = None\n orientation = None\n \n if action in [6,7,8,9,10,11]:\n sign = -1.0\n times = 1.0\n \n if action in [0,1,2,3,4,5]:\n sign = 1.0\n times = 1.0\n \n if action is 0 or action is 6:\n side = 0\n if action is 1 or action is 7:\n side = 1\n if action is 2 or action is 8:\n side = 2\n if action is 3 or action is 9:\n side = 3\n if action is 4 or action is 10:\n side = 4\n if action is 5 or action is 11:\n side = 5\n\n if self.metric is 'half':\n sign = 1.0\n times = 2.0\n \n if action is 12:\n side = 0\n if action is 13:\n side = 1\n if action is 14:\n side = 2\n if action is 15:\n side = 3\n if action is 16:\n side = 4\n if action is 17:\n side = 5\n \n if self.pomdp:\n assert side is None\n assert sign is None\n 
assert times is None \n \n if action in [12, 18]:\n orientation = \"North\"\n if action in [13, 19]:\n orientation = \"West\"\n if action in [14, 20]:\n orientation = \"South\"\n if action in [15, 21]:\n orientation = \"East\"\n if action == 22:\n orientation = \"Antipode\"\n \n return side, sign, times, orientation\n \n def step(self, action):\n assert self.action_space.contains(action), \"Invalid action\"\n side, sign, times, orientation = self.translate_action(action)\n self.move(side, sign, times, orientation)\n \n observation = self.get_observation()\n done = self.solved()\n if done:\n reward = self.solved_reward\n else:\n reward = self.unsolved_reward\n \n information = {}\n \n return observation, reward, done, information\n \n def solved(self):\n \"\"\"\"\"\"\n sides = [self.U, self.L, self.F, self.R, self.B, self.D]\n \n for index, side in enumerate(sides):\n if not np.all(side == index):\n return 0\n \n return 1\n \n def pretty_print(self):\n \"\"\"\"\"\"\n emptysymbol = np.chararray((self.size, self.size), unicode=True)\n emptysymbol[:] = '-'\n matrix = np.vstack((np.hstack((emptysymbol,self.U.astype(int),emptysymbol,emptysymbol)),\n np.hstack((self.L.astype(int),self.F.astype(int),self.R.astype(int),self.B.astype(int))),\n np.hstack((emptysymbol,self.D.astype(int),emptysymbol,emptysymbol))))\n \n print(matrix)\n \n @property\n def _n_actions(self):\n \"\"\"\"\"\"\n return len(self._action_set)\n \n def render(self):\n \"\"\"\"\"\"\n colordict = {0: [255, 0, 0],\n 1: [0, 0, 255],\n 2: [255, 255, 255],\n 3: [0, 255, 0],\n 4: [255, 255, 0],\n 5: [255, 127, 0]}\n\n factor = 60\n square = int(factor/self.size)\n width = factor*4\n height = factor*3\n\n image = np.ones((height, width, 3), dtype='uint8')*127\n for i in range(self.size):\n for j in range(self.size):\n # UP\n image[i*square:(i+1)*square, factor + j*square:factor + (j+1)*square] = colordict[self.U[i, j]]\n\n # LEFT\n image[factor + i*square: factor + (i+1)*square, j*square:(j+1)*square] = colordict[self.L[i, j]]\n\n # FRONT\n image[factor + i*square: factor + (i+1)*square, factor + j*square: factor + (j+1)*square] = colordict[self.F[i, j]]\n\n # RIGHT\n image[factor + i*square: factor + (i+1)*square, 2*factor + j*square: 2*factor + (j+1)*square] = colordict[self.R[i, j]]\n\n # BACK\n image[factor + i*square: factor + (i+1)*square, 3*factor + j*square: 3*factor + (j+1)*square] = colordict[self.B[i, j]]\n\n # DOWN\n image[2*factor + i*square: 2*factor + (i+1)*square, factor + j*square: factor + (j+1)*square] = colordict[self.D[i, j]]\n plt.imshow(image)\n plt.show()\n \n def close(self):\n \"\"\"\"\"\"\n raise NotImplementedError('close not implemented')\n \n def get_action_meanings(self):\n \"\"\"\"\"\"\n if self.metric == 'quarter':\n if self.pomdp:\n return [self.ACTION_MEANING_QUARTER_METRIC_POMDP[i] for i in self._action_set]\n else:\n return [self.ACTION_MEANING_QUARTER_METRIC[i] for i in self._action_set]\n else:\n if self.pomdp:\n return [self.ACTION_MEANING_HALF_METRIC_POMDP[i] for i in self._action_set]\n else:\n return [self.ACTION_MEANING_HALF_METRIC[i] for i in self._action_set]\n \n \n def get_observation(self):\n \"\"\"\"\"\"\n sides = [self.U, self.L, self.F, self.R, self.B, self.D]\n if self.pomdp:\n raveled_cube = np.array([sides[self.orientation[0]],sides[self.orientation[1]],sides[self.orientation[2]]]).ravel()\n one_hot = np.eye(6)[raveled_cube]\n return one_hot.reshape(-1)\n else:\n raveled_cube = np.array(sides).ravel()\n one_hot = np.eye(6)[raveled_cube]\n \n return one_hot.reshape(-1)\n \n \n 
\n\n\nif __name__ == \"__main__\":\n env = RubiksEnv(size=3, metric='quarter', pomdp=False, solved_reward=1.0, unsolved_reward=0.0, seed=None)\n for x in range(12):\n env.curriculum_reset(level=0)\n\n env.render()\n\n","sub_path":"rubiks.py","file_name":"rubiks.py","file_ext":"py","file_size_in_byte":20198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
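A quick property check for the move table above: in the quarter-turn metric, each clockwise action (0-5) is undone by its counter-clockwise counterpart (6-11), so applying both to a freshly constructed (solved) cube should leave it solved. A small sketch, assuming the RubiksEnv class above is importable:

env = RubiksEnv(size=3, metric='quarter')
for a in range(6):
    env.step(a)      # e.g. 0 == "U"
    env.step(a + 6)  # its inverse, e.g. 6 == "U'"
    assert env.solved(), env.get_action_meanings()[a]
print("all quarter turns invert correctly")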
+{"seq_id":"151196173","text":"# Processing Lists\n# ----------------\n# Two of the most common operations on a list are modifying\n# each element of a list, and excluding elements of a list.\n#\n# A function that accepts an element of a list and returns\n# a modified value for that element is called a map function.\n#\n# A function that accepts an element of a list and returns\n# whether the element should be kept or excluded is called\n# a filter function.\n#\n# For example, given a list of numbers, we want to produce\n# another list which contains the squares of only the even\n# numbers in the list.\n\ndef even_squares(t):\n result = []\n for e in t:\n if e % 2 == 0:\n result.append(e ** 2)\n return result\n\nnaturals = [1, 2, 3, 4, 5]\nes = even_squares(naturals)\nprint('naturals ==', naturals)\nprint('es ==', es)\n\n# A nested list is a list within a list.\n# Using the above idiom, we can create lists of lists as well.\n\n# For example, let's produce a list whose elements are lists\n# containing the number and its square.\n\ndef number_square_pairs(t):\n result = []\n for e in t:\n result.append([e, e ** 2])\n return result\n\nnsp = number_square_pairs(naturals)\nprint('naturals ==', naturals)\nprint('nsp ==', nsp)\n\nnames = ['alaa', 'fiha', 'amna', 'hamza', 'mahnoor', 'hajra', 'safina', 'dua']\n\ndef map_to_upper(t):\n result = []\n for e in t:\n result.append(e.upper())\n return result\n\nupper_names = map_to_upper(names)\nprint('names ==', names)\nprint('upper_names ==', upper_names)\n\n# In general, a function that both maps and filters a list has\n# the following pattern:\n\ndef map_filter_list(t):\n result = []\n for e in t:\n if filter_func(e):\n result.append(map_true_func(e))\n else:\n result.append(map_false_func(e))\n return result\n\n# where:\n# map_*_func takes one element as its argument and returns\n# its modified value; and\n# filter_func takes one element as its argument and returns\n# True if the element is kept, False if it is to be excluded.\n\n\n\n# Exercise 1a\n# Write a function which accepts a list of numbers and an\n# exponent as arguments and returns a list of numbers with each\n# element raised to the exponent.\n\ndef exponents(t, exp):\n result = []\n for e in t:\n result.append(e ** exp)\n return result\n\nprint('naturals ==', naturals)\nprint('exponents(naturals, 2) ==', exponents(naturals, 2))\n# -> [1, 4, 9, 16, 25]\n\n# Exercise 1b\n# Modify the above function such that it produces nested lists\n# which contain the number and its exponent.\n\ndef number_exponent_pairs(t, exp):\n result = []\n for e in t:\n result.append([e, e ** exp])\n return result\n\nprint('number_exponent_pairs(naturals, 2) ==', number_exponent_pairs(naturals, 2))\n# -> [[1, 1], [2, 4], [3, 9], [4, 16], [5, 25]]\n\n# Exercise 1c\n# Modify the above function such that it only return the odd\n# numbers in a given list and their exponent.\n\ndef odd_number_exponent_pairs(t, exp):\n result = []\n for e in t:\n if e % 2 == 1:\n result.append([e, e ** exp])\n return result\n\nprint('odd_number_exponent_pairs(naturals, 2) ==', odd_number_exponent_pairs(naturals, 2))\n# -> [[1, 1], [3, 9], [5, 25]]\n\n\n\n# Exercise 2\n# Write a function that returns the list of numbers that are\n# divisible by both x and y.\n\ndef multiples(t, x, y):\n result = []\n for e in t:\n if e % x == 0 and e % y == 0:\n result.append(e)\n return result\n\nprint('multiples(range(50), 2, 5) ==', multiples(range(50), 2, 5))\n# -> [0, 10, 20, 30, 40]\n\n# Note: the above pattern works with any sequence (e.g., 
range).\n\n\n\n# Exercise 3\n# Write a function that returns a nested list with each element\n# and its parity.\n\ndef number_parity_pairs(t):\n result = []\n for e in t:\n if e % 2 == 0:\n result.append([e, 'even'])\n else:\n result.append([e, 'odd'])\n return result\n\nprint('number_parity_pairs(naturals) ==', number_parity_pairs(naturals))\n# -> [[1, 'odd'], [2, 'even'], [3, 'odd'], [4, 'even'], [5, 'odd']]\n\n\n\n# Exercise 4a\n# Write a function that converts a list of masses in kilograms to\n# grams. Note: to convert from kg to g, multiply by one thousand.\n\ndef convert_kg_to_g(t):\n result = []\n for e in t:\n result.append(e * 1000)\n return result\n\nprint('convert_kg_to_g(naturals) ==', convert_kg_to_g(naturals))\n# -> [1000, 2000, 3000, 4000, 5000]\n\n# Exercise 4b\n# Write a function that converts a list of masses in kilograms to\n# pounds. Note: to convert from kg to lbs, multiply by 2.2046.\n\ndef convert_kg_to_lbs(t):\n result = []\n for e in t:\n result.append(e * 2.2046)\n return result\n\nprint('convert_kg_to_lbs(naturals) ==', convert_kg_to_lbs(naturals))\n# -> [2.2046, 4.4092, 6.6138, 8.8184, 11.023]\n\n# Exercise 4c\n# Modify the above function to accept the conversion unit as an\n# argument and make the appropriate conversion.\n# If the unit is unrecognized, return an unmodified list.\n\ndef convert_kg_to(t, unit):\n result = []\n for e in t:\n if unit == 'g':\n result.append(e * 1000)\n elif unit == 'lbs':\n result.append(e * 2.2046)\n else:\n result.append(e)\n return result\n\nprint(\"convert_kg_to(naturals, 'g') ==\", convert_kg_to(naturals, 'g'))\n# -> [1000, 2000, 3000, 4000, 5000]\nprint(\"convert_kg_to(naturals, 'lbs') ==\", convert_kg_to(naturals, 'lbs'))\n# -> [2.2046, 4.4092, 6.6138, 8.8184, 11.023]\nprint(\"convert_kg_to(naturals, 'st') ==\", convert_kg_to(naturals, 'st'))\n# -> [1, 2, 3, 4, 5]\n\n# Alternative:\ndef convert_kg_to(t, unit):\n if unit == 'g':\n return convert_kg_to_g(t)\n if unit == 'lbs':\n return convert_kg_to_lbs(t)\n # There is a potential bug if the list isn't copied.\n return t.copy()\n\nprint(\"convert_kg_to(naturals, 'g') ==\", convert_kg_to(naturals, 'g'))\n# -> [1000, 2000, 3000, 4000, 5000]\nprint(\"convert_kg_to(naturals, 'lbs') ==\", convert_kg_to(naturals, 'lbs'))\n# -> [2.2046, 4.4092, 6.6138, 8.8184, 11.023]\nprint(\"convert_kg_to(naturals, 'st') ==\", convert_kg_to(naturals, 'st'))\n# -> [1, 2, 3, 4, 5]\n\n\n\n# Exercise 5\n# Read a file containing full names and form a nested list where\n# every element is a list containing first name and last name.\n\ndef first_last_name_pairs(filename):\n f = open(filename)\n names = f.readlines()\n result = []\n for name in names:\n result.append(name.split())\n return result\n\nprint(\"first_last_name_pairs('mynames.txt') ==\", first_last_name_pairs('mynames.txt'))\n# -> [['Fiha', 'Ali'], ['Hamza', 'Jafri'], ['Mahnoor',\n# -> 'Mahboob'], ['Vaneeza', 'Iqbal'], ['Afifa', 'Bashir'],\n# -> ['Aala', 'Siddiqi'], ['Zunairah', 'Qureshi'],\n# -> ['Safina', 'Shalwani']]\n","sub_path":"Week 11/Lab11/week11_lab_sol.py","file_name":"week11_lab_sol.py","file_ext":"py","file_size_in_byte":6664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
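The map/filter pattern the exercises above drill can also be written as a list comprehension; this one-liner is equivalent to even_squares from the top of that file:

def even_squares_comprehension(t):
    # filter: keep even elements; map: square them
    return [e ** 2 for e in t if e % 2 == 0]

print(even_squares_comprehension([1, 2, 3, 4, 5]))  # -> [4, 16]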
+{"seq_id":"562349123","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect, render, get_object_or_404\n\n\n# Create a basic object\ndef new_object(request, form, operation, template_name, redirect_success):\n context = dict()\n\n object_form = form(request.POST or None, prefix='object')\n\n if request.method == 'POST':\n if object_form.is_valid():\n object_form.save()\n return redirect(redirect_success)\n else:\n context['errors'] = object_form.errors\n\n context['edit'] = False\n context['form'] = object_form\n context['operation'] = operation\n\n return render(request=request, template_name=template_name, context=context)\n\n\n# Update a basic object\ndef update_object(request, pk, object_model, form, operation, template_name, redirect_success):\n context = dict()\n\n object_instance = get_object_or_404(object_model, pk=pk)\n object_form = form(request.POST or None, instance=object_instance)\n\n if request.method == 'POST':\n if object_form.is_valid():\n object_form.save()\n\n return redirect(redirect_success)\n else:\n context['errors'] = object_form.errors\n else:\n object_form = form(instance=object_instance)\n\n context['form'] = object_form\n context['operation'] = operation\n context['edit'] = True\n return render(request=request, template_name=template_name, context=context)\n\n\n# Delete ajax object\ndef delete_object(request, pk, object_model):\n if request.method == 'POST':\n try:\n object_instance = object_model.objects.get(pk=pk)\n except object_model.DoesNotExist:\n return JsonResponse('Does Not Exist', status=404, safe=False)\n object_instance.delete()\n return JsonResponse('Success', status=200, safe=False)\n else:\n return JsonResponse('Bad Request', status=405, safe=False)\n","sub_path":"basic-crud/basic_crud.py","file_name":"basic_crud.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"646689401","text":"import sys\n#import math\nimport serial\nimport time\n#import pdb\nimport select\nimport struct\nimport termstatus\n\ndef find_prologix():\n \"\"\"\n Searches the ports for prologix devices to connect to.\n \n Return\n ======\n List of prologix ports available, list(serial.tools.list_ports_common.ListPortInfo)\n \"\"\"\n from serial.tools import list_ports\n\n prologix = [p for p in list_ports.comports() if 'prologix' in p.description.lower()]\n\n if len(prologix) == 0: raise Exception(\"Can't find a Prologix\")\n \n return prologix\n\n\nclass usbGPIB:\n def __init__(self, device, gpibAddr, baud=9600, timeout=0.5, \n eot=b'\\004', debug=0, auto=False, log=False, tSleep=0.1):\n\n #End of Transmission character\n self.eot = eot\n # EOT character number in the ASCII table\n self.eotNum = struct.unpack('B',eot)[0]\n self.debug = debug\n self.auto = auto\n self.tSleep = tSleep\n self.log = log\n self.gpibAddr = gpibAddr\n self.device = device\n\n #Connect to the GPIB-USB converter\n self.ser = serial.Serial(device, baud, timeout=timeout)\n\n self.refresh()\n \n def refresh(self):\n \"\"\"\n Sets up the GPIB connection\n \"\"\"\n self.command(\"++addr \"+str(self.gpibAddr)+\"\\n\", sleep=0.1)\n self.command(\"++eos 3\\n\", sleep=0.1)\n self.command(\"++mode 1\\n\", sleep=0.1)\n \n if self.auto:\n self.command(\"++auto 1\\n\", sleep=0.1)\n else:\n self.command(\"++auto 0\\n\", sleep=0.1)\n \n self.command(\"++ifc\\n\",0.1)\n self.command(\"++read_tmo_ms 3000\\n\",0.1)\n self.command(\"++eot_char \"+str(self.eotNum)+\"\\n\",0.1)\n self.command(\"++eot_enable 1\\n\",0.1)\n \n def getData(self, buf, sleep=None):\n if sleep is None: sleep = self.tSleep + 0.1\n\n data=b\"\"\n dlen=0\n if self.debug == True:\n progressInfo = termstatus.statusTxt(\"0 bytes received\")\n \n while 1: # Repeat reading data until eot is found\n while 1: # Read some data\n readSock, writeSock, errSock = select.select([self.ser],[],[],3)\n if len(readSock) == 1:\n data1 = readSock[0].read(buf)\n if self.debug == True:\n dlen=dlen+len(data1)\n progressInfo.update(str(dlen)+' bytes received')\n break\n \n if data1.endswith(self.eot): #if eot is found at the end\n data = data + data1[:-1] #remove eot\n break\n else:\n data = data + data1\n time.sleep(sleep)\n\n if self.debug == True:\n progressInfo.end()\n return data\n \n def query(self, string, buf=100, sleep=None):\n \"\"\"Send a query to the device and return the result.\"\"\"\n if sleep is None: sleep=self.tSleep\n if self.log: print(sys.stderr, \"?? 
%s\" % string)\n \n cmd = string.encode() + b'\\n'\n \n self.ser.write(cmd)\n \n if not self.auto:\n self.ser.write(\"++read eoi\\n\".encode()) #Change to listening mode\n \n self.ser.flush()\n time.sleep(sleep)\n \n ret = self.getData(buf)\n \n if self.log: print(sys.stderr, \"== %s\" % ret.strip())\n \n return ret\n\n def srq(self):\n \"\"\"Poll the device's SRQ\"\"\"\n self.command(\"++srq\")\n \n while True: # Read some data\n readSock, writeSock, errSock = select.select([self.ser],[],[],3)\n if len(readSock) == 1:\n data = readSock[0].read(100)\n break\n\n return data[:-2]\n \n def command(self, string, sleep=None):\n \"\"\"Send a command to the device.\"\"\"\n if sleep is None: sleep = self.tSleep\n if self.log: print(sys.stderr, \">> %s\" % string)\n \n cmd = string.encode() + b'\\n'\n self.ser.write(cmd)\n self.ser.flush()\n time.sleep(sleep)\n\n def spoll(self):\n \"\"\"Perform a serial polling and return the result.\"\"\"\n self.command(\"++spoll\")\n while 1: # Read some data\n readSock, writeSock, errSock = select.select([self.ser],[],[],3)\n if len(readSock) == 1:\n data = readSock[0].read(100)\n break\n\n return data[:-2]\n \n def close(self):\n self.ser.close()\n \n def setDebugMode(self, debugFlag):\n if debugFlag:\n self.debug=1\n else:\n self.debug=0","sub_path":"usbgpib.py","file_name":"usbgpib.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"346006891","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport logging\nfrom collections import defaultdict\n\nfrom django.contrib.auth.decorators import permission_required\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect\nfrom django.views.decorators.http import last_modified as cache_last_modified\nfrom django.views.decorators.cache import never_cache as force_cache_validation\nfrom django.views.generic import View\nfrom django.utils.translation import ugettext as _\nfrom django.core.cache import caches\nfrom django.views.generic.detail import BaseDetailView\nfrom mapentity.serializers import GPXSerializer\nfrom mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,\n MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,\n MapEntityDelete, MapEntityFormat, HttpJSONResponse, LastModifiedMixin)\n\nfrom geotrek.authent.decorators import same_structure_required\nfrom geotrek.common.utils import classproperty\nfrom geotrek.common.views import PublicOrReadPermMixin\nfrom geotrek.core.models import AltimetryMixin\n\nfrom .models import Path, Trail, Topology\nfrom .forms import PathForm, TrailForm\nfrom .filters import PathFilterSet, TrailFilterSet\nfrom . import graph as graph_lib\nfrom django.http.response import HttpResponse\nfrom django.contrib import messages\nfrom django.db.models import Sum\nfrom django.db.models.functions import Coalesce\nfrom geotrek.api.v2.functions import Length\nfrom django.db.models.fields import FloatField\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CreateFromTopologyMixin(object):\n def on_topology(self):\n pk = self.request.GET.get('topology')\n if pk:\n try:\n return Topology.objects.existing().get(pk=pk)\n except Topology.DoesNotExist:\n logger.warning(\"Intervention on unknown topology %s\" % pk)\n return None\n\n def get_initial(self):\n initial = super(CreateFromTopologyMixin, self).get_initial()\n # Create intervention with an existing topology as initial data\n topology = self.on_topology()\n if topology:\n initial['topology'] = topology.serialize(with_pk=False)\n return initial\n\n\nclass PathLayer(MapEntityLayer):\n properties = ['name', 'draft']\n queryset = Path.objects.all()\n\n def get_queryset(self):\n qs = super(PathLayer, self).get_queryset()\n if self.request.GET.get('no_draft'):\n qs = qs.exclude(draft=True)\n return qs\n\n\nclass PathList(MapEntityList):\n queryset = Path.objects.prefetch_related('networks').select_related('stake')\n filterform = PathFilterSet\n\n @classproperty\n def columns(cls):\n columns = ['id', 'checkbox', 'name', 'networks', 'length', 'length_2d']\n if settings.TRAIL_MODEL_ENABLED:\n columns.append('trails')\n return columns\n\n def get_template_names(self):\n return (u\"core/path_list.html\",)\n\n def get_queryset(self):\n \"\"\"\n denormalize ``trail`` column from list.\n \"\"\"\n qs = super(PathList, self).get_queryset()\n denormalized = {}\n if settings.TRAIL_MODEL_ENABLED:\n paths_id = qs.values_list('id', flat=True)\n paths_trails = Trail.objects.filter(aggregations__path__id__in=paths_id)\n by_id = dict([(trail.id, trail) for trail in paths_trails])\n trails_paths_ids = paths_trails.values_list('id', 'aggregations__path__id')\n for trail_id, path_id in trails_paths_ids:\n denormalized.setdefault(path_id, []).append(by_id[trail_id])\n\n for path in qs:\n path_trails = denormalized.get(path.id, [])\n setattr(path, '_trails', path_trails)\n return qs\n\n\nclass 
PathJsonList(MapEntityJsonList, PathList):\n def get_context_data(self, **kwargs):\n context = super(PathJsonList, self).get_context_data(**kwargs)\n context[\"sumPath\"] = round(self.object_list.aggregate(sumPath=Coalesce(Sum(Length('geom'), output_field=FloatField()), 0))['sumPath'] / 1000, 1)\n return context\n\n\nclass PathFormatList(MapEntityFormat, PathList):\n columns = [\n 'id', 'valid', 'visible', 'name', 'comments', 'departure', 'arrival',\n 'comfort', 'source', 'stake', 'usages', 'networks',\n 'structure', 'date_insert', 'date_update',\n 'cities', 'districts', 'areas', 'length_2d'\n ] + AltimetryMixin.COLUMNS\n\n\nclass PathDetail(MapEntityDetail):\n model = Path\n\n def get_context_data(self, *args, **kwargs):\n context = super(PathDetail, self).get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n return context\n\n\nclass PathGPXDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n queryset = Path.objects.all()\n\n def render_to_response(self, context):\n gpx_serializer = GPXSerializer()\n response = HttpResponse(content_type='application/gpx+xml')\n response['Content-Disposition'] = 'attachment; filename=\"%s.gpx\"' % self.object\n gpx_serializer.serialize([self.object], stream=response, geom_field='geom')\n return response\n\n\nclass PathKMLDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n queryset = Path.objects.all()\n\n def render_to_response(self, context):\n response = HttpResponse(self.object.kml(),\n content_type='application/vnd.google-earth.kml+xml')\n response['Content-Disposition'] = 'attachment; filename=\"%s.kml\"' % self.object\n return response\n\n\nclass PathDocument(MapEntityDocument):\n model = Path\n\n def get_context_data(self, *args, **kwargs):\n language = self.request.LANGUAGE_CODE\n self.get_object().prepare_elevation_chart(language, self.request.build_absolute_uri('/'))\n return super(PathDocument, self).get_context_data(*args, **kwargs)\n\n\nclass PathCreate(MapEntityCreate):\n model = Path\n form_class = PathForm\n\n def dispatch(self, *args, **kwargs):\n if self.request.user.has_perm('core.add_path') or self.request.user.has_perm('core.add_draft_path'):\n return super(MapEntityCreate, self).dispatch(*args, **kwargs)\n return super(PathCreate, self).dispatch(*args, **kwargs)\n\n\nclass PathUpdate(MapEntityUpdate):\n model = Path\n form_class = PathForm\n\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n path = self.get_object()\n if path.draft and not self.request.user.has_perm('core.change_draft_path'):\n messages.warning(self.request, _(\n u'Access to the requested resource is restricted. You have been redirected.'))\n return redirect('core:path_detail', **kwargs)\n if not path.draft and not self.request.user.has_perm('core.change_path'):\n messages.warning(self.request, _(\n u'Access to the requested resource is restricted. 
You have been redirected.'))\n return redirect('core:path_detail', **kwargs)\n if path.draft and self.request.user.has_perm('core.change_draft_path'):\n return super(MapEntityUpdate, self).dispatch(*args, **kwargs)\n return super(PathUpdate, self).dispatch(*args, **kwargs)\n\n\nclass PathDelete(MapEntityDelete):\n model = Path\n\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n path = self.get_object()\n if path.draft and not self.request.user.has_perm('core.delete_draft_path'):\n messages.warning(self.request, _(\n u'Access to the requested resource is restricted. You have been redirected.'))\n return redirect('core:path_detail', **kwargs)\n if not path.draft and not self.request.user.has_perm('core.delete_path'):\n messages.warning(self.request, _(\n u'Access to the requested resource is restricted. You have been redirected.'))\n return redirect('core:path_detail', **kwargs)\n if path.draft and self.request.user.has_perm('core.delete_draft_path'):\n return super(MapEntityDelete, self).dispatch(*args, **kwargs)\n return super(PathDelete, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(PathDelete, self).get_context_data(**kwargs)\n topologies_by_model = defaultdict(list)\n if 'geotrek.core' in settings.INSTALLED_APPS:\n for trail in self.object.trails:\n topologies_by_model[_('Trails')].append({'name': trail.name, 'url': trail.get_detail_url()})\n if 'geotrek.trekking' in settings.INSTALLED_APPS:\n for trek in self.object.treks:\n topologies_by_model[_('Treks')].append({'name': trek.name, 'url': trek.get_detail_url()})\n for service in self.object.services:\n topologies_by_model[_('Services')].append({'name': service.type.name, 'url': service.get_detail_url()})\n for poi in self.object.pois:\n topologies_by_model[_('Pois')].append({'name': poi.name, 'url': poi.get_detail_url()})\n if 'geotrek.infrastructure' in settings.INSTALLED_APPS:\n for signage in self.object.signages:\n topologies_by_model[_('Signages')].append({'name': signage.name, 'url': signage.get_detail_url()})\n for infrastructure in self.object.infrastructures:\n topologies_by_model[_('Infrastructures')].append({'name': infrastructure.name, 'url': infrastructure.get_detail_url()})\n if 'geotrek.maintenance' in settings.INSTALLED_APPS:\n for intervention in self.object.interventions:\n topologies_by_model[_('Interventions')].append({'name': intervention.name, 'url': intervention.get_detail_url()})\n context['topologies_by_model'] = dict(topologies_by_model)\n return context\n\n\n@login_required\n@cache_last_modified(lambda x: Path.latest_updated())\n@force_cache_validation\ndef get_graph_json(request):\n cache = caches['fat']\n key = 'path_graph_json'\n\n result = cache.get(key)\n latest = Path.latest_updated()\n\n if result and latest:\n cache_latest, json_graph = result\n # Not empty and still valid\n if cache_latest and cache_latest >= latest:\n return HttpJSONResponse(json_graph)\n\n # cache does not exist or is not up to date\n # rebuild the graph and cache the json\n graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.exclude(draft=True))\n json_graph = json.dumps(graph)\n\n cache.set(key, (latest, json_graph))\n return HttpJSONResponse(json_graph)\n\n\nclass TrailLayer(MapEntityLayer):\n queryset = Trail.objects.existing()\n properties = ['name']\n\n\nclass TrailList(MapEntityList):\n queryset = Trail.objects.existing()\n filterform = TrailFilterSet\n columns = ['id', 'name', 'departure', 'arrival', 'length']\n\n\nclass 
TrailJsonList(MapEntityJsonList, TrailList):\n pass\n\n\nclass TrailFormatList(MapEntityFormat, TrailList):\n columns = [\n 'id', 'name', 'comments', 'departure', 'arrival',\n 'structure', 'date_insert', 'date_update',\n 'cities', 'districts', 'areas',\n ] + AltimetryMixin.COLUMNS\n\n\nclass TrailDetail(MapEntityDetail):\n queryset = Trail.objects.existing()\n\n def get_context_data(self, *args, **kwargs):\n context = super(TrailDetail, self).get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n return context\n\n\nclass TrailGPXDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n queryset = Trail.objects.existing()\n\n def render_to_response(self, context):\n gpx_serializer = GPXSerializer()\n response = HttpResponse(content_type='application/gpx+xml')\n response['Content-Disposition'] = 'attachment; filename=\"%s.gpx\"' % self.object\n gpx_serializer.serialize([self.object], stream=response, geom_field='geom')\n return response\n\n\nclass TrailKMLDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n queryset = Trail.objects.existing()\n\n def render_to_response(self, context):\n response = HttpResponse(self.object.kml(),\n content_type='application/vnd.google-earth.kml+xml')\n response['Content-Disposition'] = 'attachment; filename=\"%s.kml\"' % self.object\n return response\n\n\nclass TrailDocument(MapEntityDocument):\n queryset = Trail.objects.existing()\n\n\nclass TrailCreate(CreateFromTopologyMixin, MapEntityCreate):\n model = Trail\n form_class = TrailForm\n\n\nclass TrailUpdate(MapEntityUpdate):\n queryset = Trail.objects.existing()\n form_class = TrailForm\n\n @same_structure_required('core:trail_detail')\n def dispatch(self, *args, **kwargs):\n return super(TrailUpdate, self).dispatch(*args, **kwargs)\n\n\nclass TrailDelete(MapEntityDelete):\n queryset = Trail.objects.existing()\n\n @same_structure_required('core:trail_detail')\n def dispatch(self, *args, **kwargs):\n return super(TrailDelete, self).dispatch(*args, **kwargs)\n\n\n@permission_required('core.change_path')\ndef merge_path(request):\n \"\"\"\n Path merging view\n \"\"\"\n response = {}\n\n if request.method == 'POST':\n ids_path_merge = request.POST.getlist('path[]')\n\n assert len(ids_path_merge) == 2\n\n path_a = Path.objects.get(pk=ids_path_merge[0])\n path_b = Path.objects.get(pk=ids_path_merge[1])\n\n if not path_a.same_structure(request.user) or not path_b.same_structure(request.user):\n response = {'error': _(u\"You don't have the right to change these paths\")}\n return HttpJSONResponse(response)\n\n if path_a.draft != path_b.draft:\n response = {'error': _(u\"You can't merge 1 draft path with 1 normal path\")}\n return HttpJSONResponse(response)\n\n try:\n result = path_a.merge_path(path_b)\n except Exception as exc:\n response = {'error': u'%s' % exc, }\n return HttpJSONResponse(response)\n\n if result == 2:\n response = {'error': _(u\"You can't merge 2 paths with a 3rd path in the intersection\")}\n elif result == 0:\n response = {'error': _(u\"No matching points to merge paths found\")}\n else:\n response = {'success': _(u\"Paths merged successfully\")}\n messages.success(request, _(u\"Paths merged successfully\"))\n\n return HttpJSONResponse(response)\n\n\nclass ParametersView(View):\n def get(request, *args, **kwargs):\n response = {\n 'geotrek_admin_version': settings.VERSION,\n }\n return HttpResponse(json.dumps(response), 
content_type=\"application/json\")\n","sub_path":"geotrek/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"130631657","text":"import argparse\nimport torch\nimport torch.distributions as distributions\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utils\nimport networks\nfrom sklearn import datasets\nfrom matplotlib.colors import ListedColormap\nimport data_utils\ndevice = torch.device('cuda:' + str(0) if torch.cuda.is_available() else 'cpu')\n\n\n\n\n\ndef decision_boundary(net, X):\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, x_max]x[y_min, y_max].\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, .02),\n np.arange(y_min, y_max, .02))\n xxt = torch.from_numpy(xx.ravel()).float()\n yyt = torch.from_numpy(yy.ravel()).float()\n xxyy = torch.cat([xxt[:, None], yyt[:, None]], dim=1)\n logits = net(xxyy)\n Z = logits.argmax(1)\n plt.pcolormesh(xx, yy, Z.numpy().reshape(xx.shape), cmap=ListedColormap(['r', 'b']), alpha=.1)\n\n\n\n\ndef main(args):\n utils.makedirs(args.save)\n net = networks.SmallMLP(2, 2, n_hid=args.hid)\n\n if args.dataset == \"moons\":\n Xf, Y = datasets.make_moons(1000, noise=.1)\n Xfte, Yte = datasets.make_moons(1000, noise=.1)\n Xoh, Xohte = [], []\n elif args.dataset == \"circles\":\n Xf, Y = datasets.make_circles(1000, noise=.03)\n Xfte, Yte = datasets.make_circles(1000, noise=.03)\n Xoh, Xohte = [], []\n elif args.dataset == \"adult\":\n with open(\"data/adult/adult.data\", 'r') as f:\n Xf, Xoh, Y = data_utils.load_adult()\n with open(\"data/adult/adult.test\", 'r') as f:\n Xfte, Xohte, Yte = data_utils.load_adult()\n\n else:\n raise NotImplementedError\n\n Xf = Xf.astype(np.float32)\n Xfl, Xohl, Yl = [], [], []\n if args.n_labels_per_class != -1:\n Xfl.extend(Xf[Y == 0][:args.n_labels_per_class])\n Xfl.extend(Xf[Y == 1][:args.n_labels_per_class])\n Yl.extend([0] * args.n_labels_per_class)\n Yl.extend([1] * args.n_labels_per_class)\n if Xoh is not None:\n Xohl.extend(Xf[Y == 0][:args.n_labels_per_class])\n Xohl.extend(Xf[Y == 1][:args.n_labels_per_class])\n else:\n Xfl, Xohl, Yl = Xf, Xoh, Y\n\n def plot_data(fname=\"data.png\"):\n plt.clf()\n decision_boundary(net, Xf)\n plt.scatter(Xf[:, 0], Xf[:, 1], c='grey')\n plt.scatter(Xfl[:args.n_labels_per_class, 0], Xfl[:args.n_labels_per_class, 1], c='r')\n plt.scatter(Xfl[args.n_labels_per_class:, 0], Xfl[args.n_labels_per_class:, 1], c='b')\n plt.savefig(\"{}/{}\".format(args.save, fname))\n\n optim = torch.optim.Adam(params=net.parameters(), lr=args.lr)\n\n xl = torch.from_numpy(Xl).to(device)\n yl = torch.from_numpy(np.array(Yl)).to(device)\n x_te, y_te = torch.from_numpy(Xte).float(), torch.from_numpy(Yte)\n inds = list(range(X.shape[0]))\n for i in range(args.n_iters):\n batch_inds = np.random.choice(inds, args.batch_size, replace=False)\n x = X[batch_inds]\n x = torch.from_numpy(x).to(device).requires_grad_()\n\n logits = net(xl)\n clf_loss = nn.CrossEntropyLoss(reduction='none')(logits, yl).mean()\n\n logits_u = net(x)\n logpx_plus_Z = logits_u.logsumexp(1)\n sp = utils.keep_grad(logpx_plus_Z.sum(), x)\n e = torch.randn_like(sp)\n eH = utils.keep_grad(sp, x, grad_outputs=e)\n trH = (eH * e).sum(-1)\n\n sm_loss = trH + .5 * (sp ** 2).sum(-1)\n sm_loss = sm_loss.mean()\n\n loss = (1 - args.sm_lam) * clf_loss + args.sm_lam * sm_loss\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n if i % 100 == 0:\n if args.dataset in (\"rings\", \"moons\"):\n plot_data(\"data_{}.png\".format(i))\n te_logits = 
net(x_te.float())\n te_preds = torch.argmax(te_logits, 1)\n te_acc = (te_preds == y_te).float().mean()\n print(\"Iter {}: Clf Loss = {}, SM Loss = {} | Test Accuracy = {}\".format(i,\n clf_loss.item(), sm_loss.item(),\n te_acc.item()))\n\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"Energy Based Models and Shit\")\n # logging + evaluation\n parser.add_argument(\"--save\", type=str, default='.')\n parser.add_argument(\"--seed\", type=int, default=1234)\n parser.add_argument(\"--dim\", type=int, default=10)\n parser.add_argument(\"--hid\", type=int, default=100)\n parser.add_argument(\"--n_labels_per_class\", type=int, default=3)\n parser.add_argument(\"--n_iters\", type=int, default=10000)\n parser.add_argument(\"--sm_lam\", type=float, default=.8)\n parser.add_argument(\"--dist\", type=str, default=\"gaussian\")\n parser.add_argument(\"--posterior\", type=str, default=\"gaussian-1\")\n parser.add_argument(\"--dataset\", type=str, default=\"moons\")\n parser.add_argument(\"--std_sample\", type=str, default=\"log\", choices=[\"log\", \"linear\"])\n # optimization\n parser.add_argument(\"--lr\", type=float, default=1e-3)\n parser.add_argument(\"--decay_epochs\", nargs=\"+\", type=int, default=[])\n parser.add_argument(\"--decay_rate\", type=float, default=.1)\n parser.add_argument(\"--optimizer\", choices=[\"adam\", \"sgd\"], default=\"adam\")\n parser.add_argument(\"--batch_size\", type=int, default=500)\n # regularization\n parser.add_argument(\"--weight_decay\", type=float, default=0.0)\n # network\n parser.add_argument(\"--network\", type=str, default=\"mlp\", choices=[\"mlp\", \"resnet\"])\n # EBM specific\n parser.add_argument(\"--n_steps\", type=int, default=10000)\n parser.add_argument(\"--sgld_lr\", type=float, default=1.0)\n parser.add_argument(\"--sgld_std\", type=float, default=1e-2)\n parser.add_argument(\"--ckpt_every\", type=int, default=10)\n parser.add_argument(\"--eval_every\", type=int, default=1)\n parser.add_argument(\"--print_every\", type=int, default=100)\n parser.add_argument(\"--sample_every\", type=int, default=100)\n parser.add_argument(\"--load_path\", type=str, default=None)\n parser.add_argument(\"--print_to_log\", action=\"store_true\")\n parser.add_argument(\"--form\", type=str, default=\"critic\")\n parser.add_argument(\"--direct_loss\", action=\"store_true\")\n parser.add_argument(\"--logit\", action=\"store_true\")\n\n args = parser.parse_args()\n main(args)\n","sub_path":"jemi_supervised_toy.py","file_name":"jemi_supervised_toy.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"184778087","text":"from typing import TYPE_CHECKING\n\nfrom vk_api.keyboard import VkKeyboard, VkKeyboardColor\nfrom vk_api.longpoll import Event\n\nfrom vkinder.helpers import write_msg\nfrom vkinder.models import User\nfrom vkinder.state._base import State\n\nif TYPE_CHECKING:\n from vkinder.bot import Bot\n from vkinder.state import StateName\n\n\nclass HelloState(State):\n key = \"hello\"\n\n text = (\n \"Привет, {first_name}! \"\n \"Я бот-сваха, прямо как Роза Сябитова, только со мной не страшно. \"\n \"Я помогу тебе найти идеальную пару! \"\n \"Ну, или хотя бы какую-нибудь. Приступим? \"\n \"Жми на кнопку!\"\n )\n\n @classmethod\n def enter(cls, bot: \"Bot\", event: Event) -> None:\n user = bot.storage.get(User, event.user_id)\n\n keyboard = VkKeyboard(one_time=True)\n keyboard.add_button(\"Новый поиск\", color=VkKeyboardColor.PRIMARY)\n\n write_msg(\n bot.group_session,\n event.user_id,\n cls.text.format(first_name=user.first_name),\n keyboard=keyboard.get_keyboard(),\n )\n\n @classmethod\n def leave(cls, bot: \"Bot\", event: Event) -> \"StateName\":\n from vkinder.state import StateName\n\n if event.text == \"Новый поиск\":\n return StateName.SELECT_COUNTRY\n else:\n return StateName.HELLO_ERROR\n\n\nclass HelloErrorState(HelloState):\n key = \"hello_error\"\n\n text = (\n \"Извини, {first_name}, я не знаю такой команды. \"\n \"Используй, пожалуйста, кнопки, чтобы мне было проще тебя понимать. \"\n \"Нажми на кнопку ниже, чтобы начать новый поиск.\"\n )\n\n\nclass HelloAgainState(HelloState):\n key = \"hello_again\"\n\n text = (\n \"Ты находишься в главном меню, {first_name}. Начнём новый поиск? \"\n \"Если ты уже искал людей раньше, то можно просмотреть результаты \"\n \"предыдущих поисков.\"\n )\n","sub_path":"vkinder/state/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"410479687","text":"# Словари для выплывающих неменяемых списков\n# Способ проверки @then('\"{sl_block}\" has default \"{table}\" items')\n#\n# Метод списания (по умолчанию)\nrouteWithdrawType = [\n 'По тех. карте',\n 'По продукту',\n 'Сначала по продукту, затем по тех. карте',\n 'Не списывать'\n]\n#\n# (Базовая) Единица измерения по умолчанию (для новых модификаторов (ингредиентов, полуфабрикатов))\nnewMeasureUnit = [\n 'шт',\n 'кг',\n 'л',\n 'пц'\n]\n#\n# Ед. изм.\nmeasureUnit = [\n 'шт',\n 'кг',\n 'л',\n 'пц'\n]\n#\n# Способ реализации (по умолчанию)\nsellingType = [\n 'Не может продаваться частями',\n 'Может продаваться частями',\n 'Требовать ввод веса при продаже'\n]\n#\n# НДС\nvat = [\n '<без НДС>',\n '0 %',\n '10 %',\n '18 %'\n]\n#\n# Тип покупателя\ncustomerType = [\n 'Гость',\n 'Сотрудник',\n 'Контрагент'\n]\n#\n# Тип корреспондента\ncorrespondentType = [\n 'Клиент',\n 'Сотрудник',\n 'Контрагент',\n 'Списание'\n]\n#\n# Алкоголь/Остатки по ЕГАИС\nunitType = [\n 'Фасованная, шт.',\n 'Нефасованная, дал'\n]\n#\n# CRM/Клиенты\n# Пол\nsexType = [\n 'Мужской',\n 'Женский'\n]\n#\n# Специальное предложение\nbonusConditionType = [\n 'Полное списание',\n 'Кратное количеству блюд по акции'\n]\n# Финанся/Статьи\n# Тип статьи\naccountType = [\n 'Расходная',\n 'Приходная',\n 'Приходно-расходная'\n]\n#\n# Справочники/Типы оплат\n# Тип операции\noperationType = [\n 'Фискальный',\n 'Нефискальный',\n 'За счет заведения'\n]\n#\n# Механизм платежа\npaymentMechanism = [\n 'Налчные',\n 'POS',\n 'Бонусы'\n]\n#\n# Тип контрагента\npartnerType = [\n 'Сотрудник',\n 'Клиент',\n 'Контрагент',\n 'Не определен'\n]\n#\n# Устройства / Банковские терминалы\nposType = [\n 'Не определен',\n 'iBox',\n 'POS'\n]\n# Предприятие/Настройки\n# Вклад отрицательных остатков\nnegativeBalanceContribution = [\n 'Отсутствует',\n '1 единица',\n 'Абсолютная величина'\n]\n#\n# Вклад нулевых остатков\nzeroBalanceContribution = [\n 'Отсутствует',\n '1 единица'\n]\n#\n# Себестоимость для отрицательных и нулевых остатков\nbalancePrimeCost = [\n 'Отображать только для положительных остатков',\n 'Отображать только для положительных и нулевых остатков',\n 'Отображать только для положительных и отрицательных остатков',\n 'Отображать всегда'\n]\n#\n# Себестоимость продуктов\nrouteWithdrawMode = [\n 'По типу списания',\n 'По продукту',\n 'По тех. карте рекурсивно',\n 'По тех. карте нерекурсивно'\n]\n#\n# Текущие остатки\nactualBalancePrimeCost = [\n 'На текущий момент',\n 'Вместе с будущими операциями']\n#\n# Исторические остатки\nsnapshotBalancePrimeCost = [\n 'До проведения документа',\n 'После проведения документа',\n 'На текущий момент',\n 'Вместе с будущими операциями'\n]\n#\n# Метод расчета себестоимости\nprimeCostCalculationMethod = [\n 'Среднее арифметическое для положительных остатков',\n 'Среднее арифметическое',\n 'По последнему приходу'\n]\n#\n# Наложение скидок (надбавок)\noverlayType = [\n 'Наложение',\n 'Максимальное значение',\n 'Минимальное значение'\n]\n#\n# Режим расчета операционной прибыли\noperatingRevenueCalculationMode = [\n 'Себестоимость продаж минус приходные накладные',\n 'Отображать только себестоимость продаж'\n]\n# Создание номенклатурной единицы из тех карты\nadd_nomenclature = [\n 'Блюдо',\n 'Модификатор',\n 'Ингредиент',\n 'П/ф',\n]\n","sub_path":"resources/rus/dropdown.py","file_name":"dropdown.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"437283806","text":"from bs4 import BeautifulSoup\nimport urllib3\n\nclass LyricsSearcher():\n def __init__(self):\n self.sites = ['azlyrics'] \n \n \n def _get_next_site(self):\n \"\"\"\n generator to fetch the next website\n \"\"\"\n for site in self.sites:\n yield(site)\n\n \n def _az_format(self,link):\n \"\"\"\n Function that creates formatting specifically for azlyrics.com\n \"\"\"\n link = link.lower()\n \n #azlyrics currently strips single quotes out of names\n link = link.replace(\"\\'\",\"\")\n \n #this removes all spaces in the title\n link = link.split()\n\n return ''.join(link)\n \n #creates a properly formatted url for the Lyrics Searcher to use\n def _construct_link(self,link,artist,title):\n if link == 'azlyrics':\n artist = self._az_format(artist)\n title = self._az_format(title)\n return \"http://www.\" + link + \".com/lyrics/\" + artist + \"/\" + title + \".html\"\n else:\n return NotImplemented\n \n def _fetch_site_data(self,artist,title): \n \"\"\" \n \n Sends a GET request to obtain the requested lyrics\n returns both a HTTPResponse object as well as the correct url\n \"\"\"\n http = urllib3.PoolManager()\n \n for site in self.sites: \n \n url = self._construct_link(site,artist,title)\n \n #this heuristic attempts to find a url from a given site\n #and will start the next one if it does not \n try:\n requested_data = http.request(\"GET\",url)\n \n #if r exists but is not an OK from the website, continue\n if requested_data.status != 200:\n continue\n \n break\n\n # if url is not found, continue\n except urllib3.exceptions.LocationValueError:\n continue\n \n if not requested_data:\n return None \n \n return [requested_data,url]\n\n def _az_fetch(self,artist,title,requested_data): \n \"\"\"\n Function fetches lyrics from azlyrics specifically\n \"\"\"\n soup = BeautifulSoup(requested_data.data, \"lxml\") \n #fetches data\n if not requested_data:\n return None\n #fetches the lyrics\n lyrics = soup.find_all(\"div\", attrs={\"class\": None, \"id\": None})\n \n if not lyrics:\n return None\n \n lyrics = [line.getText() for line in lyrics]\n lyrics = [x.rstrip() for x in lyrics[0].split(\"\\n\")]\n lyrics = [\">\" + x for x in lyrics]\n\n lyrics.insert(0,title.upper())\n lyrics.insert(0,artist.upper())\n \n \n #reddit requires TWO newlines in order to be interpreted \n return \"\\n\\n\".join(lyrics)\n\n def get_lyrics(self,artist,title):\n \"\"\"\n Function returns lyrics from a requested artist and title\n \"\"\"\n site_data = self._fetch_site_data(artist,title)\n requested_data = site_data[0]\n url = site_data[1] \n \n #implementation works for az_lyrics currently\n if \"azlyrics\" in url: \n return self._az_fetch(artist,title,requested_data) \n \n return None\n","sub_path":"Bots/LyricsSearcher/LyricsSearcher.py","file_name":"LyricsSearcher.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"323130336","text":"import sys\nfrom estimator import Estimator\nfrom classifier import Classifier\n\nfrom lib import db\nfrom lib import cli\nfrom csv import DictWriter\nfrom lib import logger\nfrom lib import aggregator as ag\nfrom lib.node import nodegen\nfrom collections import namedtuple\nfrom configparser import ConfigParser\nfrom multiprocessing import Pool\n\nResults = namedtuple('Results', [ 'keys', 'values', ])\nclass ResultsWriter:\n def __init__(self, header):\n self.header = header\n self.writer = None\n\n def write(self, results):\n if not results.values:\n return\n \n if not self.writer:\n self.writer = DictWriter(sys.stdout, results.keys, delimiter=';')\n if self.header:\n self.writer.writeheader()\n \n self.writer.writerows(results.values)\n\n#\n# Mappings between configuration options and learning\n# interfaces. Dictionary keys should have a corresponding key in the\n# .ini file!\n#\nmachine_ = {\n 'classification': Classifier,\n 'estimation': Estimator,\n}\n\naggregator_ = {\n 'simple': ag.simple,\n 'change': ag.change,\n 'average': ag.average,\n 'difference': ag.difference,\n}\n\n#\n# Run the prediction!\n#\ndef run(args):\n (index, node, (config,)) = args\n\n log = logger.getlogger()\n log.info('node: {0}'.format(node))\n\n opts = config['machine']\n machine = machine_[opts['model']]\n aggregator = aggregator_[opts['feature-transform']]\n model = machine(node, config, aggregator)\n\n keys = model.header()\n values = []\n try:\n values = model.predict(model.classify())\n except ValueError as v:\n log.error(v)\n\n return Results(keys, values)\n\n#\n# Setup\n#\n\nlog = logger.getlogger(True)\nlog.info('phase 1')\nlog.info('db version: {0}'.format(db.mark()))\n\ncargs = cli.CommandLine(cli.optsfile('prediction')) # /etc/opts/prediction\n\nconfig = ConfigParser()\nconfig.read(cargs.args.config) # --config\n\nparams = config['parameters']\nwriter = ResultsWriter(config['output'].getboolean('print-header'))\n\n# Establish the database credentials. Passing None uses the\n# defaults.\ndbinfo = config['database'] if 'database' in config else None\ndb.EstablishCredentials(**dbinfo)\n\n#\n# Processing\n#\nlog.info('phase 2')\n\nif 'node' in params:\n results = run((0, int(params['node']), config))\n writer.write(results)\nelse:\n with Pool() as pool:\n for i in pool.imap_unordered(run, nodegen(config), 1):\n writer.write(i)\n\n#\n# Tear down\n#\nlog.info('phase 3')\n","sub_path":"src/prediction/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"85847965","text":"\"\"\"Save Humans!\n\nUsage:\n\tsimulator \n\nOptions:\n -h --help Show this screen.\n --version Show version.\n\n\"\"\"\n\nfrom os import kill\nfrom sys import stderr\nfrom typing import Any, Callable, Dict, List, Tuple, Type, Union\n\nfrom docopt import docopt\n\nfrom codeingame import Ash, Human, Point, Zombie, Game, Field\n\nimport sys\nimport os\n\ntry:\n\timport pygame\nexcept:\n\tpass\n\ndef main(simulation: str = None, enable_graphics: bool = True):\n\tif not simulation:\n\t\targuments = docopt(__doc__)\n\t\tsimulation = arguments['']\n\n\tash = None\n\thumans = []\n\tzombies = []\n\n\tfile = os.path.join(os.getcwd(), 'simulations', f'{simulation}.siml')\n\n\tif not os.path.exists(file):\n\t\tprint(f'Simulation {file}.siml does not exists')\n\t\tsys.exit(1)\n\n\twith open(file) as f:\n\t\tfor entity in f.read().split('\\n'):\n\t\t\tprint(f'parsing entity {entity}', entity)\n\n\t\t\tif entity.startswith('A'):\n\t\t\t\tash = Ash(*[int(i) for i in entity.split()[1:]])\n\n\t\t\telif entity.startswith('H'):\n\t\t\t\thumans.append(Human(*[int(i) for i in entity.split()[1:]]))\n\n\t\t\telif entity.startswith('Z'):\n\t\t\t\tzombies.append(Zombie(*[int(i) for i in entity.split()[1:]]))\n\n\t\t\telse:\n\t\t\t\tif entity:\n\t\t\t\t\tprint(f'unparsable entity: {entity}', file=sys.stderr, flush=True)\n\n\tif not ash or not humans or not zombies:\n\t\tprint('missing something')\n\t\tprint('ash', ash)\n\t\tprint('humans', humans)\n\t\tprint('zombies', zombies)\n\t\tsys.exit(1)\n\n\treturn GameController(ash, humans, zombies, graphic_engine=enable_graphics).run_game()\n\n# === GameController === ===================================================== #\n\nBACKGROUND = (255, 255, 255)\nTEXT = (0, 0, 0)\nASH = (0, 0, 255)\nASH_RANGE = (255, 255, 0)\nHUMAN = (0, 255, 0)\nZOMBIE = (255, 0, 0)\nZOMBIE_RANGE = (0, 255, 255)\n\ndef make_interpolater(left_min, left_max, right_min, right_max):\n\t# Figure out how 'wide' each range is\n\tleft = left_max - left_min\n\tright = right_max - right_min\n\n\t# Compute the scale factor between left and right values\n\tscale = float(right) / float(left) if left != 0 else float(left)\n\n\t# create interpolation function using pre-calculated scaleFactor\n\tdef interp_fn(value):\n\t\t\treturn right_min + (value - left_min) * scale\n\n\treturn interp_fn\n\n\ndef animate(fn):\n\tdef wrapper(game_controller):\n\t\tif not game_controller.graphic_engine:\n\t\t\treturn fn(game_controller)\n\n\t\t# before = game_controller.entities.copy()\n\t\t# retr = fn(game_controller)\n\t\t# after = game_controller.entities\n\n\t\t# print('equals', before == after)\n\n\t\t# return retr\n\t\treturn fn(game_controller)\n\n\treturn wrapper\n\n\nclass GameController(object):\n\tENGINE: Type[Game] = Game\n\n\tash: Ash\n\thumans: List[Human]\n\tzombies: List[Zombie]\n\n\tTICK: 60\n\n\tSCALE: int = 10\n\tWIDTH: int = int(Field.WIDTH / SCALE)\n\tHEIGHT: int = int(Field.HEIGHT / SCALE)\n\n\tentities: Dict[Union[Ash, Human, Zombie], Tuple[int, int]]\n\told_entities: Dict[Union[Ash, Human, Zombie], Tuple[int, int]]\n\n\tinterpolator_w: Callable\n\tinterpolator_h: Callable\n\n\tscreen: Any\n\tfont: Any\n\tclock: Any\n\n\tgraphic_engine: bool\n\n\tdef __init__(self, ash: Ash, humans: List[Human], zombies: List[Zombie], graphic_engine: bool = True):\n\t\tself.graphic_engine = graphic_engine\n\n\t\tif self.graphic_engine:\n\t\t\tpygame.init()\n\n\t\t\tself.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT))\n\t\t\tself.clock = 
pygame.time.Clock()\n\n\t\t\tself.interpolator_w = make_interpolater(0, Field.WIDTH, 0, self.WIDTH)\n\t\t\tself.interpolator_h = make_interpolater(0, Field.HEIGHT, 0, self.HEIGHT)\n\t\t\tself.entities = {}\n\n\t\t\tself.font = pygame.font.SysFont(None, 48)\n\n\n\t\tself.ash = ash\n\t\tself.humans = humans\n\t\tself.zombies = zombies\n\t\tself.score = 0\n\n\tdef run_game(self):\n\n\t\tif self.graphic_engine:\n\t\t\tself.entities[self.ash] = self.translate(self.ash)\n\n\t\t\tfor human in self.humans:\n\t\t\t\tself.entities[human] = self.translate(human)\n\n\t\t\tfor zombie in self.zombies:\n\t\t\t\tself.entities[zombie] = self.translate(zombie)\n\n\t\t\tprint(self.entities)\n\n\t\twhile self.humans and self.zombies:\n\t\t\tif self.graphic_engine:\n\t\t\t\tfor event in pygame.event.get():\n\t\t\t\t# Did the user hit a key?\n\t\t\t\t\tif event.type == pygame.constants.KEYDOWN:\n\t\t\t\t\t\t# Was it the Escape key? If so, stop the loop.\n\t\t\t\t\t\tif event.key == pygame.constants.K_ESCAPE:\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t# Did the user click the window close button? If so, stop the loop.\n\t\t\t\t\telif event.type == pygame.constants.QUIT:\n\t\t\t\t\t\tbreak\n\n\t\t\tself.move_zombies()\n\t\t\tself.ash_move()\n\t\t\tself.ash_attack()\n\t\t\tself.zombie_attack()\n\n\t\t\tif self.graphic_engine:\n\t\t\t\tself.update()\n\t\t\t\tself.clock.tick(1)\n\n\t\tif self.graphic_engine:\n\t\t\tif self.humans:\n\t\t\t\ttext = self.font.render(\"You Win\", True, TEXT)\n\t\t\t# pygame.quit()\n\n\t\t\telse:\n\t\t\t\ttext = self.font.render(\"You Lost\", True, TEXT)\n\n\t\telse:\n\n\t\t\tif self.humans:\n\t\t\t\t# Win\n\t\t\t\treturn True\n\n\t\t\telse:\n\t\t\t\t# Lose\n\t\t\t\treturn False\n\n\t@animate\n\tdef move_zombies(self):\n\t\tfor zombie in self.zombies:\n\t\t\tzombie.move_to(zombie.nearest(self.humans + [self.ash]))\n\n\t\t\tif self.graphic_engine:\n\t\t\t\tself.entities[zombie] = self.translate(zombie.point())\n\n\t@animate\n\tdef ash_move(self):\n\n\t\ttry:\n\t\t\tgame = self.ENGINE(self.ash, self.humans, self.zombies)\n\t\t\tpoint = Point(*[int(i) for i in str(game.play()).split()])\n\n\t\texcept BaseException as e:\n\t\t\tprint(e)\n\t\t\tsys.exit(1)\n\n\t\tself.ash.move_to(point)\n\n\t\tif self.graphic_engine:\n\t\t\tself.entities[self.ash] = self.translate(self.ash.point())\n\n\t@animate\n\tdef ash_attack(self):\n\t\tkills = [zombie for zombie in self.zombies if self.ash.reach(zombie)]\n\n\t\tif kills:\n\t\t\tfor kill in kills:\n\t\t\t\tself.zombies.remove(kill)\n\n\t\t\t\tif self.graphic_engine:\n\t\t\t\t\tdel self.entities[kill]\n\n\t@animate\n\tdef zombie_attack(self):\n\t\tfor zombie in self.zombies:\n\t\t\tkillables = [human for human in self.humans if zombie.reach(human)]\n\n\t\t\tif killables:\n\t\t\t\tnearest = zombie.nearest(killables)\n\t\t\t\tzombie.move_to(nearest)\n\t\t\t\tself.humans.remove(nearest)\n\n\t\t\t\tif self.graphic_engine:\n\t\t\t\t\tdel self.entities[nearest]\n\n\tdef update(self):\n\t\tself.screen.fill(BACKGROUND)\n\n\t\tfor entity, position in self.entities.items():\n\t\t\tif isinstance(entity, Ash):\n\t\t\t\tpygame.draw.circle(self.screen, ASH_RANGE, position, entity.RANGE / self.SCALE)\n\t\t\t\tpygame.draw.circle(self.screen, ASH, self.translate(entity), self.SCALE)\n\n\t\t\telif isinstance(entity, Human):\n\t\t\t\tpygame.draw.circle(self.screen, HUMAN, position, self.SCALE)\n\n\t\t\telif isinstance(entity, Zombie):\n\t\t\t\tpygame.draw.circle(self.screen, ZOMBIE_RANGE, position, entity.RANGE / self.SCALE)\n\t\t\t\tpygame.draw.circle(self.screen, ZOMBIE, position, 
self.SCALE)\n\n\t\tpygame.display.flip()\n\n\tdef animate(self, fn):\n\t\tpass\n\n\tdef translate(self, point: Point) -> Tuple[int, int]:\n\t\treturn (self.interpolator_w(point.x),\n\t\t\t\t\t\tself.interpolator_h(point.y))\n","sub_path":"simulator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"434651343","text":"import os\nimport sys\nimport django\nimport logging\n\n\nSITE_ROOT = os.path.dirname(os.path.realpath(__file__))\nBASE_ROOT = os.path.abspath(os.path.join(SITE_ROOT, os.pardir))\n# Django settings for conserte_me project.\n\nDEBUG = os.environ['DEBUG']\nTEMPLATE_DEBUG = DEBUG\n\nRUNNING_DEVSERVER = (sys.argv[1] == 'runserver')\n\nADMINS = (\n ('Leonardo Korndorfer', 'leokorndorfer@gmail.com'),\n)\n\nMANAGERS = ['leokorndorfer@gmail.com', 'cristianobfernandes@gmail.com']\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': os.environ['DATABASE_ENGINE'], # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.environ['DATABASE_NAME'], # Or path to database file if using sqlite3.\n 'USER': os.environ['DATABASE_USER'], # Not used with sqlite3.\n 'PASSWORD': os.environ['DATABASE_PASSWORD'], # Not used with sqlite3.\n 'HOST': os.environ['DATABASE_HOST'], # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': os.environ['DATABASE_PORT'], # Set to empty string for default. Not used with sqlite3.\n }\n}\n\n\n# email settings\n# These are the smtp settings for gmail\nEMAIL_HOST = 'mail.conserte.me'\nEMAIL_HOST_USER = 'avisos@conserte.me'\nEMAIL_HOST_PASSWORD = 'Mj?*j!6m'\nDEFAULT_FROM_EMAIL = 'avisos@conserte.me'\nSERVER_EMAIL = 'avisos@conserte.me'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\nAPPEND_SLASH = True\n\nLOGIN_REDIRECT_URL = '/'\nLOGIN_URL = '/login'\n\nLOGOUT_REDIRECT_URL = '/'\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = BASE_ROOT + '/media/'\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = 'media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = BASE_ROOT + '/assets/'\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/assets/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n BASE_ROOT + '/static/assets/',\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 's#_3ozw)&(h+2=^t26h1w&z47!pd#0ptsz!*+&nex7vsul_o&i'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'hamlpy.template.loaders.HamlPyFilesystemLoader',\n 'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'conserte_me.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'conserte_me.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n BASE_ROOT + '/templates',\n BASE_ROOT + '/profiles',\n '/home/conserte_me/website/conserte_me',\n '/home/conserte_me/website/issues',\n '/home/conserte_me/website/templates',\n)\n\n# TEMPLATE_CONTEXT_PROCESSORS = 'django.contrib.messages.context_processors.messages'\nMESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n # 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # 'django.contrib.comments',\n\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n\n # thumbs\n 'easy_thumbnails',\n\n # migrations\n 'south',\n\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n\n 'conserte_me',\n 'issues',\n 'profiles',\n\n # plugins\n 'rest_framework',\n\n # custom comments\n # 'issue_comments',\n)\n\n# Custom comment application\n# COMMENTS_APP = 'issue_comments'\n\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'square': {'size': (50, 50), 'crop': 'smart'},\n 'big_square': 
{'size': (100, 100), 'crop': 'smart'},\n 'medium': {'size': (250, 250), 'crop': False},\n 'big': {'size': (500, 500), 'crop': False},\n },\n}\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# setup logger (logging is already imported at the top of this file)\nPROJECT_DIR = os.path.dirname(__file__)\nPARENT_DIR = os.path.dirname(PROJECT_DIR)\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(message)s',\n filename=os.path.join(PARENT_DIR, 'django.log'),\n filemode='a+')\n","sub_path":"conserte_me/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":7424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"356308916","text":"from random import choice, randint\nimport string, re\nfrom time import sleep\n\ndef clean_data(file):\n with open (file) as f: \n data = f.read()\n clean = data.replace('\\n',' ')\n clean = clean.lower()\n clean = re.sub(r'[^\\w\\s]', '', clean)\n \n return clean\n\ndef generate_model(text, order): \n \n model = {}\n \n for i in range(0,len(text) - order): \n fragment = text[i:i + order]\n next_letter = text[i + order]\n \n if fragment not in model: \n model[fragment] = {}\n \n if next_letter not in model[fragment]:\n model[fragment][next_letter] = 1 \n \n else:\n model[fragment][next_letter] += 1\n\n return(model)\n \ndef get_next_character(model,fragment):\n \n letters = []\n \n for letter in model[fragment].keys():\n \n try:\n for times in range(0, model[fragment][letter]):\n letters.append(letter)\n \n except KeyError:\n print('key not present')\n continue\n \n return choice(letters)\n\ndef generate_text(text, text2, order, length):\n \n data = clean_data(text)\n data2 = clean_data(text2)\n\n # print(data, data2)\n\n data_final=' '.join([data,data2])\n \n model = generate_model(data_final,order)\n \n current_fragment = data[0:order]\n \n output = \"\"\n \n for i in range(0, length - order):\n try :\n new_character = get_next_character(model,current_fragment)\n output += new_character\n current_fragment = current_fragment[1:] + new_character\n \n except KeyError:\n continue \n \n return output\n\ndef loop():\n rand_num = randint(100, 1000)\n resultat = generate_text(\"data.txt\", \"data2.txt\", 4, rand_num)\n return resultat\n \n# déclaration fonction pour enregister\n\nif __name__ == \"__main__\":\n while True:\n print(loop(), '\\n')\n # appel fonction enregister\n sleep(2)","sub_path":"markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"558054780","text":"import re as re # regular expressions\nimport pprint # pretty printing\nimport os, fnmatch # to retrieve file information from path\nimport pickle # serialize the data structure\nimport copy\n\ndef processDirectoryOfBreakthroughFiles(path, playerList):\n for playerGameHistoryData in findFiles(path, '*.txt'):\n playerList.append(processBreakthroughFile(path, playerGameHistoryData))\n\ndef processBreakthroughFile(path, playerGameHistoryData):\n fileName = playerGameHistoryData[\n len(path):len(playerGameHistoryData) - len('.txt')] # trim path & extension\n fileName = fileName.split('間') # user間2000間687687 -> ['user',2000, 687687]\n playerName = str(fileName[0])\n playerID = int(fileName[2])\n rank = int(fileName[1])\n gamesList, numWins, numLosses = formatGameList(playerGameHistoryData, playerName)\n return {'Player': playerName, 'PlayerID': playerID, 'Rank': rank, 'Games': gamesList, 'Wins': numWins, 'Losses': numLosses}\n\ndef writeToDisk(input, path):\n outputFile = open(path + r'PlayerDataPythonDataset.p', 'wb')\n pickle.dump(input, outputFile)\n\ndef findFiles(path, filter): # recursively find files at path with filter extension; pulled from StackOverflow\n for root, dirs, files in os.walk(path):\n for file in fnmatch.filter(files, filter):\n yield os.path.join(root, file)\n\ndef preprocessGamesList(playerGameHistoryData): #normalized regex/iterable friendly list\n gamesList = [y[1] for y in list(\n enumerate([x.strip() for x in open(playerGameHistoryData, \"r\")]))] # read in file and convert to list\n gamesList = filter(None, gamesList) # remove empty strings from list\n gamesList = list(filter(lambda a: a != \"[Site \\\"www.littlegolem.net\\\"]\", gamesList)) # remove site from list\n return gamesList\n\ndef formatGameList(playerGameHistoryData, playerName):\n quotesRegex = re.compile(r'\"(.*)\"')\n eventEntry = 0\n whiteEntry = 1\n blackEntry = 2\n resultEntry = 3\n moveEntry = 4\n games = []\n gamesList = preprocessGamesList(playerGameHistoryData)\n numWins = 0\n numLosses = 0\n # flags to indicate if something wasn't set properly\n opponentName = None\n event = None\n playerColor = None\n opponentColor = None\n win = None\n # format game list\n for j in range(0, len(gamesList)):\n thisRow = j % 5\n if thisRow != moveEntry:\n rowData = quotesRegex.search(gamesList[j]).group(1)\n if thisRow == eventEntry:\n # [Event \"Tournament null\"] -> Event: 'Tournament null'\n event = rowData\n elif thisRow == whiteEntry:\n if playerName.lower() == rowData.lower(): # ignore case just in case (no pun intended)\n playerColor = 'White'\n opponentColor = 'Black'\n else:\n opponentName = rowData\n playerColor = 'Black'\n opponentColor = 'White'\n elif thisRow == blackEntry:\n # assignment case handled above\n if playerName.lower() != rowData.lower():\n opponentName = rowData\n elif thisRow == resultEntry:\n #\n if playerColor == 'White':\n if rowData[0] == '1':\n win = True\n elif rowData[0] == '0':\n win = False\n elif rowData[0] == '*':\n win = \"Game In Progress\"\n elif playerColor == 'Black':\n if rowData[0] == '0':\n win = True\n elif rowData[0] == '1':\n win = False\n elif rowData[0] == '*':\n win = \"Game In Progress\"\n else:\n print(\"UNEXPECTED DATA FORMAT\")\n win = \"Undefined at line \" + str(j)\n elif thisRow == moveEntry:\n # format move list\n moveList = formatMoveList(gamesList[j])\n boardStates = generateBoardStates(moveList, playerColor, win) # generate board states from moveList\n assert (playerColor != opponentColor and opponentName != 
playerName)\n if len(moveList) > 3 and boardStates['Win'] != \"Game In Progress\":\n #non-spurrious games, remove if statement for all games.\n if win == True:\n numWins += 1\n elif win == False:\n numLosses += 1\n\n games.append({'Event': event, 'PlayerColor': playerColor, 'OpponentColor': opponentColor,\n 'OpponentName': opponentName, 'Win': win,\n 'Moves': moveList, 'BoardStates': boardStates}) # append new game after formatting move list\n return games, numWins, numLosses\n\n\ndef generateBoardStates(moveList, playerColor, win):\n if win == \"Game In Progress\":\n return {'Win': win, 'States': []}\n #for human readability version\n empty = 'e'\n white = 'w'\n black = 'b'\n\n # win/loss 'value' symmetrical\n if win == True:\n win = 1\n elif win == False:\n win = -1\n state = [\n {\n 9: 0,\n 8: {'a': black, 'b': black, 'c': black, 'd': black, 'e': black, 'f': black, 'g': black, 'h': black},\n 7: {'a': black, 'b': black, 'c': black, 'd': black, 'e': black, 'f': black, 'g': black, 'h': black},\n 6: {'a': empty, 'b': empty, 'c': empty, 'd': empty, 'e': empty, 'f': empty, 'g': empty, 'h': empty},\n 5: {'a': empty, 'b': empty, 'c': empty, 'd': empty, 'e': empty, 'f': empty, 'g': empty, 'h': empty},\n 4: {'a': empty, 'b': empty, 'c': empty, 'd': empty, 'e': empty, 'f': empty, 'g': empty, 'h': empty},\n 3: {'a': empty, 'b': empty, 'c': empty, 'd': empty, 'e': empty, 'f': empty, 'g': empty, 'h': empty},\n 2: {'a': white, 'b': white, 'c': white, 'd': white, 'e': white, 'f': white, 'g': white, 'h': white},\n 1: {'a': white, 'b': white, 'c': white, 'd': white, 'e': white, 'f': white, 'g': white, 'h': white}\n }, win]\n mirrorState = mirrorBoardState(state)\n boardStates = {'Win': win, 'States': [state], 'MirrorStates': [mirrorState]}\n for i in range(0, len(moveList)):\n assert (moveList[i]['#'] == i + 1)\n if isinstance(moveList[i]['White'], dict): # if string, then == resign or NIL\n whoseMove = 'White'\n state = [movePiece(state[0], moveList[i]['White']['To'], moveList[i]['White']['From'], playerColor, 'White'), win]\n boardStates['States'].append(state)\n mirrorState = mirrorBoardState(state)\n boardStates['MirrorStates'].append(mirrorState)\n if isinstance(moveList[i]['Black'], dict): # if string, then == resign or NIL\n whoseMove = 'Black'\n state= [movePiece(state[0], moveList[i]['Black']['To'], moveList[i]['Black']['From'], playerColor, 'Black'), win]\n boardStates['States'].append(state)\n mirrorState = mirrorBoardState(state)\n boardStates['MirrorStates'].append(mirrorState)\n # for data transformation; inefficient to essentially compute board states twice, but more error-proof\n boardStates = convertBoardStatesToArrays(boardStates, playerColor)\n return boardStates\n\ndef mirrorBoardState(state):#since a mirror image has the same strategic value\n mirrorStateWithWin = copy.deepcopy(state) # edit copy of boardState\n mirrorState = mirrorStateWithWin[0]\n state = state[0] #the board state; state[1] is the win or loss value\n for row in sorted(state):\n whoseMoveIndex = 9\n if row != whoseMoveIndex: #don't touch the index that shows whose move generated this state\n for column in sorted(state[row]):\n if column == 'a':\n mirrorState[row]['h'] = state[row][column]\n elif column == 'b':\n mirrorState[row]['g'] = state[row][column]\n elif column == 'c':\n mirrorState[row]['f'] = state[row][column]\n elif column == 'd':\n mirrorState[row]['e'] = state[row][column]\n elif column == 'e':\n mirrorState[row]['d'] = state[row][column]\n elif column == 'f':\n mirrorState[row]['c'] = 
state[row][column]\n elif column == 'g':\n mirrorState[row]['b'] = state[row][column]\n elif column == 'h':\n mirrorState[row]['a'] = state[row][column]\n return mirrorStateWithWin\n\n\n\ndef convertBoardStatesToArrays(boardStates, playerColor):\n newBoardStates = boardStates\n states = boardStates['States']\n mirrorStates = boardStates['MirrorStates']\n assert len(states) == len(mirrorStates)\n newBoardStates['States'] = []\n newBoardStates['MirrorStates'] = []\n for i in range (0, len (states)):\n newBoardStates['States'].append(convertBoardTo1DArray(states[i], playerColor))\n newBoardStates['MirrorStates'].append(convertBoardTo1DArray(mirrorStates[i], playerColor))\n return newBoardStates\n\n\ndef convertBoardTo1DArray(boardState, playerColor):\n state = boardState[0]\n whoseMoveIndex = 9\n oneDArray = []\n for row in sorted(state):\n if row != whoseMoveIndex: #don't touch the index that shows whose move generated this state\n for column in sorted(state[row]):\n #needs to be sorted to traverse dictionary in lexicographical order\n value = -5\n if state[row][column] == 'e':\n value = 0\n elif state[row][column] == 'w':\n if playerColor == 'White':\n value = 1\n else:\n value = -1\n elif state[row][column] == 'b':\n if playerColor == 'Black':\n value = 1\n else:\n value = -1\n else:\n print(\"error in convertBoard\")\n exit(-190)\n oneDArray.append(value)\n oneDArray.append(state[whoseMoveIndex])#65th element is a flag indicating who generated this state\n newBoardState = [oneDArray, boardState[1]] # [x vector, y scalar]\n return newBoardState\n\n\ndef movePiece(boardState, To, From, playerColor, whoseMove):\n empty = 'e'\n whoseMoveIndex = 9\n nextBoardState = copy.deepcopy(boardState) # edit copy of boardState\n nextBoardState[int(To[1])][To[0]] = nextBoardState[int(From[1])][From[0]]\n nextBoardState[int(From[1])][From[0]] = empty\n if (playerColor == 'White' and whoseMove == 'White') or (playerColor == 'Black' and whoseMove == 'Black'):\n nextBoardState[whoseMoveIndex] = 1 #player made move\n else:\n nextBoardState[whoseMoveIndex] = -1 #opponent made move\n return nextBoardState\n\n\ndef formatMoveList(moveListString):\n moveRegex = re.compile(r'(\\d+)\\.\\s(resign|[a-h]\\d.[a-h]\\d)\\s(resign|[a-h]\\d.[a-h]\\d|\\d-\\d)',\n re.IGNORECASE) # fix last group to solve for resign first or second\n moveList = moveRegex.findall(moveListString)\n for i in range(0, len(moveList)):\n move = list(moveList[i])\n move[0] = int(move[0])\n assert (move[0] == i + 1)\n if move[1] == \"resign\":\n move[2] = \"NIL\"\n else:\n move[1] = {'From': move[1][0:2], 'To': move[1][3:len(move[1])]} # set White's moves\n if move[2] != \"resign\" and move[2] != \"NIL\": # set Black's moves\n if len(move[2]) > 3:\n move[2] = {'From': move[2][0:2], 'To': move[2][3:len(move[2])]}\n else:\n move[2] = \"NIL\"\n moveList[i] = {'#': move[0], 'White': move[1], 'Black': move[2]}\n return moveList\n\n\n #main script\nplayerList = []\npathToCheck = r'/Users/TeofiloZosa/BreakthroughData/AutomatedData/'\nprocessDirectoryOfBreakthroughFiles(pathToCheck, playerList)\n# for i in range(0, len(playerList)):\n# pprint.pprint(\"Player # \" + str(i + 1) + \": \" + playerList[i]['Player'])\n\nwriteToDisk(playerList, pathToCheck)\n\n# Verified Working.\n# #double check\n#pathToCheck2 = r'/Users/TeofiloZosa/BreakthroughData/'\n# newList = pickle.load(open(pathToCheck+r'PlayerDataPython.p', 'rb'))\n# oldList = pickle.load(open(pathToCheck2+r'PlayerDataPython.p', 'rb'))\n# assert (playerList == newList == 
oldList)\n\n\n","sub_path":"PlayerDataDirectoryToAnalysisFormat.py","file_name":"PlayerDataDirectoryToAnalysisFormat.py","file_ext":"py","file_size_in_byte":12383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"190384505","text":"# -*- coding:utf-8 -*-\n# @lc app=leetcode.cn id=387 lang=python\n#\n# [387] 字符串中的第一个唯一字符\n#\n\n# @lc code=start\nfrom collections import Counter\n\nclass Solution(object):\n def firstUniqChar(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n # cnt = Counter(s)\n # minVal = float('inf')\n # for x,i in cnt.items():\n # if i == 1:\n # minVal = min(minVal, s.index(x)) \n # return minVal if minVal != float('inf') else -1\n for c in s:\n if s.find(c) == s.rfind(c):\n return s.find(c)\n return -1\n \n# @lc code=end\nif __name__ == \"__main__\":\n obj = Solution()\n ret = obj.firstUniqChar(\"loveleetcode\")\n print(ret)\n ret = obj.firstUniqChar(\"cc\")\n print(ret)\n ret = obj.firstUniqChar(\"\")\n print(ret)\n ret = obj.firstUniqChar(\"leetcode\")\n print(ret)","sub_path":"Week_08/G20200343030585/LeetCode_387_585.py","file_name":"LeetCode_387_585.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"366317141","text":"#encoding:utf-8\nfrom StringIO import StringIO\nimport gzip,urllib\n#压缩\ndef compress(raw): \n out_file=StringIO()\n try:\n with gzip.GzipFile(fileobj=out_file,mode=\"wb\") as gzip_out:\n gzip_out.write(raw)\n gzip_out.flush()\n gzip_out.close()\n out_file.seek(0)\n return out_file.read()\n finally:\n out_file.close()\n#解压\ndef uncompress(raw):\n file_in=StringIO(raw)\n try:\n gzip_in=gzip.GzipFile(fileobj=file_in,mode=\"r\")\n return gzip_in.read()\n finally:\n file_in.close()\n\ndef urldecode(query,charset=\"utf-8\"):\n d = {}\n a = query.split('&')\n for s in a:\n if s.find('='):\n k,v = map(urllib.unquote, s.split('='))\n try:\n d[k]=v.decode(charset)\n except KeyError:\n d[k] = [v]\n return d\n","sub_path":"deb-src/uucin-zabbix-longlive/etc/zabbix/zabbix_plugin.conf.d/longlive_checker/longlive_checker_env/local/lib/python2.7/site-packages/tornado-3.1.1.alpha.0-py2.7.egg/tornado/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"7638616","text":"from collections import deque\nimport copy\nimport math\nimport random\n\n'''\n IMPORTANT: Read through the code before beginning implementation!\n Your solution should fill in the various \"TODO\" items within this starter code.\n'''\ng_CYCLE_TIME = .100\n\n\n# Default parameters will create a 4x4 grid to test with\ng_MAP_SIZE_X = 2. # 2m wide\ng_MAP_SIZE_Y = 1.5 # 1.5m tall\ng_MAP_RESOLUTION_X = 0.5 # Each col represents 50cm\ng_MAP_RESOLUTION_Y = 0.375 # Each row represents 37.5cm\ng_NUM_X_CELLS = int(g_MAP_SIZE_X // g_MAP_RESOLUTION_X) # Number of columns in the grid map\ng_NUM_Y_CELLS = int(g_MAP_SIZE_Y // g_MAP_RESOLUTION_Y) # Number of rows in the grid map\n\n# Map from Lab 4: values of 0 indicate free space, 1 indicates occupied space\ng_WORLD_MAP = [0] * g_NUM_Y_CELLS*g_NUM_X_CELLS # Initialize graph (grid) as array\n\n# Source and Destination (I,J) grid coordinates\ng_dest_coordinates = (2,2)\ng_src_coordinates = (0,0)\n\ncost_matrix = []\n\n\ndef create_test_map(map_array):\n # Takes an array representing a map of the world, copies it, and adds simulated obstacles\n num_cells = len(map_array)\n map_matrix = copy.copy(map_array)\n # Add obstacles to up to sqrt(n) vertices of the map\n for i in range(int(math.sqrt(len(map_array)))):\n random_cell = random.randint(0, num_cells-1)\n map_matrix[random_cell] = 1\n\n return map_matrix\n\ndef vertex_index_to_ij(vertex_index):\n '''\n vertex_index: unique ID of graph vertex to be convered into grid coordinates\n Returns COL, ROW coordinates in 2D grid\n '''\n global g_NUM_X_CELLS\n return (vertex_index % g_NUM_X_CELLS, vertex_index // g_NUM_X_CELLS)\n\ndef ij_to_vertex_index(i,j):\n '''\n i: Column of grid map\n j: Row of grid map\n\n returns integer 'vertex index'\n '''\n global g_NUM_X_CELLS\n return j*g_NUM_X_CELLS + i\n\n\ndef ij_coordinates_to_xy_coordinates(i,j):\n '''\n i: Column of grid map\n j: Row of grid map\n\n returns (X, Y) coordinates in meters at the center of grid cell (i,j)\n '''\n global g_MAP_RESOLUTION_X, g_MAP_RESOLUTION_Y\n return (i+0.5)*g_MAP_RESOLUTION_X, (j+0.5)*g_MAP_RESOLUTION_Y\n\ndef xy_coordinates_to_ij_coordinates(x,y):\n '''\n i: Column of grid map\n j: Row of grid map\n\n returns (X, Y) coordinates in meters at the center of grid cell (i,j)\n '''\n global g_MAP_RESOLUTION_X, g_MAP_RESOLUTION_Y\n return int(i // g_MAP_RESOLUTION_X), int(j // g_MAP_RESOLUTION_Y)\n\n# **********************************\n# * Core Dijkstra Functions *\n# **********************************\n\ndef get_travel_cost(vertex_source, vertex_dest):\n # Returns the cost of moving from vertex_source (int) to vertex_dest (int)\n # INSTRUCTIONS:\n '''\n This function should return 1 if:\n vertex_source and vertex_dest are neighbors in a 4-connected grid (i.e., N,E,S,W of each other but not diagonal) and neither is occupied in g_WORLD_MAP (i.e., g_WORLD_MAP isn't 1 for either)\n\n This function should return 1000 if:\n vertex_source corresponds to (i,j) coordinates outside the map\n vertex_dest corresponds to (i,j) coordinates outside the map\n vertex_source and vertex_dest are not adjacent to each other (i.e., more than 1 move away from each other)\n '''\n if vertex_source < len(g_WORLD_MAP) and vertex_dest < len(g_WORLD_MAP):\n start_i, start_j = vertex_index_to_ij(vertex_source)\n dest_i, dest_j = vertex_index_to_ij(vertex_dest)\n manDist = abs(start_i - dest_i) + abs(start_j - dest_j)\n\n if manDist == 1 and g_WORLD_MAP[vertex_source] != 1 and g_WORLD_MAP[vertex_dest] != 1:\n return 1\n\n return 
1000\n\n\ndef run_dijkstra(source_vertex):\n '''\n source_vertex: vertex index to find all paths back to\n returns: 'prev' array from a completed Dijkstra's algorithm run\n\n Function to return an array of ints corresponding to the 'prev' variable in Dijkstra's algorithm\n The 'prev' array stores the next vertex on the best path back to source_vertex.\n Thus, the returned array prev can be treated as a lookup table: prev[vertex_index] = next vertex index on the path back to source_vertex\n '''\n global g_NUM_X_CELLS, g_NUM_Y_CELLS, cost_matrix\n\n\n source_index = source_vertex\n # Array mapping vertex_index to distance of shortest path from vertex_index to source_vertex.\n dist = [1000] * g_NUM_X_CELLS * g_NUM_Y_CELLS\n dist[source_index] = 0\n # Queue for identifying which vertices are up to still be explored:\n # Will contain tuples of (vertex_index, cost), sorted such that the min cost is first to be extracted (explore cheapest/most promising vertices first)\n Q_cost = [1000] * g_NUM_X_CELLS * g_NUM_Y_CELLS\n\n Q_cost[source_index] = 0\n\n # Array of ints for storing the next step (vertex_index) on the shortest path back to source_vertex for each vertex in the graph\n prev = [-1] * g_NUM_X_CELLS * g_NUM_Y_CELLS\n\n # Insert your Dijkstra's code here. Don't forget to initialize Q_cost properly!\n\n # A single index-ordered sweep can miss shortest paths that move toward\n # lower vertex indices, so repeat the relaxation sweep until no distance\n # improves (edge costs here are non-negative).\n changed = True\n while changed:\n changed = False\n for i in range(0, g_NUM_X_CELLS * g_NUM_Y_CELLS):\n for j in range(0, g_NUM_X_CELLS * g_NUM_Y_CELLS):\n j_x, j_y = vertex_index_to_ij(j)\n alt = dist[i] + get_travel_cost(i, j)\n\n if(alt < dist[j]):\n # print(\"Q_Cost[i] (%s) + cost (%s) = %s\" % (dist[i], get_travel_cost(j_x, j_y), alt))\n # print(\"Update %s to %s\" % (j, alt))\n Q_cost[j] = alt\n dist[j] = alt\n prev[j] = i\n changed = True\n\n # print(\"Q_Cost:\", dist)\n # print(\"Prevl:\", prev)\n # Return results of algorithm run\n cost_matrix = Q_cost\n return prev\n\n\ndef reconstruct_path(prev, source_vertex, dest_vertex):\n '''\n Given a populated 'prev' array, a source vertex_index, and destination vertex_index,\n allocate and return an integer array populated with the path from source to destination.\n The first entry of your path should be source_vertex and the last entry should be the dest_vertex.\n If there is no path between source_vertex and dest_vertex, as indicated by hitting a '-1' on the\n path from dest to source, return an empty list.\n '''\n final_path = deque()\n vertex = dest_vertex\n\n while vertex != source_vertex:\n # print(vertex)\n if vertex == -1:\n return []\n\n final_path.append(vertex)\n vertex = prev[vertex]\n\n final_path.append(source_vertex)\n\n return final_path\n\n\ndef render_map(map_array):\n '''\n DONE-\n Display the map in the following format:\n Use \" . \" for free grid cells\n Use \"[ ]\" for occupied grid cells\n\n Example:\n For g_WORLD_MAP = [0, 0, 1, 0,\n 0, 1, 1, 0,\n 0, 0, 0, 0,\n 0, 0, 0, 0]\n There are obstacles at (I,J) coordinates: [ (2,0), (1,1), (2,1) ]\n The map should render as:\n . . . .\n . . . .\n . [ ][ ] .\n . . [ ] .\n\n\n Make sure to display your map so that I,J coordinate (0,0) is in the bottom left.\n (To do this, you'll probably want to iterate from row 'J-1' to '0')\n '''\n for i in range(len(map_array)-1,0,-g_NUM_X_CELLS):\n for j in range(i-g_NUM_X_CELLS+1, i+1):\n if map_array[j] == 0:\n print(' . 
'),\n else:\n print('[ ]'),\n\n print('\\n')\n\n\n\ndef main():\n global g_WORLD_MAP, g_NUM_X_CELLS, cost_matrix\n #Just a little test case for the reconstruction\n # test = [1,2,-1,0,-1,-1,3,-1,-1]\n # stack = reconstruct_path(test,2,6)\n # while stack:\n # print(stack.pop())\n\n # DONE: Initialize a grid map to use for your test -- you may use create_test_map for this, or manually set one up with obstacles\n g_WORLD_MAP = create_test_map(g_WORLD_MAP)\n # g_NUM_X_CELLS = 4\n # g_WORLD_MAP = [0, 0, 1, 0,\n # 0, 1, 1, 0,\n # 0, 0, 0, 0,\n # 0, 0, 0, 0]\n # g_NUM_X_CELLS = 3\n # g_WORLD_MAP = [0, 0, 0,\n # 0, 1, 1,\n # 0, 0, 0]\n\n # Use render_map to render your initialized obstacle map\n render_map(g_WORLD_MAP)\n\n # TODO: Find a path from the (I,J) coordinate pair in g_src_coordinates to the one in g_dest_coordinates using run_dijkstra and reconstruct_path\n prev = run_dijkstra(ij_to_vertex_index(g_src_coordinates[0], g_src_coordinates[1]))\n stack = reconstruct_path(prev,ij_to_vertex_index(g_src_coordinates[0], g_src_coordinates[1]),ij_to_vertex_index(g_dest_coordinates[0], g_dest_coordinates[1]))\n\n '''\n DONE-\n Display the final path in the following format:\n Source: (0,0)\n Goal: (3,1)\n 0 -> 1 -> 2 -> 6 -> 7\n '''\n\n print('Source: ', g_src_coordinates)\n print('Destination: ', g_dest_coordinates)\n if len(stack) > 0:\n while stack:\n print(stack.pop()),\n print(' -> '),\n print('\\n')\n else:\n print('There is no path from source to destination.')\n\n print('\\n\\n')\n print('Cost Matrix: ')\n for i in range(g_NUM_X_CELLS-1,-1,-1):\n for j in range(g_NUM_Y_CELLS):\n print(cost_matrix[j + i*g_NUM_X_CELLS]),\n\n print('\\n')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lab_6/lab6.py","file_name":"lab6.py","file_ext":"py","file_size_in_byte":8945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"299319153","text":"import cv2\nimport os\nimport json\nimport requests\nfrom collections import namedtuple\n\nheaders = {'Content-type': 'application/json'}\nurl='http://localhost:8080/api/camera'\nparams={'page':1,'size':5}\nr=requests.get(url, params=params, headers=headers)\nx=json.loads(r.text)\nresultCamera = x[\"data\"][\"cameraList\"]\ncamera={}\nprint(x[\"data\"][\"cameraList\"])\nfor i in range(0,5):\n camera.update({resultCamera[i]['id']:resultCamera[i][\"resource\"]})\n##print(camera)\nprint('camera',camera)\nfor keys,values in camera.items():\n print(keys)\n print(values)\n\n","sub_path":"Python Detection-NotUsed/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"247137706","text":"### https://www.projecteuler.net/problem=43\n\nimport sys\nsys.path.append(r'..\\euler')\nimport common as euler\nimport time\n\ndef has_duplicate_digits(n: int):\n ''' Returns true if the integer n has\n at least one repeated digit'''\n digits = [dig for dig in str(n)]\n digits.sort()\n\n for i in range(len(digits) - 1):\n if (digits[i] == digits[i + 1]):\n return True\n\n return False\n\ndef add_next_digit(n: list, div: list):\n ''' Given a number n and a list of divisors,\n adds the next digit so that the three left-most\n digits will divide the given divisor, as\n stated by the problem.'''\n cumulative_sum = 0\n digits = [str(i) for i in range(10) if str(i) not in n]\n for i in digits:\n # Check if adding i as a digit satisfy the divisibility condition\n if ((100 * int(i) + 10 * int(n[0]) + int(n[1])) % div[0] == 0):\n # If we have more than one remaining divisor to satisfy, call the function recursively\n if (len(div) > 1):\n cumulative_sum += add_next_digit([i] + n, div[1:])\n # If we are left with only one divisor to deal with (which will be 2), and two digits to fit, then they can be fit both ways\n else:\n new_number = [int(dig) * (10 ** ((len(n) + 1) - ndx)) for ndx, dig in enumerate([dig for dig in digits if dig is not i] + [i] + n)]\n print ('Found new number: %i' % sum(new_number))\n cumulative_sum += sum(new_number)\n\n return cumulative_sum\n\ndef Problem43():\n ''' Returns the sum of all pandigital numbers\n satistying the problem conditions'''\n\n # Initialise time\n start_time = time.time()\n res = 0\n\n i17 = 1\n while (i17 * 17 < 1000):\n # Starting by defining the number as a multiple of 17\n num = i17 * 17\n if (not has_duplicate_digits(num)):\n digits = [dig for dig in str(num)]\n if (num < 100):\n digits.insert(0, '0')\n \n # For each multiple of 17 with no repeated digits, loop through each divisor adding digits\n res = res + add_next_digit(digits, [13, 11, 7, 5, 3, 2])\n \n i17 = i17 + 1\n\n # Return result and time elapsed\n return res, '%.3f s' % (time.time() - start_time)","sub_path":"Page1/problem43.py","file_name":"problem43.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"479596805","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport rsa\nimport base64 #编码库\nfrom OpenSSL.crypto import PKey #处理公钥\nfrom OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM, FILETYPE_ASN1 #处理文件\nfrom OpenSSL.crypto import dump_privatekey, dump_publickey #key写入文件\n\n\n# pip install pyopenssl\n\npk = PKey() # 调用openssl加密标准\npk.generate_key(TYPE_RSA, 1024) # 1024位rsa类型\nprint(pk)\n\n# 秘钥写入文件\npubkey_file = dump_publickey(FILETYPE_PEM, pk)\nprivatekey_file = dump_privatekey(FILETYPE_ASN1, pk)\n\n# 秘钥读取\npubkey = rsa.PublicKey.load_pkcs1_openssl_pem(pubkey_file)\nprivatekey = rsa.PrivateKey.load_pkcs1(privatekey_file, \"DER\")\n# privatekey1 = rsa.PublicKey.load_pkcs1_openssl_pem(privatekey_file)\n\nprint(pubkey.save_pkcs1())\nprint(privatekey.save_pkcs1())\n# print(pubkey, privatekey, privatekey1)\n\ndata = rsa.encrypt(\"自古多情空余恨,此恨绵绵无绝期\".encode(\"utf-8\"), pubkey) # 加密\ndata = base64.b64encode(data)\nprint(data)\ndata_d = rsa.decrypt(base64.b64decode(data), privatekey) # 解密\nprint(data_d.decode(\"utf-8\"))\n","sub_path":"algorithm/OpenSSLTest.py","file_name":"OpenSSLTest.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"447334231","text":"class C2: ... # Make superclass objects\r\nclass C3: ...\r\nclass C1(C2, C3): # Make and link class C1\r\n def setname(self, who): # Assign name: C1.setname\r\n self.name = who # Self is either I1 or I2\r\nI1 = C1() # Make two instances\r\nI2 = C1()\r\nI1.setname('bob') # Sets I1.name to 'bob'\r\nI2.setname('sue') # Sets I2.name to 'sue'\r\nprint(I1.name)\r\nprint(I2.name)\r\n","sub_path":"VI/class_check.py","file_name":"class_check.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"422142425","text":"\"\"\"Common steps for Oneprovider.\n\"\"\"\n__author__ = \"Jakub Liput\"\n__copyright__ = \"Copyright (C) 2016 ACK CYFRONET AGH\"\n__license__ = \"This software is released under the MIT license cited in \" \\\n \"LICENSE.txt\"\n\nimport re\nimport pyperclip\n\nfrom tests.utils.acceptance_utils import list_parser\nfrom tests.gui.conftest import WAIT_BACKEND, WAIT_FRONTEND, MAX_REFRESH_COUNT, \\\n WAIT_REFRESH\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait as Wait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom pytest_bdd import given, parsers, when, then\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import TimeoutException\n\nfrom ..utils.inspect import is_active\nfrom ..utils.generic import refresh_and_call, click_on_element, parse_url\nfrom pytest_selenium_multi.pytest_selenium_multi import select_browser\n\n\nmain_menu_tab_to_url = {'spaces': 'spaces',\n 'groups': 'groups',\n 'data': 'data',\n 'shared': 'shares'}\n\n\ndef _click_on_tab_in_main_menu_sidebar(driver, tab):\n def _load_main_menu_tab_page():\n def _check_url():\n try:\n found = parse_url(driver.current_url).group('tab')\n except AttributeError:\n return False\n else:\n return main_menu_tab_to_url[tab] == found.lower()\n\n click_on_element(driver, item_name=tab,\n css_path='.primary-sidebar a#main-{:s}'\n ''.format(main_menu_tab_to_url[tab]),\n msg='clicking on {:s} tab in main menu')\n\n return Wait(driver, WAIT_FRONTEND).until(\n lambda _: _check_url(),\n message='waiting for url to change.'\n 'Current url: {:s}'.format(driver.current_url)\n )\n\n Wait(driver, WAIT_BACKEND).until(\n lambda _: _load_main_menu_tab_page(),\n message='waiting for {:s} main menu tab page to load'\n ''.format(tab)\n )\n\n\n@given(parsers.re('users? of (?P.*) clicked on the '\n '\"(?P.*)\" tab in main menu sidebar'))\ndef g_op_click_on_the_given_main_menu_tab(selenium, browser_id_list,\n main_menu_tab):\n for browser_id in list_parser(browser_id_list):\n driver = select_browser(selenium, browser_id)\n _click_on_tab_in_main_menu_sidebar(driver, main_menu_tab)\n\n\n@when(parsers.re('users? of (?P.*) clicks on the '\n '\"(?P.*)\" tab in main menu sidebar'))\n@then(parsers.re('users? 
of (?P<browser_id_list>.*) clicks on the '\n '\"(?P<main_menu_tab>.*)\" tab in main menu sidebar'))\ndef wt_op_click_on_the_given_main_menu_tab(selenium, browser_id_list,\n main_menu_tab):\n for browser_id in list_parser(browser_id_list):\n driver = select_browser(selenium, browser_id)\n _click_on_tab_in_main_menu_sidebar(driver, main_menu_tab)\n\n\n@when(parsers.parse('user of {browser_id} refreshes Oneprovider site'))\n@then(parsers.parse('user of {browser_id} refreshes Oneprovider site'))\ndef op_refresh_op_site_by_rm_hashtag(selenium, browser_id):\n driver = select_browser(selenium, browser_id)\n op_url = parse_url(driver.current_url).group('base_url')\n driver.get(op_url)\n\n\n@when(parsers.parse('user of {browser_id} selects \"{item_name}\" '\n 'from {item_type} list'))\n@then(parsers.parse('user of {browser_id} selects \"{item_name}\" '\n 'from {item_type} list'))\ndef op_select_item_from_list(selenium, browser_id, item_name, item_type):\n driver = select_browser(selenium, browser_id)\n click_on_element(driver, item_name=item_name,\n ignore_case=False,\n css_path='ul.{:s}-list '\n '.secondary-sidebar-item'.format(item_type),\n msg='clicking on {{:s}} item in {type} '\n 'list'.format(type=item_type))\n\n\n@when(parsers.parse('user of {browser_id} clicks on copy button next to '\n 'input box to copy visible token'))\n@then(parsers.parse('user of {browser_id} clicks on copy button next to '\n 'input box to copy visible token'))\n@when(parsers.parse('user of {browser_id} clicks on copy button next to '\n 'input box to copy visible url'))\n@then(parsers.parse('user of {browser_id} clicks on copy button next to '\n 'input box to copy visible url'))\ndef op_copy_visible_token_to_clipboard(selenium, browser_id):\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_FRONTEND).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, '.input-with-button '\n 'button.copy-btn'))\n ).click()\n\n\n@when(parsers.parse('user of {browser_id} sends copied {item_type} '\n 'to users of {browser_list}'))\n@then(parsers.parse('user of {browser_id} sends copied {item_type} '\n 'to users of {browser_list}'))\ndef op_send_visible_token_to_other_users(selenium, browser_id, item_type,\n browser_list, tmp_memory):\n select_browser(selenium, browser_id)\n item = pyperclip.paste()\n for browser in list_parser(browser_list):\n if browser in tmp_memory:\n tmp_memory[browser][item_type] = item\n else:\n tmp_memory[browser] = {item_type: item}\n\n\n@when(parsers.parse('user of {browser_id} clicks on the \"{button_name}\" '\n 'button in {main_menu_tab} sidebar'))\n@then(parsers.parse('user of {browser_id} clicks on the \"{button_name}\" '\n 'button in {main_menu_tab} sidebar'))\ndef op_click_on_button_in_main_menu_tab_sidebar(selenium, browser_id,\n button_name,\n main_menu_tab):\n driver = select_browser(selenium, browser_id)\n assert main_menu_tab in ('spaces', 'groups')\n\n click_on_element(driver, item_name=button_name,\n css_path='.secondary-sidebar '\n 'figure.icon',\n msg='clicking on {{:s}} '\n 'in {tab}'.format(tab=main_menu_tab))\n\n\ndef _check_for_item_in_given_list(driver, name, elem_type):\n def _assert_one_item_in_list(s, item_name, item_type):\n items = s.find_elements_by_css_selector('.{:s}-list .secondary-'\n 'sidebar-item .item-label '\n '.truncate'.format(item_type))\n return sum(1 for li in items if li.text == item_name) == 1\n\n Wait(driver, MAX_REFRESH_COUNT * WAIT_BACKEND).until(\n lambda s: refresh_and_call(s, _assert_one_item_in_list,\n name, elem_type),\n message='searching for exactly one {item} '\n 'on {list} 
list'.format(item=name, list=elem_type)\n )\n\n\n@given(parsers.parse('that in {browser_id} there is an \"{item_name}\" '\n 'item on the {item_type} list'))\n@given(parsers.parse('that in {browser_id} there is a \"{item_name}\" '\n 'item on the {item_type} list'))\ndef op_check_if_there_is_an_item_on_the_list(selenium, browser_id,\n item_name, item_type):\n driver = select_browser(selenium, browser_id)\n _check_for_item_in_given_list(driver, item_name, item_type)\n\n\n@when(parsers.parse('user of {browser_id} sees that the new item has appeared '\n 'on the {item_type} list'))\n@then(parsers.parse('user of {browser_id} sees that the new item has appeared '\n 'on the {item_type} list'))\ndef op_check_if_new_item_appeared_in_list(selenium, browser_id,\n item_type, name_string):\n driver = select_browser(selenium, browser_id)\n _check_for_item_in_given_list(driver, name_string, item_type)\n\n\n@when(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n 'has appeared on the {item_type} list'))\n@then(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n 'has appeared on the {item_type} list'))\ndef op_check_if_item_of_name_appeared_in_list(selenium, browser_id,\n item_name, item_type):\n driver = select_browser(selenium, browser_id)\n _check_for_item_in_given_list(driver, item_name, item_type)\n\n\n# TODO uncomment when leave from group backend will be repaired\n# @then(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n# 'has disappeared from the {item_type} list'))\n# def op_check_if_item_of_name_disappeared_from_list(selenium, browser_id,\n# item_type, item_name):\n# def _check_for_lack_of_item_in_list(s):\n# items = s.find_elements_by_css_selector('.{:s}-list .secondary-'\n# 'sidebar-item .item-label '\n# '.truncate'.format(item_type))\n# return all(item.text != item_name for item in items)\n#\n# driver = select_browser(selenium, browser_id)\n# Wait(driver, MAX_REFRESH_COUNT*WAIT_BACKEND).until(\n# lambda s: refresh_and_call(s, _check_for_lack_of_item_in_list),\n# message='waiting for {item} to disappear from '\n# '{list} list'.format(item=item_name, list=item_type)\n# )\n\n\n# TODO rm when leave from group backend will be repaired\n@when(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n 'has disappeared from the {item_type} list'))\n@then(parsers.parse('user of {browser_id} sees that the \"{item_name}\" '\n 'has disappeared from the {item_type} list'))\ndef op_check_if_item_of_name_disappeared_from_list(selenium, browser_id,\n item_name, item_type):\n def _check_for_lack_of_item_in_list(s):\n items = s.find_elements_by_css_selector('.{:s}-list .secondary-'\n 'sidebar-item .item-label '\n '.truncate'.format(item_type))\n return all(item.text != item_name for item in items)\n\n def _refresh_and_call():\n \"\"\"Refresh browser and keep calling callback with given args\n until achieve expected result or timeout.\n \"\"\"\n op_url = parse_url(driver.current_url).group('base_url')\n driver.get(op_url)\n _click_on_tab_in_main_menu_sidebar(driver, item_type)\n\n try:\n result = Wait(driver, WAIT_REFRESH).until(\n lambda s: _check_for_lack_of_item_in_list(s)\n )\n except TimeoutException:\n return None\n else:\n return result\n\n driver = select_browser(selenium, browser_id)\n Wait(driver, MAX_REFRESH_COUNT*WAIT_BACKEND).until(\n lambda s: _refresh_and_call(),\n message='waiting for {:s} to disappear from '\n 'groups list'.format(item_name)\n )\n\n\ndef _check_for_presence_of_item_in_table(driver, name, caption):\n table_elems = 
driver.find_elements_by_css_selector('table thead, '\n 'table tbody')\n for thead, tbody in zip(table_elems[::2], table_elems[1::2]):\n th = thead.find_element_by_css_selector('th .item-label')\n if th.text.lower() == caption.lower():\n items = tbody.find_elements_by_css_selector('.permissions-'\n 'table-row '\n '.truncate')\n return any(item.text == name for item in items)\n\n\n@when(parsers.parse('user of {browser_id} sees that \"{name}\" item has appeared '\n 'on current {caption} permissions table'))\n@then(parsers.parse('user of {browser_id} sees that \"{name}\" item has appeared '\n 'on current {caption} permissions table'))\ndef op_check_if_row_of_name_appeared_in_table(selenium, browser_id,\n name, caption):\n driver = select_browser(selenium, browser_id)\n Wait(driver, MAX_REFRESH_COUNT * WAIT_BACKEND).until(\n lambda s: refresh_and_call(s, _check_for_presence_of_item_in_table,\n name, caption),\n message='searching for exactly one {:s} '\n 'on {:s} list in table'.format(name, caption)\n )\n\n\ndef _find_item_in_sidebar_list(driver, item_name, item_type):\n items = driver.find_elements_by_css_selector('.' + item_type + '-list '\n '.secondary-sidebar-item')\n for item in items:\n # if settings dropdown menu is expanded text looks like: name\\noption1\\noption2\\n...\n # so splitting text on nl and getting 0 element\n if item_name == item.text.split('\\n')[0]: # TODO better way to check if it is the item we seek\n return item\n\n\n@when(parsers.parse('user of {browser_id} clicks a settings icon displayed '\n 'for \"{item_name}\" item on the {item_type} list'))\n@then(parsers.parse('user of {browser_id} clicks a settings icon displayed '\n 'for \"{item_name}\" item on the {item_type} list'))\ndef op_click_settings_icon_for_list_item(selenium, browser_id,\n item_name, item_type):\n\n def _find_settings_icon_and_check_if_clickable(s):\n list_item = _find_item_in_sidebar_list(s, item_name, item_type)\n icon = list_item.find_element_by_css_selector('.oneicon-settings')\n if icon.is_enabled():\n s.execute_script('arguments[0].scrollIntoView();', icon)\n return icon\n\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_FRONTEND).until(\n _find_settings_icon_and_check_if_clickable,\n message='clicks on settings icon for {name} on {type} '\n 'list'.format(name=item_name, type=item_type)\n ).click()\n\n\n@when(parsers.parse('user of {browser_id} sees a settings dropdown menu for '\n '\"{name}\" item on the {elem_type} list'))\n@then(parsers.parse('user of {browser_id} sees a settings dropdown menu for '\n '\"{name}\" item on the {elem_type} list'))\ndef op_wait_for_settings_dropdown_menu(selenium, browser_id, name, elem_type):\n\n def _find_expanded_menu(s):\n list_item = _find_item_in_sidebar_list(s, name, elem_type)\n toggle = list_item.find_element_by_css_selector('.dropdown-toggle')\n return toggle.get_attribute('aria-expanded') == 'true'\n\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_FRONTEND).until(\n _find_expanded_menu,\n message='waiting for settings dropdown to expand'\n )\n\n\n@when(parsers.parse('user of {browser_id} clicks on the \"{item_name}\" item '\n 'in current settings dropdown'))\n@then(parsers.parse('user of {browser_id} clicks on the \"{item_name}\" item '\n 'in current settings dropdown'))\ndef op_click_on_item_in_current_settings_dropdown(selenium, browser_id,\n item_name):\n driver = select_browser(selenium, browser_id)\n click_on_element(driver, item_name=item_name,\n css_path='.settings-dropdown '\n '.dropdown-menu-settings '\n 
'.clickable',\n msg='clicking on {:s} in current '\n 'settings dropdown')\n\n\n@given(parsers.parse('user of {browser_id} sees that main content '\n 'has ended loading'))\ndef op_check_if_main_content_has_been_reloaded(selenium, browser_id):\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_FRONTEND).until(\n EC.invisibility_of_element_located((By.CSS_SELECTOR,\n '.common-loader-spinner')),\n message='wait for main content to end loading'\n )\n\n\n@given(parsers.re('users? of (?P<browser_id_list>.*?) seen that '\n 'Oneprovider session has started'))\ndef wait_for_op_session_to_start(selenium, browser_id_list):\n def _check_url():\n try:\n found = parse_url(driver.current_url).group('access')\n except AttributeError:\n return False\n else:\n return 'onedata' == found.lower()\n\n for browser_id in list_parser(browser_id_list):\n driver = select_browser(selenium, browser_id)\n Wait(driver, WAIT_BACKEND).until(\n lambda _: _check_url(),\n message='waiting for session to start'\n )\n","sub_path":"tests/gui/steps/oneprovider_common.py","file_name":"oneprovider_common.py","file_ext":"py","file_size_in_byte":16942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"651598069","text":"__author__ = 'charlesztt'\n\n\nimport os\nimport re\nfrom xml.sax.saxutils import escape\n\ndef date_detection(one_file):\n token_list=one_file.split('_')\n for one_token in token_list:\n re1='((?:(?:[1]{1}\\\\d{1}\\\\d{1}\\\\d{1})|(?:[2]{1}\\\\d{3}))(?:[0]?[1-9]|[1][012])(?:(?:[0-2]?\\\\d{1})|(?:[3][01]{1})))(?![\\\\d])'\t# YYYYMMDD 1\n rg = re.compile(re1,re.IGNORECASE|re.DOTALL)\n m = rg.search(one_token)\n if m:\n yyyymmdd1=m.group(1)\n return yyyymmdd1\n else:\n continue\n return \"\"\n\n\ntopic_list=['isis']\n\nfor one_topic in topic_list:\n file_list=os.listdir(os.path.join('./data/nered',one_topic))\n for one_file in file_list:\n f=open(os.path.join('./data/nered',one_topic,one_file))\n f2=open(os.path.join('./data/true',one_topic,one_file.replace(\".txt\",\".sgm\")),'w')\n\n f2.write('\\n')\n f2.write('%s\\n'%(one_file.replace(\".txt\",\"\")))\n f2.write(' NEWS STORY \\n')\n f2.write(' %s '%date_detection(one_file.replace(\".txt\",\"\")))\n f2.write('\\n\\n')\n f2.write('\\n')\n for one_line in f:\n first_flag=1;\n one_line.replace(\"\\n\",\"\")\n one_line_list=one_line.split(\" \")\n for one_token in one_line_list:\n if first_flag==1:\n first_flag=0\n temp=one_token.split('/')[0].lower().title()\n f2.write(escape(temp+' '))\n continue\n try:\n if one_token.split('/')[1] != 'O':\n temp=one_token.split('/')[0].lower().title()\n f2.write(escape(temp+' '))\n else:\n temp=one_token.split('/')[0].lower()\n f2.write(escape(temp+' '))\n except:\n pass\n f2.write('\\n')\n f2.write('\\n\\n')","sub_path":"python/true_case_them.py","file_name":"true_case_them.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"397343988","text":"# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom contextlib import contextmanager\n\nfrom sqlalchemy import create_engine\n\nfrom scout_apm.sqlalchemy import instrument_sqlalchemy\nfrom tests.compat import mock\n\n\n@contextmanager\ndef conn_with_scout():\n \"\"\"\n Create an instrumented SQLAlchemy connection to an in-memory SQLite database.\n\n \"\"\"\n engine = create_engine(\"sqlite:///:memory:\")\n instrument_sqlalchemy(engine)\n conn = engine.connect()\n try:\n yield conn\n finally:\n conn.close()\n\n\ndef test_hello():\n with conn_with_scout() as conn:\n result = conn.execute(\"SELECT 'Hello World!'\")\n assert list(result) == [(\"Hello World!\",)]\n\n\n# Monkey patch should_capture_backtrace in order to keep the test fast.\n@mock.patch(\n \"scout_apm.core.n_plus_one_call_set.NPlusOneCallSetItem.should_capture_backtrace\"\n)\ndef test_hello_capture_backtrace(should_capture_backtrace):\n should_capture_backtrace.return_value = True\n with conn_with_scout() as conn:\n result = conn.execute(\"SELECT 'Hello World!'\")\n assert list(result) == [(\"Hello World!\",)]\n\n\ndef test_instrument_engine_is_idempotent():\n engine = create_engine(\"sqlite:///:memory:\")\n instrument_sqlalchemy(engine)\n instrument_sqlalchemy(engine) # does nothing, doesn't crash\n","sub_path":"tests/integration/test_sqlalchemy.py","file_name":"test_sqlalchemy.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"269385965","text":"from Bio.Blast import NCBIWWW\nfrom Bio.Blast import NCBIXML\nfrom Bio import SeqIO\nfrom pathlib import Path\nfrom Bio import SearchIO\n\ntarget_species = [\"Homo sapiens\", \"Pan troglodytes\", \"Notechis scutatus\", \\\n \"Takifugu rubripes\"]\n\ndef blaster(fasta_file): \n \"\"\"\n Based on a target species list, we BLAST the given\n input sequence and put them in a file. \n \"\"\"\n fasta_string = open(fasta_file).read()\n print(\"BLAST initiated...\")\n\n # qblast opens up the BLAST function in NCBI. \n result_handle = NCBIWWW.qblast(\"blastn\", \"nt\", fasta_string)\n\n print(\"BLAST search done.\")\n # Records will then be written in a file.\n records = []\n # Results need to go into an XML file. \n with open(\"my_blast.xml\", \"w\") as out_handle:\n out_handle.write(result_handle.read())\n\n blast_result = SearchIO.read(\"my_blast.xml\", \"blast-xml\")\n print(\"Writing BLAST results to file..\")\n for i in target_species: \n # Interate through the blast result hits. \n for hit in blast_result: \n print(hit)\n if i in hit.description: \n # If the taret species is found, append. \n records.append(hit[0].hit)\n\n\n # Pretty easy way to write the given sequences in one file. \n SeqIO.write(records, \"blast-results.fasta\", \"fasta\")\n print(\"\\nBLAST result file written to blast_results.fasta.\")\n return (\"blast_results.fasta\")\n \n\n \n \n","sub_path":"scripts/blast2.py","file_name":"blast2.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"644350089","text":"#!/usr/bin/python3 -u\nfrom scipy.io import netcdf\nfrom scipy.ndimage import zoom\nfrom PIL import Image, ImageOps, ImageChops\nfrom time import sleep\n\nimport subprocess\nimport numpy as np\nimport os\n#import gc\n\nfrom tempfile import NamedTemporaryFile\nfrom urllib.request import urlopen\nimport json\nimport datetime\nimport h5py\n\n# Format for URLs (spaces added by yours truly)\n# --////OR_---M-C-G-s _e _c.nc\n#https://storage.cloud.google.com/gcp-public-data-goes-16/ABI -L2 -CMIPF /2018 /070 /20 /OR_ABI -L2 -CMIPF -M3 C02 _G16 _s20180702000416_e20180702011183_c20180702011253.nc\n\n# Path to store tempfiles to\nTEMP = 'data'\nSTORAGE = 'data'\n# URL to fetch directory listings from\n# https://www.googleapis.com/storage/v1/b/gcp-public-data-goes-16/o?prefix=ABI-L2-CMIPF/ 2018/070/21/OR_ABI-L2-CMIPF-M3C01\nDIR_LIST = 'https://www.googleapis.com/storage/v1/b/gcp-public-data-goes-16/o?prefix=ABI-L2-CMIPF/{date:%Y/%j/%H}/OR_ABI-L2-CMIPF-M3C{channel:02}'\n# Size to chunk downloads into, bytes\nCHUNK_SIZE = 5000000 # 5MB\n# Final size of the generated images. Refer to the \"Channel 2 is X by Y\" message for the full size.\n# This must be a common denominator to all layers. (10848, 5424, 2712, 1356, ...)\n#FINAL_SIZE = (10848, 10848)\n#FINAL_SIZE = (5424, 5424)\n#FINAL_SIZE = (2712, 2712)\nFINAL_SIZE = (500, 500)\nTHUMB_SIZE = (1000, 1000)\n\n# Polling time - how often to check the API for new images (seconds)\n# Full-disk scans come every 15 minutes.\nPOLL_TIME = 5*60\n\n# How much timestamps can differ while being considered identical (seconds)\nTIME_FUZZ = 60\n\n#class Timer(object):\n# \"\"\"A simple lap timer. On each call of lap(), it returns the elapsed time since the last call.\"\"\"\n# def __init__(self):\n# self.last = datetime.datetime.now()\n# self.start = self.last\n# def lap(self):\n# old = self.last\n# self.last = datetime.datetime.now()\n# return (self.last - old).total_seconds()\n# def total(self):\n# self.lap()\n# return (self.last - self.start).total_seconds()\n# def delay(self, seconds):\n# # Delays for the number of seconds since the last lap.\n# # Reset the lap counter on exit.\n# sleepTime = seconds - self.lap()\n# if sleepTime > 0:\n# print('Sleeping for {} seconds'.format(sleepTime))\n# sleep(sleepTime)\n# else:\n# print('Period already expired ({}s ago)'.format(-sleepTime))\n# self.lap()\n\ndef getLatestUrl(channel, offset=0):\n \"\"\" Gets the URL to the most recent GOES-R image of the specified channel.\"\"\"\n \n url = DIR_LIST.format(date=datetime.datetime.utcnow() - datetime.timedelta(seconds=offset), channel=channel)\n if offset == 0: print('Fetching file list:', url)\n text = urlopen(url, timeout=15).read().decode('utf-8')\n try: obj = json.loads(text)['items'][-1]\n except KeyError:\n # If nothing matches this hour, try an hour ago.\n if offset == 0:\n print(' - No data found for this hour; trying an hour ago')\n return getLatestUrl(channel, offset=3600)\n # If that also fails, die.\n print('No files matched the query.')\n raise\n return obj\n\ndef downloadFile(src, dest, size = 0):\n handle = urlopen(src, timeout=15)\n chunk = 'not an empty string'\n\n with open(dest, 'wb') as output:\n # Download the file!\n while chunk:\n print('Downloaded{: 4.1f} of{: 0.1f}MB: {}'.format(output.tell()/1E6, int(size)/1E6, dest), end='\\r')\n chunk = handle.read(CHUNK_SIZE)\n output.write(chunk)\n output.flush()\n print()\n\ndef getLatestData(channel, obj, last=''):\n timer = Timer()\n filename = TEMP + '/' + 
os.path.basename(obj['name'])\n\n # Download file to /tmp and convert to netCDF3. If it's there already, cool.\n if not os.path.isfile(filename):\n try:\n downloadFile(obj['mediaLink'], filename, obj['size'])\n except:\n # If something goes wrong, kill the file rather than leaving a corrupted download.\n os.remove(filename)\n raise\n else: # file already existed\n print('Downloaded:', filename, timer.lap())\n\n # Read it as CDF; pull out the reflectance data.\n print(' - Reading netCDF', timer.lap())\n with netcdf.netcdf_file(filename, 'r') as g16nc:\n print(' - Extracting reflectance', timer.lap())\n reflectance = g16nc.variables['CMI'][:] # Extract the reflectance\n\n zoom_factor = [FINAL_SIZE[0]/reflectance.shape[0], FINAL_SIZE[1]/reflectance.shape[1]]\n print(' - Channel {} is {} by {}; resizing by {}'.format(channel, g16nc.variables['CMI'].shape[0], g16nc.variables['CMI'].shape[1], zoom_factor), timer.lap())\n reflectance = zoom(reflectance, zoom_factor, order=1)\n\n # Optional: delete the netcdf to avoid clogging up the disk.\n # (On a 10GB disk, that's important)\n #os.remove(filename)\n\n print(' - Ensuring all values are positive', timer.lap())\n np.maximum(reflectance, 0, reflectance)\n\n print(' - Applying gamma correction', timer.lap())\n reflectance = reflectance ** 0.55\n\n print(' - Scaling for improved contrast'.format(channel), timer.lap())\n if channel != 13:\n reflectance *= 5\n\n print(' - Converting to image', timer.lap())\n image = Image.fromarray(reflectance).convert(mode='L')\n \n gc.collect()\n\n print(' - Total time:', timer.total())\n return image\n\ndef makeImage(lastTime = 0):\n timer = Timer()\n print('Downloading latest images')\n # Decide which file to download (obj includes filesize, a link, and some other stuff)\n obj = {} # Obj is a dictionary of file attributes - the latest image availiable for the specified channel.\n for channel in [1, 2, 3, 13]:\n obj[channel] = getLatestUrl(channel)\n obj[channel]['time'] = int(obj[channel]['name'].split('_')[-1][1:-3])\n\n # Pick out a timestamp to use elsewhere.\n timestamp = obj[1]['time']\n\n # Check that all timestamps are \"close\"\n if ((-TIME_FUZZ <= (obj[1]['time'] - obj[2]['time']) <= TIME_FUZZ)\n and (-TIME_FUZZ <= (obj[1]['time'] - obj[3]['time']) <= TIME_FUZZ)\n and (-TIME_FUZZ <= (obj[2]['time'] - obj[3]['time']) <= TIME_FUZZ)):\n print('Images are time-synchronous ({}, {}, and {})'.format(\n obj[1]['time'],\n obj[2]['time'],\n obj[3]['time']))\n else:\n # If not, try again later.\n print('Images are not time-synchronous ({}, {}, and {})'.format(\n obj[1]['time'],\n obj[2]['time'],\n obj[3]['time']))\n return lastTime\n\n # Check that the image has updated (no sense making duplicates)\n if timestamp == lastTime:\n print('Images have not changed since last check ({})'.format(obj[1]['time']))\n return lastTime\n\n # Getting to work - insert a break.\n print()\n\n blue = getLatestData(1, obj[1]) # Load Channel 1 - Blue (Visible 0.47 µm)\n red = getLatestData(2, obj[2]) # Load channel 2 - Red (visible 0.64 µm)\n veggie = getLatestData(3, obj[3]) # Load Channel 3 - Veggie (NIR 1.6 µm)\n cloud = getLatestData(13, obj[13]) # Load Channel 6 - Cloud particle size (NIR 2.2 µm)\n\n # Clean up the NC files before continuing.\n gc.collect()\n\n print('Making a pseudo-green channel', timer.lap())\n # Derived from Planet Labs data, CC > 0.9\n # true_green = 0.48358168 * ch_2 + 0.45706946 * ch_1 + 0.06038137 * ch_3\n green = ImageChops.add(Image.eval(blue, lambda x: x*0.45706946),\n ImageChops.add(Image.eval(red, 
lambda x: x*0.48358168),\n Image.eval(veggie, lambda x: x*0.06038137 )))\n\n print('Colorizing channels', timer.lap())\n red = ImageOps.colorize(red, (0, 0, 0), (255, 0, 0))\n veggie = ImageOps.colorize(veggie, (0, 0, 0), (0, 255, 0))\n green = ImageOps.colorize(green , (0, 0, 0), (0, 255, 0))\n blue = ImageOps.colorize(blue, (0, 0, 0), (0, 0, 255))\n cloud = ImageOps.colorize(cloud, (0, 0, 0), (255, 255, 255))\n cloud = ImageOps.equalize(cloud)\n red.save(STORAGE+'/red-{}.jpg'.format(timestamp))\n veggie.save(STORAGE+'/veggie-{}.jpg'.format(timestamp))\n green.save(STORAGE+'/green-{}.jpg'.format(timestamp))\n blue.save(STORAGE+'/blue-{}.jpg'.format(timestamp))\n cloud.save(STORAGE+'/cloud-{}.jpg'.format(timestamp))\n\n print('Generating geocolor and truecolor outputs', timer.lap())\n #geocolor = ImageChops.add(ImageChops.add(red, veggie), blue)\n #geocolor.save(STORAGE+'/geocolor-{}.png'.format(timestamp))\n\n truecolor = ImageChops.add(ImageChops.add(red, green), blue)\n truecolor.save(STORAGE+'/truecolor-{}.jpg'.format(timestamp))\n #truecolor.resize(THUMB_SIZE).save(STORAGE+'/truecolor-thumb-{}.jpg'.format(timestamp))\n cloudcolor = ImageChops.screen(ImageChops.add(ImageChops.add(red, green), blue),cloud)\n cloudcolor.save(STORAGE+'/cloudcolor-{}.jpg'.format(timestamp))\n\n # Make a symlink pointing to the latest for javascript to point at.\n try: os.remove( STORAGE+'/truecolor-latest.jpg')\n except FileNotFoundError: pass\n os.symlink(STORAGE+'/truecolor-{}.jpg'.format(timestamp), STORAGE+'/truecolor-latest.jpg')\n try: os.remove( STORAGE+'/truecolor-thumb-latest.jpg')\n except FileNotFoundError: pass\n os.symlink(STORAGE+'/truecolor-thumb-{}.jpg'.format(timestamp), STORAGE+'/truecolor-thumb-latest.jpg')\n\n\n print('Done!', timer.lap())\n print('Total time:', timer.total())\n print()\n\n return timestamp\n\ndef get_channels_descriptions(channels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]):\n descriptions = {} # Obj is a dictionary of file attributes - the latest image availiable for the specified channel.\n for channel in channels:\n descriptions[channel] = getLatestUrl(channel)\n #descriptions[channel]['time'] = int(descriptions[channel]['name'].split('_')[-1][1:-3])\n descriptions[channel]['time'] = datetime.datetime.strptime(descriptions[channel]['updated'], '%Y-%m-%dT%H:%M:%S.%fZ')\n # calculate time covariance\n times = np.array([[descriptions[channel]['time']]*len(channels) for channel in channels])\n if (times - times.T).max().total_seconds() > TIME_FUZZ:\n print('Warning: time covariances greater than threshold: %0.2f seconds'%((times - times.T).max().total_seconds()))\n return descriptions\n\ndef get_channel_file(channel, description):\n filename = TEMP + '/' + os.path.basename(description['name'])\n # Download file to /tmp and convert to netCDF3. 
If it's there already, cool.\n if not os.path.isfile(filename):\n try:\n downloadFile(description['mediaLink'], filename, description['size'])\n except:\n # If something goes wrong, kill the file rather than leaving a corrupted download.\n os.remove(filename)\n raise\n else: # file already existed\n print('Downloaded:', filename)\n description['filepath'] = filename\n\ndef create_rgb(desc=None):\n channels = [1, 2, 3]\n if desc == None:\n desc = get_channels_descriptions(channels)\n for i in channels:\n get_channel_file(i,desc[i])\n # create channels blue, red and veggie\n b = np.array(h5py.File(desc[1]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/b.shape[0], FINAL_SIZE[1]/b.shape[1]]\n b = zoom(b, zoom_f, order=1)\n r = np.array(h5py.File(desc[2]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/r.shape[0], FINAL_SIZE[1]/r.shape[1]]\n r = zoom(r, zoom_f, order=1)\n v = np.array(h5py.File(desc[3]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/v.shape[0], FINAL_SIZE[1]/v.shape[1]]\n v = zoom(v, zoom_f, order=1)\n # make syntetic green channel\n g = b * 0.45706946 + r * 0.48358168 + v * 0.06038137\n\n # normalize to 0,1 interval for plotting with plt.imshow()\n\n r = (r-r.min())/(r.max()-r.min())\n g = (g-g.min())/(g.max()-g.min()) \n b = (b-b.min())/(b.max()-b.min())\n \n # create composite RGB image\n img = np.zeros((FINAL_SIZE[0], FINAL_SIZE[1], 3))\n img[:, :, 0] = r\n img[:, :, 1] = g\n img[:, :, 2] = b\n return img\n\ndef create_rgb13(desc=None):\n channels = [1, 2, 3, 13]\n if desc == None:\n desc = get_channels_descriptions(channels)\n for i in channels:\n get_channel_file(i,desc[i])\n # create channels blue, red, veggie and clouds\n b = np.array(h5py.File(desc[1]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/b.shape[0], FINAL_SIZE[1]/b.shape[1]]\n b = zoom(b, zoom_f, order=1)\n r = np.array(h5py.File(desc[2]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/r.shape[0], FINAL_SIZE[1]/r.shape[1]]\n r = zoom(r, zoom_f, order=1)\n v = np.array(h5py.File(desc[3]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/v.shape[0], FINAL_SIZE[1]/v.shape[1]]\n v = zoom(v, zoom_f, order=1)\n c = np.array(h5py.File(desc[13]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/c.shape[0], FINAL_SIZE[1]/c.shape[1]]\n c = zoom(c, zoom_f, order=1)\n # make syntetic green channel\n g = b * 0.45706946 + r * 0.48358168 + v * 0.06038137\n r1 = (r-r.min())/(r.max()-r.min())\n g1 = (g-g.min())/(g.max()-g.min()) \n b1 = (b-b.min())/(b.max()-b.min())\n mask = ((r1+g1+b1) < 0.01).astype(int)\n c *= mask\n c //=4\n r = r-c\n g = g-c\n b = b-c\n # normalize to 0,1 interval for plotting with plt.imshow()\n r = (r-r.min())/(r.max()-r.min())\n g = (g-g.min())/(g.max()-g.min()) \n b = (b-b.min())/(b.max()-b.min())\n # create composite RGB image\n img = np.zeros((FINAL_SIZE[0], FINAL_SIZE[1], 3))\n img[:, :, 0] = r\n img[:, :, 1] = g\n img[:, :, 2] = b\n return img\n\ndef create_NDVI(desc=None):\n '''Create a Normalized Difference Vegetation Index image\n NIR - red\n NDVI = -----------\n NIR + red\n By definition this image is grey scale, but can by plotted using summer_r or\n YIGn color maps in plt.imshow()\n '''\n channels = [2, 3]\n if desc == None:\n desc = get_channels_descriptions(channels)\n for i in channels:\n get_channel_file(i,desc[i])\n # create channels blue, red, veggie and clouds\n r = np.array(h5py.File(desc[2]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/r.shape[0], FINAL_SIZE[1]/r.shape[1]]\n r = zoom(r, zoom_f, order=1)\n v = np.array(h5py.File(desc[3]['filepath'],'r')['CMI'])\n 
zoom_f = [FINAL_SIZE[0]/v.shape[0], FINAL_SIZE[1]/v.shape[1]]\n v = zoom(v, zoom_f, order=1)\n img = (v-r)/(v+r)\n return img\n\ndef create_image(channel, desc=None):\n if desc == None:\n desc = get_channels_descriptions([channel])\n get_channel_file(channel, desc[channel])\n img = np.array(h5py.File(desc[channel]['filepath'],'r')['CMI'])\n zoom_f = [FINAL_SIZE[0]/img.shape[0], FINAL_SIZE[1]/img.shape[1]]\n img = zoom(img, zoom_f, order=1)\n return img\n","sub_path":"myGOES16.sci.py","file_name":"myGOES16.sci.py","file_ext":"py","file_size_in_byte":15207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
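+A tiny sanity check of the NDVI formula in the docstring above (synthetic values, illustrative only): vegetation-like pixels with high NIR map toward 1, while pixels with similar red and NIR reflectance map toward 0.
+
+    import numpy as np
+
+    # Column 0 is vegetation-like (high NIR); column 1 is not.
+    red = np.array([[0.05, 0.40]])
+    nir = np.array([[0.60, 0.45]])
+    print((nir - red) / (nir + red))  # -> approx [[0.846 0.059]]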
+{"seq_id":"529605092","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('musics', '0002_auto_20150215_1530'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='score',\n name='musescore_user',\n field=models.IntegerField(blank=True, verbose_name='Musescore UserID'),\n preserve_default=True,\n ),\n ]\n","sub_path":"musics/migrations/0003_auto_20150215_1542.py","file_name":"0003_auto_20150215_1542.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"259517377","text":"## Descrição: Este programa irá simular um sistema de gerenciamento da DAC, realizando operações como impressão, ordenação, inclusão, remoção e busca de alunos matriculados em cada turma.\n## Entrada: uma lista com n inteiros (RA de cada aluno) e uma lista de operações a serem realizadas finalizada pelo caractere s.\n## Saída:Deverá ser impressa a lista como lida na entrada e, para cada operação 'p' realizada deve ser impressa a lista no estado atual, dadas as operações realizadas anteriormente. Quando o programa ler o operador s, que representa a operação de sair, o programa deve encerrar a execução. No caso da busca binária, os índices da busca para cada passo devem ser impressos, independente do RA estar na lista ou não. Para isso, basta imprimir o índice da posição do meio da lista durante a busca.\n## FERNANDO DOS REIS SANTOS FILHO - RA: 23447\n\nfrom aux import *\n\ndef p():\n for i in range (len(lista)-1):\n lista[i]=int(lista[i])\n print (lista[i], end=\" \")\n print (lista[len(lista)-1]); ##IMPRIMIR\n\ndef inserir(aux):\n lista.append(int(aux))\n\n if (ordenacao == \"c\"):\n insertionSortCrescente(lista)\n elif (ordenacao == \"d\"):\n insertionSortDecrescente(lista)\n\n\n\nordenacao = 0\n\nlista = input().split() ##Le a lista de RAs\n\nwhile (True):\n aux = input().split()\n\n if (aux[0] == \"s\"): ## SAIR do programa\n break;\n\n if (aux[0] == \"p\"): ## Executar a função IMPRIMIR\n try:\n p()\n except IndexError:\n continue\n\n if (aux[0] == \"c\"): ## Executar a função ORDENACAO CRESCENTE\n try:\n insertionSortCrescente(lista)\n ordenacao = \"c\"\n except IndexError:\n continue\n\n if (aux[0] == \"d\"): ## Executar a função ORDENACAO DECRESCENTE\n try:\n insertionSortDecrescente(lista)\n ordenacao =\"d\"\n except IndexError:\n continue\n\n if (aux[0] == \"i\"): ## Executar a função INSERIR\n inserir(aux[1])\n\n if (aux[0] == \"r\"): ## Executar a função REMOVER\n list.remove() ## !!\n","sub_path":"lab14.py","file_name":"lab14.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"524434245","text":"import pandas as pd\n\ndef interpolate(dataframe, freq='15Min', units='kW'):\n\tadjustment_hashmap = {'kW':1, 'kWh':4, 'W':0.001, 'Wh':0.004}\n\tstart_date = dataframe.first_valid_index().date()\n\tend_date = dataframe.last_valid_index().date() + pd.Timedelta(days=1)\n\n\tdaterange = pd.date_range(start_date, end_date, freq=freq, closed='left')\n\tdataframe = pd.concat([dataframe * adjustment_hashmap[units], pd.DataFrame(index=daterange)])\n\tdataframe.index.names = [const.TIMESTAMP]\n\treturn dataframe[~dataframe.index.duplicated(keep='first')].sort_index().interpolate()\n\ndef convert(dataframe, start_prefix=None, end_prefix=None):\n\tconversion_hashmap = {\n\t\t'kilo':1000,\n\t\t'mega':1000000,\n\t\t'giga':1000000000,\n\t\t'tera':1000000000000\n\t}\n\n\t# datetimeindex.freq.delta","sub_path":"enerlytics/util/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"214689035","text":"import numpy as np\r\nimport pandas as pd\r\nimport pickle\r\n\r\nclass ReliableSource():\r\n\r\n def __init__(self):\r\n path = \"/content/drive/My Drive/MLFall2020/the-feature-finders/AlternusVera/ReliableSource/data.csv\"\r\n \r\n \r\n def FeatureFinders_getSourceReliabilityScore(self, source): # return between 0 and 1, being 0 = True, 1 = Fake\r\n path = \"/content/drive/My Drive/MLFall2020/the-feature-finders/AlternusVera/ReliableSource/data.csv\"\r\n fakeNewsSites = pd.read_csv(path)\r\n for index, row in fakeNewsSites.iterrows():\r\n score = 100\r\n if (row['Type of site'] == 'Some fake stories'):\r\n score = 50\r\n fakeNewsSites.at[index, 'fake_score'] = score\r\n\r\n if (source == \"\"):\r\n return 0\r\n #print(source)\r\n d = fakeNewsSites[fakeNewsSites['Site name'].str.match(r'\\b' + source + r'\\b')]\r\n #print(d)\r\n if d.shape[0] > 0:\r\n return d.iloc[0]['fake_score']\r\n\r\n # if (d['fake_score'].empty):\r\n # return 0\r\n # return int(d['fake_score'].values)\r\n return 0;\r\n\r\n def FeatureFinders_getReliabilityBySource(self,src):\r\n x = self.FeatureFinders_getSourceReliabilityScore(src)\r\n xTrain = np.array(x).reshape(-1, 1)\r\n\r\n readfile = open('/content/drive/My Drive/MLFall2020/the-feature-finders/AlternusVera/pickled-model/ReliableSourceLabelmodel', 'rb')\r\n best_clf = pickle.load(readfile)\r\n\r\n xPpredicted = best_clf.predict(xTrain)\r\n print(xPpredicted)\r\n xPredicedProb = best_clf.predict_proba(xTrain)[:,1]\r\n #xPredicedProb = best_clf.predict_proba(xTrain)\r\n #print(xPredicedProb)\r\n return 1 - float(xPredicedProb)","sub_path":"AlternusVeraReliableSource.py","file_name":"AlternusVeraReliableSource.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"307271148","text":"#!/usr/bin/env python\n\nfrom canari.maltego.entities import NSRecord, DNSName, IPv4Address\nfrom canari.maltego.message import Label, UIMessage\nfrom sploitego.scapytools.dns import nslookup\nfrom canari.framework import configure\nfrom canari.maltego.utils import debug\nfrom canari.maltego.html import Table\nfrom canari.config import config\nfrom scapy.all import DNS\n\n\n__author__ = 'Nadeem Douba'\n__copyright__ = 'Copyright 2012, Sploitego Project'\n__credits__ = []\n\n__license__ = 'GPL'\n__version__ = '0.1'\n__maintainer__ = 'Nadeem Douba'\n__email__ = 'ndouba@gmail.com'\n__status__ = 'Development'\n\n__all__ = [\n 'dotransform'\n]\n\n\n@configure(\n label='To DNS Names [Cache Snoop]',\n description='This transform performs DNS cache snooping on the target DNS server for the Alexa top 500 list.',\n uuids=[\n 'sploitego.v2.IPv4AddressToDNSName_CacheSnoop',\n 'sploitego.v2.NSRecordToDNSName_CacheSnoop'\n ],\n inputs=[\n ( 'Reconnaissance', IPv4Address ),\n ( 'Reconnaissance', NSRecord )\n ]\n)\ndef dotransform(request, response):\n ip = request.value\n ans = nslookup(\"www.google.ca\", nameserver=ip)\n if ans is not None:\n for site in config['dnscachesnoop/wordlist']:\n debug('Resolving %s' % site)\n ans = nslookup(site, nameserver=ip, rd=0)\n if not ans[DNS].ancount:\n ans = nslookup('www.%s' % site, nameserver=ip, rd=0)\n if ans[DNS].ancount:\n e = DNSName(site)\n t = Table(['Name', 'Query Class', 'Query Type', 'Data', 'TTL'], 'Cached Answers')\n for i in range(0, ans[DNS].ancount):\n rr = ans[DNS].an[i]\n t.addrow([\n rr.rrname.rstrip('.'),\n rr.sprintf('%rclass%'),\n rr.sprintf('%type%'),\n rr.rdata.rstrip('.'),\n rr.sprintf('%ttl%')\n ])\n e += Label('Cached Answers', t, type='text/html')\n response += e\n else:\n response += UIMessage('DNS server did not respond to initial DNS request.')\n return response","sub_path":"src/sploitego/transforms/dnscachesnoop.py","file_name":"dnscachesnoop.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"376362186","text":"IMPORT_DIRS = [\n \"windows\",\n \"utils\",\n \"/home/epics/src/R3.14.9-SL-5/base/lib/linux-x86_64\"\n]\n\nAPP_NAME = 'TemplateApp'\n\nENABLE_ERROR_DECORATOR = False\nPROMPT_ON_QUIT = False\nUSE_SIMULATED_PVS = True\n\nSETTINGS_FILE_NAME = \".TemplateApp/settings.json\"\n\nclass KEY(object):\n X_MM_START = 'x_start'\n X_MM_STOP = 'x_stop'\n Z_MM_START = 'z_start'\n Z_MM_STOP = 'z_stop'\n COMMAND = 'cmd'\n KEY = 'key'\n PV = 'pv'\n FORMAT = 'format'\n TEXT = 'text'\n FONT = 'font'\n FONT_SIZE = 'font-size'\n HEIGHT = 'height'\n WIDTH = 'width'\n ALIGN = 'align'\n COLOR = 'color'\n ITEM = 'item'\n LINE_WIDTH = 'line_width'\n LINE_STYLE = 'line_style'\n QT_LAYER = 'z_value'\n\n\n\n","sub_path":"old_tests/template_sandbox/.DONT_USE_TemplateApp/src/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"383798872","text":"#!/usr/bin/env python\n\"\"\" Provides AVKButton Widget Class\n\n\"\"\"\n\nfrom tkinter import Button\nfrom ..Styles import AVKButtonStyles\n\n__author__ = \"Andrew Vorndran\"\n__copyright__ = \"Copyright 2018, Andrew Vorndran\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Andrew Vorndran\"\n__email__ = \"andvornd@iu.edu\"\n\nclass AVKButton(Button):\n\tdef __init__(self,\n\t pmaster,\n\t ptext=None,\n\t pcommand=None,\n\t pstyle=\"DEFAULT\"):\n\t\t\"\"\"\n\t\tInitialization Function\n\t\t:param pmaster: Master Frame\n\t\t:param ptext: Text to display on button\n\t\t:param pcommand: Command button will execute\n\t\t:param pstyle: User-defined style or default as string\n\t\t\"\"\"\n\t\tButton.__init__(self, pmaster)\n\t\tself.config(text=ptext)\n\t\tself.config(command=pcommand)\n\n\t\ttry:\n\t\t\tself.avkbConfigure(AVKButtonStyles[pstyle])\n\t\texcept KeyError:\n\t\t\traise AVKButton.InvalidAVKButtonTypeError(pstyle)\n\n\tdef avkbConfigure(self, pconfig):\n\t\t\"\"\"\n\t\tConfigures style of AVKButton Based on configuration dictionary\n\t\t:param pconfig: Configuration dictionary from AVKButtonStyles\n\t\t:return: None\n\t\t\"\"\"\n\t\tself.configure(font=pconfig['font'])\n\t\tself.configure(fg=pconfig['fontColor'])\n\t\tself.configure(bg=pconfig['backgroundColor'])\n\t\tself.configure(activeforeground=pconfig['activeFontColor'])\n\t\tself.configure(activebackground=pconfig['activeBackgroundColor'])\n\t\tself.configure(relief=pconfig['relief'])\n\n\tclass InvalidAVKButtonTypeError(Exception):\n\t\tdef __init__(self, key):\n\t\t\t\"\"\"\n\t\t\tInitialization Function\n\t\t\t:param key: Key that caused error meaning this AVKButton Style doesn't exist\n\t\t\t\"\"\"\n\t\t\tException.__init__(self, \"\\nInvalid AVButton type: {0}\".format(key))\n","sub_path":"Widgets/AVKButton.py","file_name":"AVKButton.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"308206905","text":"import threading, time\nfrom decimal import *\nfrom trading_bot import *\nfees = {\"spot\": Decimal(0.001), \"market\": Decimal(0.005)}\nstarting_amount = 1000\n\n# Initialize new bot\nbot = TradingBot(fees, starting_amount)\n\n# Custom rules\nwhile True:\n # Manage alerts\n if bot.alerts:\n alert = bot.alerts[-1]\n ema4 = alert['4ma']\n token = alert['token']\n stable_balance = bot.wallet['stable']\n\n # Make sure bot has current price \n if not token in bot.prices:\n continue\n \n current_price = Decimal(bot.prices[token])\n bot.alerts.pop()\n\n if bot.wallet['stable'] > 10:\n if ema4 == sorted(ema4):\n stop_loss = current_price - current_price * Decimal(0.01)\n stop_gain = current_price + current_price * Decimal(0.01)\n\n bot.market_buy(token, bot.wallet['stable'])\n bot.limit_sell(token, bot.wallet[token], stop_loss)\n bot.limit_sell(token, bot.wallet[token], stop_gain)\n #bot.limit_buy(token, bot.wallet['stable'], current_price + current_price * Decimal(0.01))\n #bot.trailing_stop_loss(token, bot.wallet[token], current_price, 0.002)\n \n\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"266681058","text":"#!/usr/local/python/bin/python3\n# coding:utf-8\nimport socketserver\nimport os\nimport sys\nimport logging\nfrom subprocess import getoutput\n# from Crypto.Cipher import AES\nimport struct\nfrom tools.IPy import IP\nimport re, time\nimport threading\nimport signal\n\n\nHOST = \"\"\nPORT = 12777\nADDR = (HOST, PORT)\nALLOWED_HOSTS = [\"119.23.52.178\", \"172.16.90.0/24\", \"127.0.0.1\"]\n\n\nclass threading_install(threading.Thread):\n\tdef __init__(self,cmd):\n\t\tthreading.Thread.__init__(self)\n\t\tself.cmd = cmd\n\n\tdef run(self):\n\t\toutput = getoutput('/bin/bash '+self.cmd)\n\t\tlogfile = self.cmd.split('/')[-1].split('.')[0]+'.log'\n\t\tfd = open(logfile, 'wb')\n\t\tfd.write(output)\n\t\tfd.flush()\n\t\tfd.close()\n\ndef threading_main(cmd):\n\tt = threading_install(cmd)\n\tt.setDaemon(True)\n\tt.start()\n\n\n\n#insert iptables rule for myself\ndef insert_iptables_rule():\n\tIPT = \"/sbin/iptables\",\"/usr/sbin/iptables\",\n\tfor path in IPT:\n\t\tif os.path.exists(path):\n\t\t\tIPT_PATH = path\n\tif 'dpt:'+str(PORT) not in getoutput(IPT_PATH + ' -L -nv'):\n\t\tIPT_COMMAND = IPT_PATH + \" -I INPUT -m state --state NEW -m tcp -p tcp --dport \" + str(PORT) + \" -j ACCEPT\"\n\t\tos.system(IPT_COMMAND)\n\n\n#let current process become a daemon\ndef create_daemon(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):\n\t# 忽略终端I/O信号,STOP信号\n\tsignal.signal(signal.SIGTTOU, signal.SIG_IGN)\n\tsignal.signal(signal.SIGTTIN, signal.SIG_IGN)\n\tsignal.signal(signal.SIGTSTP, signal.SIG_IGN)\n\tsignal.signal(signal.SIGHUP, signal.SIG_IGN)\n\n\t# 结束父进程,使得子进程成为后台进程\n\ttry:\n\t\tif os.fork() > 0:\n\t\t\tsys.exit(0)\n\texcept OSError as error:\n\t\tprint('fork #1 failed: %d (%s)' % (error.errno, error.strerror))\n\t\tsys.exit(1)\n\n\t# 建立一个新的进程组,在这个新的进程组中,子进程成为这个进程组的首进程,以使该进程脱离所有终端\n\tos.setsid()\n\n\t# 再次新建一个子进程,退出父进程,保证该进程不是进程组长,同时让该进程无法再打开一个新的终端\n\ttry:\n\t\tpid = os.fork()\n\t\tif pid > 0:\n\t\t\tprint('Daemon PID %d' % pid)\n\t\t\tsys.exit(0)\n\texcept OSError as error:\n\t\tprint('fork #2 failed: %d (%s)' % (error.errno, error.strerror))\n\t\tsys.exit(1)\n\n\tfor f in sys.stdout, sys.stderr: f.flush()\n\tMAXFD = os.sysconf('SC_OPEN_MAX')\n\tfor i in range(3, MAXFD):\n\t\ttry:\n\t\t\tos.close(i)\n\t\texcept:\n\t\t\tpass\n\tsi = open(stdin, 'rb', 0)\n\tso = open(stdout, 'ab', 0)\n\tse = open(stderr, 'ab', 0)\n\tos.dup2(si.fileno(), sys.stdin.fileno())\n\tos.dup2(so.fileno(), sys.stdout.fileno())\n\tos.dup2(se.fileno(), sys.stderr.fileno())\n\n\t# 改变工作目录,使得进程不与任何文件系统联系\n\tos.chdir('/')\n\n\t# 将文件屏蔽字设置为0\n\tos.umask(0)\n\n\t# 忽略SIGCHLD信号\n\tsignal.signal(signal.SIGCHLD, signal.SIG_IGN)\n\n\n# #AES 256 Encrypt\n# class mycrypt(object):\n# \tdef __init__(self,key):\n# \t\tself.key = key\n# \t\tself.mode = AES.MODE_CBC\n#\n# \tdef myencrypt(self,text):\n# \t\tcryptor = AES.new(self.key,self.mode)\n# \t\tlength = 16\n# \t\tcount = text.count('')\n# \t\tif count < length:\n# \t\t\tadd = (length-count) + 1\n# \t\t\ttext = text + (' ' * add)\n# \t\telif count > length:\n# \t\t\tadd = (length-(count % length)) + 1\n# \t\t\ttext = text + (' ' * add)\n# \t\tself.ciphertext = cryptor.encrypt(text)\n# \t\treturn self.ciphertext\n#\n#\n# \tdef mydecrypt(self,text):\n# \t\tcryptor = AES.new(key,self.mode)\n# \t\tplain_text = cryptor.decrypt(text)\n# \t\treturn plain_text\n\n\n# save formated log to file\ndef loginfo(info, level):\n\tlogger = logging.getLogger()\n\t# handler = logging.FileHandler('/tmp/server.log')\n\thandler = logging.FileHandler('server.log')\n\tlogflt = 
logging.Formatter(\"%(levelname)s [%(asctime)s]: %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n\thandler.setFormatter(logflt)\n\tlogger.addHandler(handler)\n\tlevels = {\"CRITICAL\": 50, \"ERROR\": 40, \"WARNING\": 30, \"INFO\": 20, \"DEBUG\": 10}\n\tfor key in levels:\n\t\tif level == key:\n\t\t\tlogger.setLevel(levels[key])\n\t\t\teval(\"logging.\"+key.lower()+\"(\"+'\"'+info+'\"'+\")\")\n\tlogger.removeHandler(handler)\n\n\n#loginfo('error info...','ERROR')\n\n\n# class which processes commands received from the client\nclass commands(object):\n\tdef __init__(self, cmd):\n\t\tself.cmd = cmd\n\t\tloginfo('Got Command: %s' % cmd,'INFO')\n\t\tprint(\"Got Command: %s\" % cmd)\n\n\tdef check_cmds(self):\n\t\tif self.cmd == \"getsysinfo\":\n\t\t\treturn self.get_sysinfo()\n\t\telif self.cmd ==\"help\":\n\t\t\treturn \"::Valid commands are: getsysinfo getnic getvga getharddrive getparttion getcpu getmem getifvirtued system help\"\n\t\telif \"system\" in self.cmd:\n\t\t\treturn self.system(self.cmd.split('system'))\n\t\telif \"gethardware\" in self.cmd:\n\t\t\treturn self.get_hardware()\n\t\telif self.cmd == \"getnic\":\n\t\t\treturn self.get_nic()\n\t\telif self.cmd == \"getvga\":\n\t\t\treturn self.get_vga()\n\t\telif self.cmd == \"getharddrive\":\n\t\t\treturn self.get_harddrive()\n\t\telif self.cmd == \"getparttion\":\n\t\t\treturn self.get_parttion()\n\t\telif self.cmd == \"getcpu\":\n\t\t\treturn self.get_cpu()\n\t\telif self.cmd == \"getmem\":\n\t\t\treturn self.get_mem()\n\t\telif self.cmd == \"getifvirtued\":\n\t\t\treturn self.get_virtualized()\n\t\telif self.cmd == \"getuptime\":\n\t\t\treturn self.get_uptime()\n\t\telif self.cmd == \"getloadavg\":\n\t\t\treturn self.get_loadavg()\n\t\telif \"install\" in self.cmd:\n\t\t\treturn self.install(self.cmd.split('install'))\n\t\telif self.cmd == \"\":\n\t\t\treturn ''\n\t\telse:\n\t\t\treturn \"::Please input legal command!\"\n\n\n\tdef get_sysinfo(self):\n\t\tr = '\\r\\n'\n\t\tissue = self.get_issue()\n\t\tos = getoutput('uname -o')\n\t\tmachine = getoutput('uname -m')\n\t\tkernel = getoutput('uname -r')\n\t\treturn issue+r+os+r+machine+r+kernel\n\n\tdef get_hardware(self):\n\t\treturn self.get_nic() + '\\n' + self.get_vga().strip() + '\\n' + self.get_harddrive().strip()\n\n\tdef get_bin_path(self):\n\t\tlspci_path = \"/usr/bin/lspci\",\"/sbin/lspci\",\"/usr/sbin/lspci\",\n\t\tfor bin in lspci_path:\n\t\t\tif os.path.exists(bin):\n\t\t\t\tself.lspci = bin\n\t\t\t\tbreak\n\n\tdef get_pci(self):\n\t\tself.get_bin_path()\n\t\treturn getoutput(self.lspci)\n\n\tdef get_harddrive(self):\n\t\tif \"SATA\" in self.get_pci():\n\t\t\treturn getoutput(self.lspci+\" | awk -F ':' '/SATA/ {print $3}'\")\n\t\telif \"RAID\" in self.get_pci():\n\t\t\treturn getoutput(self.lspci+\" | awk -F ':' '/RAID/ {print $3}'\")\n\n\tdef get_nic(self):\n\t\tself.get_bin_path()\n\t\tether_nic = getoutput(self.lspci+\" | awk -F ':' '/Ether/ {print $3}'\")\n\t\treturn ether_nic\n\n\tdef get_vga(self):\n\t\tself.get_bin_path()\n\t\treturn getoutput(self.lspci+\"| awk -F ':' '/VGA/ {print $3}'\")\n\n\tdef get_parttion(self):\n\t\treturn getoutput(\"fdisk -l | awk '/\\/dev\\// {print}'\")\n\n\tdef get_cpu(self):\n\t\treturn getoutput(\"cat /proc/cpuinfo | grep 'model name' | cut -d: -f2 | sed 's/^ //'\")\n\n\tdef get_virtualized(self):\n\t\tcpuinfo = getoutput(\"cat /proc/cpuinfo\")\n\t\tif any(flag in cpuinfo for flag in (\"vmx\", \"vme\", \"svm\")):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\n\tdef get_mem(self):\n\t\tmem = int(getoutput(\"cat /proc/meminfo | grep 'MemTotal' | cut -d: -f2 | sed 's/^\\s*//' | cut -d' ' 
-f1\"))/1024\n\t\treturn \"%s MB\" % mem\n\n\tdef system(self,parms):\n\t\tif parms[1] != \"\":\n\t\t\tif \"rm\" in parms[1]:\n\t\t\t\treturn \"Dangerous! Make sure **the path** you specified!.\\r\\n\"+getoutput(parms[1])\n\t\t\treturn getoutput(parms[1])\n\t\telse:\n\t\t\treturn \"\"\n\n\tdef install(self,parms):\n\t\tif parms[1] != \"\":\n\t\t\tthreading_main(parms[1].strip())\n\n\tdef get_issue(self):\n\t\tissue_file = '/etc/issue'\n\t\tif not os.path.exists(issue_file):\n\t\t\treturn getoutput('uname -o')\n\t\tf = open(issue_file)\n\t\tlines = f.readlines()\n\t\tf.close()\n\t\tfor line in lines:\n\t\t\tif 'Arch' in line:\n\t\t\t\treturn \"ArchLinux\"\n\t\t\telif 'CentOS' in line:\n\t\t\t\treturn \"CentOS\"\n\t\t\telif 'Ubuntu' in line:\n\t\t\t\treturn \"Ubuntu\"\n\t\t\telif 'Fedora' in line:\n\t\t\t\treturn 'Fedora'\n\t\t\telse:\n\t\t\t\treturn \"Linux\"\n\n\tdef get_uptime(self):\n\t\treturn getoutput(\"uptime | cut -d',' -f 1\").strip()\n\n\tdef get_loadavg(self):\n\t\tload1,load2,load3 = os.getloadavg()\n\t\treturn str(load1) + ' ' + str(load2) + ' ' + str(load3)\n\n\nclass MyRequestHandler(socketserver.BaseRequestHandler):\n\tdef handle(self):\n\t\tip_address = str(self.client_address[0])\n\t\tport = str(self.client_address[1])\n\t\tprint('ip_address:%s, port:%s' % (ip_address, port))\n\t\thosts = []\n\t\tcidr = []\n\t\tfor host in ALLOWED_HOSTS:\n\t\t\tif '/' in host:\n\t\t\t\tcidr.append(host)\n\t\t\telse:\n\t\t\t\thosts.append(host)\n\t\tfor net in cidr:\n\t\t\tfor ip in IP(net):\n\t\t\t\thosts.append(str(ip))\n\t\tif ip_address in hosts:\n\t\t\tprint('::Connected from: ', self.client_address)\n\t\t\tloginfo(\"Connected from: %s:%s\" % (ip_address, port), 'INFO')\n\t\telse:\n\t\t\tself.request.send('Not Allowed Here!\\n'.encode())\n\t\t\tprint('::Forbidden Host from: ', self.client_address)\n\t\t\tloginfo(\"Forbidden Host from: %s:%s\" % (ip_address, port), 'INFO')\n\t\t\tself.finish()\n\n\t\twhile True:\n\t\t\tBUF_SIZE = struct.calcsize('!1024s')\n\t\t\tbuffer = self.request.recv(BUF_SIZE)\n\t\t\tprint('len(buffer):%s' % len(buffer))\n\t\t\tif len(buffer) == 1024:\n\t\t\t\tdata = struct.unpack('!1024s', buffer)[0]\n\t\t\telse:\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\n\t\t\tdata = data.decode('utf8').replace('\\x00', '')\n\t\t\tprint('data xx:%s' % data)\n\t\t\tif data == 'byebye':\n\t\t\t\ttry:\n\t\t\t\t\tself.request.send(\"seeyou!\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tloginfo('%s:%s Send failed! 
%s' % (ip_address,port,e),'ERROR')\n\t\t\t\tprint(\"::%s:%s Leaving server.\\r\\n\" % (ip_address,port))\n\t\t\t\tloginfo(\"%s:%s Leaving server.\" % (ip_address,port),'INFO')\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\n\t\t\t#if in putfile mode\n\t\t\t# cmd in client is like this: \"putfile /client/side/file.jpg /server/side/file.jpg APPEND_BIN\"\n\t\t\tif \"putfile\" in data:\n\t\t\t\tfilename = data.split(' ')[2]\n\t\t\t\ttry:\n\t\t\t\t\tmod = data.split(' ')[3]\n\t\t\t\t\tif mod == 'WRITE_BIN': mod = 'wb'\n\t\t\t\t\telif mod == 'WRITE_ASC': mod = 'w'\n\t\t\t\t\telif mod == \"APPEND_BIN\": mod = 'ab+'\n\t\t\t\t\telif mod == \"APPEND_ASC\": mod = 'a+'\n\t\t\t\texcept:\n\t\t\t\t\tmod = 'wb'\n\t\t\t\tfd = open(filename, mod)\n\t\t\t\twhile True:\n\t\t\t\t\tcontent = self.request.recv(1024)\n\t\t\t\t\tif not content:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tfd.write(bytes.fromhex(content.decode()))\n\t\t\t\tfd.flush()\n\t\t\t\tfd.close()\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\n\t\t\t#if in getfile mode\n\t\t\tif \"getfile\" in data:\n\t\t\t\tfilename = data.split(' ')[1]\n\t\t\t\t#if file not exist or filename is a directory, send error information to client\n\t\t\t\tif not os.path.exists(filename) or os.path.isdir(filename):\n\t\t\t\t\tself.request.sendall(struct.pack('!128s', b'File not found, please check the path!'))\n\t\t\t\telse:\n\t\t\t\t\tself.request.sendall(struct.pack('!128s', b'File Found! Will Transfer Now!'))\n\t\t\t\tprint('filename:%s' % filename)\n\t\t\t\tfd = open(filename,'rb')\n\t\t\t\t# send data in a loop\n\t\t\t\twhile True:\n\t\t\t\t\tdata = fd.read(1024)\n\t\t\t\t\tif not data:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tself.request.send(data.hex().encode())\n\t\t\t\tfd.close()\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\n\t\t\t# process the command received from the client\n\t\t\tcmd_output = commands(data)\n\t\t\ttry:\n\t\t\t\tlines = cmd_output.check_cmds()\n\t\t\t\tprint('lines:%s' % lines)\n\t\t\t\tself.request.send(str(lines).encode())\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\t\t\texcept Exception as e:\n\t\t\t\tloginfo('%s:%s Send failed! %s' % (ip_address, port, e), 'ERROR')\n\t\t\t\tself.finish()\n\t\t\t\tbreak\n\n\ndef main():\n\tsocketserver.ThreadingTCPServer.allow_reuse_address = True\n\ttcp_srv = socketserver.ThreadingTCPServer(ADDR, MyRequestHandler)\n\tprint('::waiting for connecting...')\n\ttcp_srv.serve_forever()\n\n\nif __name__ == '__main__':\n\tif os.getuid() != 0:\n\t\tprint(\"This server process should be run as root!\")\n\t\tsys.exit(1)\n\t# insert_iptables_rule()\n\tdaemon_log_path = os.getcwd() + \"/daemon.log\"\n\tcreate_daemon('/dev/null', daemon_log_path, daemon_log_path)\n\tmain()\n","sub_path":"admins/cs/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":11117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
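+For completeness (an illustrative sketch, not part of the record): a matching client has to pad its command into the same fixed '!1024s' struct frame that the handler above unpacks. The host and port below are the script's own defaults; the function name is an assumption.
+
+    import socket
+    import struct
+
+    def send_command(cmd, host='127.0.0.1', port=12777):
+        # Pack the command into the null-padded 1024-byte frame the server expects.
+        with socket.create_connection((host, port)) as s:
+            s.sendall(struct.pack('!1024s', cmd.encode('utf-8')))
+            return s.recv(65536).decode('utf-8', 'replace')
+
+    print(send_command('getsysinfo'))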
+{"seq_id":"116103263","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import division\r\nfrom os import listdir, path, system, makedirs, remove\r\nfrom shutil import rmtree\r\nimport cv2\r\n\r\n\r\ndef main():\r\n    in_path = \"in\"\r\n    tmp_path = \"tmp\"\r\n    out_path = \"out\"\r\n    external_path = \"external\"\r\n\r\n    try:\r\n        rmtree(tmp_path)\r\n    except OSError:\r\n        pass\r\n    makedirs(tmp_path, exist_ok=True)\r\n\r\n    file_count = 0\r\n\r\n    for _file in listdir(in_path):\r\n        if file_count % 10 == 0:\r\n            print(_file)\r\n        input_file = path.join(in_path, _file)\r\n        if path.isfile(input_file):\r\n            if _file.split(\".\")[-1].lower() != \"ppm\":\r\n                # convert to ppm (join only the executable path, not its arguments)\r\n                system(path.join(external_path, \"convert.exe\") + \" \" + input_file\r\n                       + \" \" + path.join(tmp_path, _file.split(\".\")[0] + \".ppm\"))\r\n            src_image = cv2.imread(input_file, -1)\r\n            # apply MLAA\r\n            system(path.join(external_path, \"mlaa.exe\") + \" \" + path.join(tmp_path, _file.split(\".\")[0] + \".ppm\")\r\n                   + \" \" + path.join(tmp_path, _file.split(\".\")[0] + \"_AA.ppm\"))\r\n            # delete temp ppm file\r\n            remove(path.join(tmp_path, _file.split(\".\")[0] + \".ppm\"))\r\n            # convert to png\r\n            save_file = path.join(out_path, _file.split(\".\")[0] + \".png\")\r\n            system(path.join(external_path, \"convert.exe\") + \" \" + path.join(tmp_path, _file.split(\".\")[0] + \"_AA.ppm\")\r\n                   + \" \" + save_file)\r\n            dst_image = cv2.imread(save_file, -1)\r\n            if src_image.shape != dst_image.shape:\r\n                # use shape[:2] so color (3-channel) images unpack correctly\r\n                h, w = src_image.shape[:2]\r\n                image_resize = cv2.resize(dst_image, (w, h), interpolation=cv2.INTER_CUBIC)\r\n                cv2.imwrite(save_file, image_resize)\r\n            # delete temp AA ppm file\r\n            remove(path.join(tmp_path, _file.split(\".\")[0] + \"_AA.ppm\"))\r\n\r\n            file_count += 1\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"287232584","text":"import json\nimport math\nimport re\n\ncfn_spec = None\n\ntf_resources = []\ncfn_types = []\ncfn_occurrences = []\ntf_occurrences = []\ncfn_exceptions = {\n    'AWS::CloudFormation::CustomResource': 'N/A',\n    'AWS::CloudFormation::Macro': 'N/A',\n    'AWS::CloudFormation::Stack': 'N/A',\n    'AWS::CloudFormation::WaitCondition': 'N/A',\n    'AWS::CloudFormation::WaitConditionHandle': 'N/A',\n    'AWS::EC2::SecurityGroupEgress': 'N/A',\n    'AWS::EC2::SecurityGroupIngress': 'N/A',\n    'AWS::EC2::TrunkInterfaceAssociation': 'N/A',\n    'AWS::ElastiCache::SecurityGroupIngress': 'N/A',\n    'AWS::Redshift::ClusterSecurityGroupIngress': 'N/A',\n    'AWS::Route53::RecordSetGroup': 'N/A',\n    'AWS::SDB::Domain': 'N/A',\n    'AWS::IAM::UserToGroupAddition': 'N/A'\n}\ntf_exceptions = {\n    'aws_cloudformation_stack': 'N/A',\n    'aws_cloudformation_stack_set': 'N/A',\n    'aws_cloudformation_stack_set_instance': 'N/A'\n}\n\nwith open(\"util/cfnspec.json\", \"r\") as f:\n    cfn_spec = json.loads(f.read())['ResourceTypes']\n\nwith open(\"util/tf_resources.txt\", \"r\") as f:\n    lines = f.read().splitlines()\n    for line in lines:\n        tf_resources.append(line)\n\nfor cfntype, _ in cfn_spec.items():\n    cfn_types.append(cfntype)\n\ncfn_types.append(\"AWS::Lambda::LayerVersionPermission\")\ncfn_types.append(\"AWS::EC2::VPCEndpointService\")\ncfn_types.append(\"AWS::Lambda::LayerVersion\")\ncfn_types.append(\"AWS::EC2::CapacityReservation\")\ncfn_types = set(cfn_types)\n\nwith open(\"js/mappings.js\", \"r\") as f:\n    text = f.read()\n    cfn_occurrences += re.compile(r'(AWS\\:\\:[a-zA-Z0-9]+\\:\\:[a-zA-Z0-9]+)').findall(text)\n    tf_occurrences += re.compile(r'terraformType\\'\\:\\ \\'(aws(?:\\_[a-zA-Z0-9]+)+)\\'').findall(text)\n\nwith open(\"RESOURCE_COVERAGE.md\", \"w\") as f:\n    f.write(\"## CloudFormation Resource Coverage\\n\\n\")\n    f.write(\"**%s/%s (%s%%)** Resources Covered\\n\" % (\n        len(set(cfn_occurrences)) + len(cfn_exceptions),\n        len(cfn_types),\n        int(math.floor((len(set(cfn_occurrences)) + len(cfn_exceptions)) * 100 / len(cfn_types)))\n    ))\n\n    f.write(\"\\n| Type | Coverage |\\n\")\n    f.write(\"| --- | --- |\\n\")\n\n    for cfntype in sorted(cfn_types):\n        coverage = \"\"\n        if cfn_occurrences.count(cfntype) > 0:\n            coverage = \":thumbsup:\"\n        if cfntype in cfn_exceptions:\n            coverage = cfn_exceptions[cfntype]\n        f.write(\"| *%s* | %s |\\n\" % (cfntype, coverage))\n\n    f.write(\"\\n## Terraform Coverage\\n\\n\")\n    f.write(\"**%s/%s (%s%%)** Resources Covered\\n\" % (\n        len(set(tf_occurrences)) + len(tf_exceptions),\n        len(tf_resources),\n        int(math.floor((len(set(tf_occurrences)) + len(tf_exceptions)) * 100 / len(tf_resources)))\n    ))\n\n    f.write(\"\\n| Type | Coverage |\\n\")\n    f.write(\"| --- | --- |\\n\")\n\n    for tf_resource in sorted(tf_resources):\n        coverage = \"\"\n        if tf_occurrences.count(tf_resource) > 0:\n            coverage = \":thumbsup:\"\n        if tf_resource in tf_exceptions:\n            coverage = tf_exceptions[tf_resource]\n        f.write(\"| *%s* | %s |\\n\" % (tf_resource, coverage))\n","sub_path":"util/generateCoverage.py","file_name":"generateCoverage.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"36703813","text":"import argparse\nimport datetime\nimport os\nimport pprint\nimport sys\n\nimport numpy as np\nimport torch\nfrom env import make_vizdoom_env\nfrom network import DQN\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom tianshou.data import Collector, VectorReplayBuffer\nfrom tianshou.policy import ICMPolicy, PPOPolicy\nfrom tianshou.trainer import OnpolicyTrainer\nfrom tianshou.utils import TensorboardLogger, WandbLogger\nfrom tianshou.utils.net.common import ActorCritic\nfrom tianshou.utils.net.discrete import Actor, Critic, IntrinsicCuriosityModule\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--task\", type=str, default=\"D1_basic\")\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--buffer-size\", type=int, default=100000)\n parser.add_argument(\"--lr\", type=float, default=0.00002)\n parser.add_argument(\"--gamma\", type=float, default=0.99)\n parser.add_argument(\"--epoch\", type=int, default=300)\n parser.add_argument(\"--step-per-epoch\", type=int, default=100000)\n parser.add_argument(\"--step-per-collect\", type=int, default=1000)\n parser.add_argument(\"--repeat-per-collect\", type=int, default=4)\n parser.add_argument(\"--batch-size\", type=int, default=256)\n parser.add_argument(\"--hidden-size\", type=int, default=512)\n parser.add_argument(\"--training-num\", type=int, default=10)\n parser.add_argument(\"--test-num\", type=int, default=100)\n parser.add_argument(\"--rew-norm\", type=int, default=False)\n parser.add_argument(\"--vf-coef\", type=float, default=0.5)\n parser.add_argument(\"--ent-coef\", type=float, default=0.01)\n parser.add_argument(\"--gae-lambda\", type=float, default=0.95)\n parser.add_argument(\"--lr-decay\", type=int, default=True)\n parser.add_argument(\"--max-grad-norm\", type=float, default=0.5)\n parser.add_argument(\"--eps-clip\", type=float, default=0.2)\n parser.add_argument(\"--dual-clip\", type=float, default=None)\n parser.add_argument(\"--value-clip\", type=int, default=0)\n parser.add_argument(\"--norm-adv\", type=int, default=1)\n parser.add_argument(\"--recompute-adv\", type=int, default=0)\n parser.add_argument(\"--logdir\", type=str, default=\"log\")\n parser.add_argument(\"--render\", type=float, default=0.0)\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n )\n parser.add_argument(\"--frames-stack\", type=int, default=4)\n parser.add_argument(\"--skip-num\", type=int, default=4)\n parser.add_argument(\"--resume-path\", type=str, default=None)\n parser.add_argument(\"--resume-id\", type=str, default=None)\n parser.add_argument(\n \"--logger\",\n type=str,\n default=\"tensorboard\",\n choices=[\"tensorboard\", \"wandb\"],\n )\n parser.add_argument(\"--wandb-project\", type=str, default=\"vizdoom.benchmark\")\n parser.add_argument(\n \"--watch\",\n default=False,\n action=\"store_true\",\n help=\"watch the play of pre-trained policy only\",\n )\n parser.add_argument(\n \"--save-lmp\",\n default=False,\n action=\"store_true\",\n help=\"save lmp file for replay whole episode\",\n )\n parser.add_argument(\"--save-buffer-name\", type=str, default=None)\n parser.add_argument(\n \"--icm-lr-scale\",\n type=float,\n default=0.0,\n help=\"use intrinsic curiosity module with this lr scale\",\n )\n parser.add_argument(\n \"--icm-reward-scale\",\n type=float,\n default=0.01,\n help=\"scaling factor for intrinsic curiosity reward\",\n )\n 
parser.add_argument(\n \"--icm-forward-loss-weight\",\n type=float,\n default=0.2,\n help=\"weight for the forward model loss in ICM\",\n )\n return parser.parse_args()\n\n\ndef test_ppo(args=get_args()):\n # make environments\n env, train_envs, test_envs = make_vizdoom_env(\n args.task,\n args.skip_num,\n (args.frames_stack, 84, 84),\n args.save_lmp,\n args.seed,\n args.training_num,\n args.test_num,\n )\n args.state_shape = env.observation_space.shape\n args.action_shape = env.action_space.shape or env.action_space.n\n # should be N_FRAMES x H x W\n print(\"Observations shape:\", args.state_shape)\n print(\"Actions shape:\", args.action_shape)\n # seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n # define model\n net = DQN(\n *args.state_shape,\n args.action_shape,\n device=args.device,\n features_only=True,\n output_dim=args.hidden_size,\n )\n actor = Actor(net, args.action_shape, device=args.device, softmax_output=False)\n critic = Critic(net, device=args.device)\n optim = torch.optim.Adam(ActorCritic(actor, critic).parameters(), lr=args.lr)\n\n lr_scheduler = None\n if args.lr_decay:\n # decay learning rate to 0 linearly\n max_update_num = np.ceil(args.step_per_epoch / args.step_per_collect) * args.epoch\n\n lr_scheduler = LambdaLR(optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num)\n\n # define policy\n def dist(p):\n return torch.distributions.Categorical(logits=p)\n\n policy = PPOPolicy(\n actor,\n critic,\n optim,\n dist,\n discount_factor=args.gamma,\n gae_lambda=args.gae_lambda,\n max_grad_norm=args.max_grad_norm,\n vf_coef=args.vf_coef,\n ent_coef=args.ent_coef,\n reward_normalization=args.rew_norm,\n action_scaling=False,\n lr_scheduler=lr_scheduler,\n action_space=env.action_space,\n eps_clip=args.eps_clip,\n value_clip=args.value_clip,\n dual_clip=args.dual_clip,\n advantage_normalization=args.norm_adv,\n recompute_advantage=args.recompute_adv,\n ).to(args.device)\n if args.icm_lr_scale > 0:\n feature_net = DQN(\n *args.state_shape,\n args.action_shape,\n device=args.device,\n features_only=True,\n output_dim=args.hidden_size,\n )\n action_dim = np.prod(args.action_shape)\n feature_dim = feature_net.output_dim\n icm_net = IntrinsicCuriosityModule(\n feature_net.net,\n feature_dim,\n action_dim,\n device=args.device,\n )\n icm_optim = torch.optim.Adam(icm_net.parameters(), lr=args.lr)\n policy = ICMPolicy(\n policy,\n icm_net,\n icm_optim,\n args.icm_lr_scale,\n args.icm_reward_scale,\n args.icm_forward_loss_weight,\n ).to(args.device)\n # load a previous policy\n if args.resume_path:\n policy.load_state_dict(torch.load(args.resume_path, map_location=args.device))\n print(\"Loaded agent from: \", args.resume_path)\n # replay buffer: `save_last_obs` and `stack_num` can be removed together\n # when you have enough RAM\n buffer = VectorReplayBuffer(\n args.buffer_size,\n buffer_num=len(train_envs),\n ignore_obs_next=True,\n save_only_last_obs=True,\n stack_num=args.frames_stack,\n )\n # collector\n train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)\n test_collector = Collector(policy, test_envs, exploration_noise=True)\n\n # log\n now = datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\")\n args.algo_name = \"ppo_icm\" if args.icm_lr_scale > 0 else \"ppo\"\n log_name = os.path.join(args.task, args.algo_name, str(args.seed), now)\n log_path = os.path.join(args.logdir, log_name)\n\n # logger\n if args.logger == \"wandb\":\n logger = WandbLogger(\n save_interval=1,\n name=log_name.replace(os.path.sep, \"__\"),\n 
run_id=args.resume_id,\n config=args,\n project=args.wandb_project,\n )\n writer = SummaryWriter(log_path)\n writer.add_text(\"args\", str(args))\n if args.logger == \"tensorboard\":\n logger = TensorboardLogger(writer)\n else: # wandb\n logger.load(writer)\n\n def save_best_fn(policy):\n torch.save(policy.state_dict(), os.path.join(log_path, \"policy.pth\"))\n\n def stop_fn(mean_rewards: float) -> bool:\n if env.spec.reward_threshold:\n return mean_rewards >= env.spec.reward_threshold\n return False\n\n # watch agent's performance\n def watch():\n print(\"Setup test envs ...\")\n policy.eval()\n test_envs.seed(args.seed)\n if args.save_buffer_name:\n print(f\"Generate buffer with size {args.buffer_size}\")\n buffer = VectorReplayBuffer(\n args.buffer_size,\n buffer_num=len(test_envs),\n ignore_obs_next=True,\n save_only_last_obs=True,\n stack_num=args.frames_stack,\n )\n collector = Collector(policy, test_envs, buffer, exploration_noise=True)\n result = collector.collect(n_step=args.buffer_size)\n print(f\"Save buffer into {args.save_buffer_name}\")\n # Unfortunately, pickle will cause oom with 1M buffer size\n buffer.save_hdf5(args.save_buffer_name)\n else:\n print(\"Testing agent ...\")\n test_collector.reset()\n result = test_collector.collect(n_episode=args.test_num, render=args.render)\n rew = result[\"rews\"].mean()\n lens = result[\"lens\"].mean() * args.skip_num\n print(f'Mean reward (over {result[\"n/ep\"]} episodes): {rew}')\n print(f'Mean length (over {result[\"n/ep\"]} episodes): {lens}')\n\n if args.watch:\n watch()\n sys.exit(0)\n\n # test train_collector and start filling replay buffer\n train_collector.collect(n_step=args.batch_size * args.training_num)\n # trainer\n result = OnpolicyTrainer(\n policy=policy,\n train_collector=train_collector,\n test_collector=test_collector,\n max_epoch=args.epoch,\n step_per_epoch=args.step_per_epoch,\n repeat_per_collect=args.repeat_per_collect,\n episode_per_test=args.test_num,\n batch_size=args.batch_size,\n step_per_collect=args.step_per_collect,\n stop_fn=stop_fn,\n save_best_fn=save_best_fn,\n logger=logger,\n test_in_train=False,\n ).run()\n\n pprint.pprint(result)\n watch()\n\n\nif __name__ == \"__main__\":\n test_ppo(get_args())\n","sub_path":"examples/vizdoom/vizdoom_ppo.py","file_name":"vizdoom_ppo.py","file_ext":"py","file_size_in_byte":10346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"404887885","text":"\"Implementation of test-runner for nose tests.\"\n\nimport os\n\nimport nose\n\nfrom cosmic_ray.testing.test_runner import TestRunner\nfrom cosmic_ray.util import redirect_stdout, redirect_stderr\n\n\nclass NoseResultsCollector(nose.plugins.Plugin):\n \"Nose plugin that collects results for later analysis.\"\n name = 'cosmic_ray'\n enabled = True\n\n def __init__(self):\n super().__init__()\n self.result = None\n\n def finalize(self, result):\n \"Store result.\"\n self.result = result\n\n\nclass NoseRunner(TestRunner): # pylint: disable=too-few-public-methods\n \"\"\"A TestRunner using nosetest.\n\n This treats `test_args` as a list of arguments to `nose.run()`. The args\n are passed directly to that function. See nose's command line reference\n for a description of what arguments are accepted.\n\n NOTE: ``-s`` is not accepted here!\n \"\"\"\n\n def _run(self):\n argv = ['', '--with-cosmic_ray']\n argv += self.test_args.split()\n collector = NoseResultsCollector()\n\n with open(os.devnull, 'w') as devnull:\n with redirect_stdout(devnull):\n with redirect_stderr(devnull):\n nose.run(argv=argv, plugins=[collector])\n return (collector.result.wasSuccessful(),\n [r[1] for r in collector.result.errors +\n collector.result.failures])\n","sub_path":"plugins/test-runners/nose/cosmic_ray_nose_runner/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"515366281","text":"\"\"\"\nThe module implements the Publish-Subscribe pattern.\n\nThis is the `pubsub push` pattern implementation. See the reference below for context.\nThe Publisher notifies Subscribers via a callback method.\n\nExample:\n    To come.\n\n.. _Publish-Subscribe reference:\n    https://github.com/hovey/pyschool/blob/f3a60800386c0416af4f129671ef1240cf75ff7b/pubsub/README.md\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import NamedTuple\n\n# import numpy as np\n\n\nclass Subscriber(ABC):\n    \"\"\"The base class Subscriber for the Publish-Subscribe pattern.\n\n    Classes should inherit from this base class to receive the\n    subscribe mechanism of the publish-subscribe pattern.\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n\n    @abstractmethod\n    def publication_callback(self, *, message: str = \"\"):\n        \"\"\"The callback method called by a Publisher. Descendants must implement this\n        method to inherit from `Subscriber`.\n\n        Keyword Arguments:\n            message (string): The publication as a message string from the publisher to\n                the subscriber via the callback contract. Defaults to `\"\"` (empty string).\n        \"\"\"\n        pass\n\n\nclass ConcreteSubscriber(Subscriber):\n    \"\"\"This class is included as an example of how Subscriber descendants\n    could be implemented.\n    \"\"\"\n\n    def __init__(self, *, name: str, verbose: bool = False):\n        super().__init__()\n        self.name = name\n        self._verbose = verbose\n        if verbose:\n            print(f\"ConcreteSubscriber {name} created.\")\n\n    def publication_callback(self, *, message: str) -> None:\n        super().publication_callback(message=message)\n\n        if self._verbose:\n            print(f\"-> Callback message: '{message}', received by {self.name}.\")\n\n\nclass PublisherEvent(NamedTuple):\n    subscribed: str = \"subscribed event was triggered\"\n    unsubscribed: str = \"unsubscribed event was triggered\"\n    publication: str = \"Publication event was triggered.\"\n    paused: str = \"subscription was paused\"\n    resumed: str = \"subscription was resumed\"\n\n\nclass Publisher(ABC):\n    \"\"\"The base class Publisher for the Publish-Subscribe pattern.\n\n    Classes should inherit from this base class to receive the\n    publish mechanism of the publish-subscribe pattern.\n\n    Attributes:\n        _subscribers (dict[Subscriber, bool]): Dictionary map with keys\n            as Subscriber objects and values as booleans: True while the\n            subscription is active, False while it is paused.\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n        # self._subscribers = dict(Subscriber, bool) # initialized as empty dictionary\n        self._subscribers = dict() # initialized as empty dictionary\n        self.events = PublisherEvent() # publisher establishes these event strings\n\n    def subscribe(self, subscriber: Subscriber, active: bool = True) -> None:\n        \"\"\"Creates a subscription of the subscriber to a publisher.\n\n        Arguments:\n            subscriber (Subscriber): A subscriber of the publication.\n            active (bool): If False, the subscriber's publications are paused. If\n                True, the subscriber's publications are resumed. Defaults to True.\n        \"\"\"\n        self._subscribers[subscriber] = active\n        subscriber.publication_callback(message=self.events.subscribed)\n
    def unsubscribe(self, subscriber: Subscriber) -> None:\n        \"\"\"Deletes a subscriber from a publisher's dictionary of subscribers.\n\n        Arguments:\n            subscriber (Subscriber): A subscriber of the publication.\n\n        Raises:\n            KeyError: if the subscriber is not in the publisher's dictionary of\n                subscribers.\n        \"\"\"\n        try:\n            # remove first so an unknown subscriber is not notified spuriously\n            del self._subscribers[subscriber]\n            subscriber.publication_callback(message=self.events.unsubscribed)\n        except KeyError:\n            print(f\"Error: subscriber {subscriber} is unknown.\")\n\n    def publish(self, message: str = \"\") -> None:\n        \"\"\"Publishes a message (string) to subscribers via their publication_callback methods.\n\n        This is the `push` implementation of the Publish-Subscribe pattern.\n        \"\"\"\n        for subscriber, active in self._subscribers.items():\n            if active:\n                subscriber.publication_callback(message=message)\n\n    def pause(self, subscriber: Subscriber) -> None:\n        \"\"\"Retains the connection between publisher and subscriber, but turns off\n        notifications from the publisher to subscriber. See also `resume` method.\n\n        Arguments:\n            subscriber (Subscriber): The subscriber for which updates should be\n                paused until `resume` is used.\n\n        Raises:\n            KeyError: if the subscriber is not in the publisher's dictionary of\n                subscribers.\n        \"\"\"\n        try:\n            if subscriber not in self._subscribers:\n                raise KeyError(subscriber) # assignment alone would silently add unknown subscribers\n            self._subscribers[subscriber] = False # subscription is paused\n            subscriber.publication_callback(message=self.events.paused)\n        except KeyError:\n            print(f\"Error: subscriber {subscriber} is unknown.\")\n\n    def resume(self, subscriber) -> None:\n        \"\"\"Retains the connection between publisher and subscriber, but turns on\n        notifications from the publisher to subscriber. See also `pause` method.\n\n        Arguments:\n            subscriber (Subscriber): The subscriber for which updates should be\n                resumed until `pause` is used.\n\n        Raises:\n            KeyError: if the subscriber is not in the publisher's dictionary of subscribers.\n        \"\"\"\n        try:\n            if subscriber not in self._subscribers:\n                raise KeyError(subscriber) # assignment alone would silently add unknown subscribers\n            self._subscribers[subscriber] = True # subscription is resumed\n            subscriber.publication_callback(message=self.events.resumed)\n        except KeyError:\n            print(f\"Error: subscriber {subscriber} is unknown.\")\n\n    @property\n    def subscribers(self) -> dict:\n        \"\"\"Returns the publisher's dictionary of current subscribers and their respective\n        active/paused states.\"\"\"\n        return self._subscribers\n","sub_path":"src/pyschool/pattern/publish_subscribe.py","file_name":"publish_subscribe.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"463602936","text":"class TrapezeBuilding(object):\n\n def __init__(self, un_pos_x, un_pos_y, un_pos_z, un_size_x, un_size_y, un_height):\n # MULTIPLY COORD BY City.UNIT_VALUE\n # The front windows:\n # Import Window\n # Move it to (un_pos_x + (un_size_x-1)/2, un_pos_y, un_pos_z)\n obj = bpy.data.objects['_ModernBuildingWindow']\n mesh = obj.data\n new_obj = bpy.data.objects.new('ModernBuildingWindow', mesh)\n new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, un_pos_y * City.UNIT_VALUE, un_pos_z * City.UNIT_VALUE)\n \n # Scale it (un_size_x, 1, 1)\n new_obj.scale = (un_size_x, 1, 1)\n \n new_obj.select = True\n scene.objects.link(new_obj)\n \n unZ = un_pos_z + 1\n unY = un_pos_y + 1/3\n while unZ < un_pos_z + un_height and unY <= un_pos_y + un_size_y - 2:\n # Duplicate last placed object\n # Move it to (un_pos_x + (un_size_x-1)/2, unY, unZ)\n obj = bpy.data.objects['_ModernBuildingWindow']\n mesh = obj.data\n new_obj = bpy.data.objects.new('ModernBuildingWindow', mesh)\n new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, unY * City.UNIT_VALUE, unZ * City.UNIT_VALUE)\n \n # Scale it (un_size_x, 1, 1)\n new_obj.scale = (un_size_x, 1, 1)\n \n new_obj.select = True\n scene.objects.link(new_obj)\n \n unY += 1/3\n unZ += 1\n \n # The center part:\n if un_size_y > 2:\n # Import Center\n # Move it to (un_pos_x + (un_size_x-1)/2, un_pos_y + 1/2 + 1/6, un_pos_z)\n obj = bpy.data.objects['_ModernBuildingCenter']\n mesh = obj.data\n new_obj = bpy.data.objects.new('ModernBuildingCenter', mesh)\n new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, (un_pos_y + 1/2 + 1/6) * City.UNIT_VALUE, un_pos_z * City.UNIT_VALUE)\n \n # Scale it (un_size_x, 1, 1)\n new_obj.scale = (un_size_x, 1, 1)\n \n new_obj.select = True\n scene.objects.link(new_obj)\n \n unY2 = un_pos_y + 1\n unScaleZ = 2\n while unY2 <= un_pos_y + un_size_y - 9/6:\n # Duplicate last placed Element\n # Move it at (un_pos_x + (un_size_x-1)/2, unY, un_pos_z)\n obj = bpy.data.objects['_ModernBuildingWindow']\n mesh = obj.data\n new_obj = bpy.data.objects.new('ModernBuildingWindow', mesh)\n new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, unY2 * City.UNIT_VALUE, un_pos_z * City.UNIT_VALUE)\n \n # Scale it (un_size_x, 1, unScaleZ)\n new_obj.scale = (un_size_x, 1, unScaleZ)\n \n new_obj.select = True\n scene.objects.link(new_obj)\n \n unScaleZ = min(unScaleZ + 1, unZ - un_pos_z)\n unY2 += 1/3\n \n # The back:\n # Import Back from Modern 2.blend\n # Move it to (un_pos_x + (un_size_x-1)/2, un_pos_y + un_size_y - 1, un_pos_z)\n obj = bpy.data.objects['_ModernBuildingBack']\n mesh = obj.data\n new_obj = bpy.data.objects.new('ModernBuildingBack', mesh)\n new_obj.location = ((un_pos_x + (un_size_x-1) / 2) * City.UNIT_VALUE, (un_pos_y + un_size_y - 1) * City.UNIT_VALUE, un_pos_z * City.UNIT_VALUE)\n \n # Scale it (un_size_x, 1, un_height)\n new_obj.scale = (un_size_x, 1, unZ - un_pos_z)\n \n new_obj.select = True\n scene.objects.link(new_obj)\n \n # Join all objects and remove doubles\n scene.objects.active = new_obj\n bpy.ops.object.join()\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.mesh.remove_doubles()\n bpy.ops.object.mode_set(mode='OBJECT')","sub_path":"src/elements/buildings/TrapezeBuilding.py","file_name":"TrapezeBuilding.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"39137364","text":"# -*- coding: utf-8 -*- {{{\n#\n# Your license here\n# }}}\n\nfrom datetime import datetime, timedelta\n\n\nclass FleetRequest:\n    \"\"\"\n    This class describes input fields required by fleets\n    \"\"\"\n    def __init__(self, ts=None,\n                 sim_step=timedelta(hours=1),\n                 start_time=None,\n                 p=None, q=None, steps=1):\n        \"\"\"\n        Constructor\n        \"\"\"\n        # Timestamp in simulation loop: datetime\n        # (default of None so utcnow() is evaluated per call, not once at import)\n        self.ts_req = ts if ts is not None else datetime.utcnow()\n\n        # Simulation time step: timedelta object\n        self.sim_step = sim_step\n\n        # Initial timestamp in simulation loop: datetime\n        # Used for artificial inertia service\n        self.start_time = start_time\n\n        # Real power request\n        self.P_req = p\n\n        # Reactive power request\n        self.Q_req = q\n\n        # NREL WaterHeater only: Number of steps in simulation.\n        # This value is always = 1 for the sake of not changing WaterHeater code\n        self.steps = 1\n","sub_path":"src/fleet_request.py","file_name":"fleet_request.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"195324432","text":"from flask import Flask, request,jsonify\r\nimport requests\r\nimport json\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/', methods=['GET'])\r\ndef hello():\r\n\treturn 'Hello!!!', 200 # 200 is the HTTP response code to be returned to the client\r\n\r\nfrom datetime import datetime\r\n@app.route('/datetime', methods=['GET'])\r\ndef print_today():\r\n\treturn str(datetime.now()), 200 \r\n## Potential Applications \r\n\r\n\r\n@app.route('/users/<username>')\r\ndef get_user(username):\r\n\treturn \"user: \"+str(username),200\r\n \r\n#http://localhost:5000/add?op1=3&op2=4\r\n@app.route('/add', methods=['GET'])\r\ndef add():\r\n    if 'op1' in request.args.keys() and 'op2' in request.args.keys():\r\n        a = int(request.args['op1'])\r\n        b = int(request.args['op2'])\r\n        return jsonify({\"operand 1\": a, \"operand 2\": b, \"sum\":a+b}) #return JSON object\r\n    else:\r\n        return jsonify({'error':'missing parameter(s)'}), 400\r\n\r\n\r\n#request content-type=application/json\r\n#request body format: {\"op1\":3,\"op2\":5}\r\n@app.route('/mul', methods=['POST'])\r\ndef mul():\r\n\t\r\n\tdata = request.json #get json data from request body\r\n\ta = data[\"op1\"]\r\n\tb = data[\"op2\"]\r\n\t\r\n\treturn jsonify({'mul':a*b}),200\r\n \r\nif __name__ == '__main__':\r\n    app.run(host='0.0.0.0', port=5000, debug=True)\r\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"526421369","text":"import shutil\nimport os\nimport sys\n\nsdkVersion = 'YumiMediationSDK_v'+ sys.argv[1]\nthirdPartys = ['PlayableAds','AdColony','AdMob','Applovin','Baidu','Chartboost','Domob','Facebook','GDT','InMobi','IronSource','Mobvista','Unity','Vungle','OneWay']\npodDir = './Pods/'\nmediationSDKPath = podDir + sdkVersion\nthirdPartyPathName = mediationSDKPath +'/YumiMediationThirdPartys'\ndebugcenterPathName = mediationSDKPath +'/YumiMediationDebugCenter-iOS'\nyumiMediationSDKPathName = mediationSDKPath +'/YumiMediationSDK'\n\n# adapter\ndef archiveAdapter(adapterName):\n\tthirdYumiSDKName = 'Yumi' + adapterName\n\tif adapterName == 'PlayableAds':\n\t\tthirdYumiSDKName = adapterName\n\tthirdYumiMediationAdapterName = 'YumiMediation' + adapterName\n\t#copy adapters\n\tsrcAdapterPath = podDir + 'YumiMediationAdapters' + '/'+ thirdYumiMediationAdapterName\n\ttargetAdapterPath = thirdPartyPathName + '/'+ thirdYumiMediationAdapterName\n\tsrcThirdSDKPath = podDir + thirdYumiSDKName\n\ttargetThirdSDKPath = thirdPartyPathName + '/'+ thirdYumiMediationAdapterName + '/' +thirdYumiSDKName\n\n\tshutil.copytree(srcAdapterPath,targetAdapterPath,symlinks=True)\n\tshutil.copytree(srcThirdSDKPath,targetThirdSDKPath,symlinks=True)\n\tshutil.make_archive(targetAdapterPath,'bztar',targetAdapterPath)\n\tshutil.rmtree(targetAdapterPath)\n\n# zip makes the archive larger; gztar and bztar are about the same; plain tar does not change the size\ndef archiveThirdAdapters():\n\tfor thirdParty in thirdPartys:\n\t\tprint('is copying %s' % thirdParty)\n\t\tarchiveAdapter(thirdParty)\n\n\tif os.path.exists(thirdPartyPathName):\n\t\tprint('is archiving Adapters')\n\t\tshutil.make_archive(thirdPartyPathName,'bztar',thirdPartyPathName)\n\t\tshutil.rmtree(thirdPartyPathName)\n\n# debugcenter \ndef archiveDebugcenter():\n\t#copy debugcenter\n\tprint('is copying debugcenter')\n\tsrcDebugcenterPath = podDir + 'YumiMediationDebugCenter-iOS'\n\n\tshutil.copytree(srcDebugcenterPath,debugcenterPathName,symlinks = True)\n\tif os.path.exists(debugcenterPathName):\n\t\tprint('is archiving debugcenter')\n\t\tshutil.make_archive(debugcenterPathName,'bztar',debugcenterPathName)\n\t\tshutil.rmtree(debugcenterPathName)\n\ndef archiveYumiMediationSDK():\n\t#copy YumiMediationSDK\n\tprint('is copying YumiMediationSDK')\n\tsrcYumiMediationSDKPath = podDir + 'YumiMediationSDK'\n\tshutil.copytree(srcYumiMediationSDKPath,yumiMediationSDKPathName,symlinks = True)\n\tif os.path.exists(yumiMediationSDKPathName):\n\t\tprint('is archiving YumiMediationSDK')\n\t\tshutil.make_archive(yumiMediationSDKPathName,'bztar',yumiMediationSDKPathName)\n\t\tshutil.rmtree(yumiMediationSDKPathName)\n\ndef archiveReleaseSDK():\n\tif os.path.exists(mediationSDKPath):\n\t\tshutil.rmtree(mediationSDKPath)\n\tarchiveThirdAdapters()\n\tarchiveDebugcenter()\n\tarchiveYumiMediationSDK()\n\t#copy xcconfig\n\tpodPath = os.path.dirname(podDir)\n\txcconfigPath = os.path.dirname(podPath) + \"/YumiMediationSDKConfig.xcconfig\"\n\tif os.path.exists(xcconfigPath):\n\t\tprint('is copying YumiMediationSDKConfig.xcconfig')\n\t\tshutil.copy(xcconfigPath,mediationSDKPath)\n\n\tshutil.make_archive(mediationSDKPath,'bztar',mediationSDKPath)\n\tprint(\"archive yumi mediation sdk succeeded\")\n\n# release archive yumi mediation sdk\narchiveReleaseSDK()\n","sub_path":"archivedYumiMediationSDK.py","file_name":"archivedYumiMediationSDK.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"237347575","text":"#!/usr/bin/python3\n\"\"\"Unittest for Base Model\n\"\"\"\nimport unittest\nimport models\nfrom models.base_model import BaseModel\nimport models.base_model\nimport json\nimport pep8\nimport sys\nimport io\nfrom datetime import datetime\nimport inspect\nimport uuid\nimport time\nimport os\n\n\nclass TestDocsBaseModel(unittest.TestCase):\n    \"\"\"THE DOCUMENTATION TESTS WORK\n    check for documentation \"\"\"\n\n    def test_permissions(self):\n        \"\"\" Test to check the permissions \"\"\"\n        exist = os.access('models/base_model.py', os.F_OK)\n        self.assertTrue(exist)\n        read = os.access('models/base_model.py', os.R_OK)\n        self.assertTrue(read)\n        write = os.access('models/base_model.py', os.W_OK)\n        self.assertTrue(write)\n        exe = os.access('models/base_model.py', os.X_OK)\n        self.assertTrue(exe)\n\n    def test_module_doc(self):\n        \"\"\" check for module documentation \"\"\"\n        self.assertTrue(len(models.base_model.__doc__) > 0)\n\n    def test_class_doc(self):\n        \"\"\" check for documentation \"\"\"\n        self.assertTrue(len(BaseModel.__doc__) > 0)\n\n    def test_method_docs(self):\n        \"\"\" check for method documentation \"\"\"\n        # dir() yields attribute names (strings); resolve each to the actual\n        # member before checking its docstring\n        for name in dir(BaseModel):\n            method = getattr(BaseModel, name)\n            if callable(method):\n                self.assertTrue(len(method.__doc__) > 0)\n\n\nclass TestPep8BaseModel(unittest.TestCase):\n    \"\"\"WORKS\n    check for pep8 validation \"\"\"\n    def test_pep8(self):\n        \"\"\" test base and test_base for pep8 conformance \"\"\"\n        style = pep8.StyleGuide(quiet=True)\n        file1 = 'models/base_model.py'\n        file2 = 'tests/test_models/test_base_model.py'\n        result = style.check_files([file1, file2])\n        self.assertEqual(result.total_errors, 0,\n                         \"Found code style errors (and warning).\")\n\n\nclass TestBaseModel(unittest.TestCase):\n    \"\"\"\n    tests class BaseModel\n    \"\"\"\n    def tearDown(self):\n        \"\"\"clean everything up after running setup\"\"\"\n        sys.stdout = sys.__stdout__\n        os.remove(\"file.json\")\n\n    def test_init(self):\n        \"\"\" DOES NOT WORK\n        checks correct instances \"\"\"\n        ww = BaseModel()\n        ww.name = \"waluigi\"\n        ww.my_number = 40\n        a_t = {\n            \"id\": str,\n            \"created_at\": datetime,\n            \"updated_at\": datetime,\n            \"name\": str,\n            \"my_number\": int\n        }\n        for a, t in a_t.items():\n            with self.subTest(a=a, t=t):\n                self.assertIn(a, ww.__dict__)\n                self.assertEqual(isinstance(ww.__dict__[a], t), True)\n        self.assertEqual(ww.name, \"waluigi\")\n        self.assertEqual(ww.my_number, 40)\n\n    def test_save(self):\n        \"\"\"WORKS\n        check if last updated changes were saved\"\"\"\n        hola = BaseModel()\n        creado = hola.created_at\n        viejo = hola.updated_at\n        time.sleep(1)\n        hola.save()\n        nuevo = hola.updated_at\n        self.assertNotEqual(viejo, nuevo)\n        self.assertEqual(viejo, creado)\n        self.assertNotEqual(nuevo, creado)\n\n    def test_uuid(self):\n        \"\"\"WORKS\n        test valid uuid\"\"\"\n        pepita = BaseModel()\n        cholado = BaseModel()\n\n        def is_valid_uuid(val):\n            \"\"\"check uuid\"\"\"\n            try:\n                uuid.UUID(str(val))\n                return True\n            except ValueError:\n                return False\n        self.assertEqual(is_valid_uuid(pepita.id), True)\n        self.assertNotEqual(pepita.id, cholado.id)\n\n    def test_to_dict(self):\n        \"\"\"WORKS\n        test to dictionary for json\"\"\"\n        pipelin = BaseModel()\n        pipelin.name = \"felipe\"\n        pipelin.my_number = 5\n        my_dictionary = pipelin.to_dict()\n        expected = [\"id\", \"created_at\", \"updated_at\", \"name\", \"my_number\",\n                    \"__class__\"]\n        self.assertCountEqual(my_dictionary.keys(), expected)\n        self.assertEqual(my_dictionary['__class__'], \"BaseModel\")\n        self.assertEqual(my_dictionary['name'], \"felipe\")\n
        self.assertEqual(my_dictionary['my_number'], 5)\n\n    def test_dict_dt_values(self):\n        \"\"\"\n        check if attribute datetime values are in the correct output format\n        \"\"\"\n        box = BaseModel()\n        box.name = \"Banana\"\n        box.my_number = 25\n        d = box.to_dict()\n        format = \"%Y-%m-%dT%H:%M:%S.%f\"\n        self.assertEqual(d[\"__class__\"], \"BaseModel\")\n        self.assertEqual(isinstance(d[\"created_at\"], str), True)\n        self.assertEqual(isinstance(d[\"updated_at\"], str), True)\n        self.assertEqual(d[\"created_at\"], box.created_at.strftime(format))\n        self.assertEqual(d[\"updated_at\"], box.updated_at.strftime(format))\n\n    def test_datetime(self):\n        \"\"\"WORKS\n        check datetime values\"\"\"\n        clock_one = datetime.now()\n        one = BaseModel()\n        clock_two = datetime.now()\n        self.assertTrue(clock_one <= one.created_at <= clock_two)\n        time.sleep(1)\n        clock_one = datetime.now()\n        two = BaseModel()\n        clock_two = datetime.now()\n        self.assertTrue(clock_one <= two.created_at <= clock_two)\n        self.assertEqual(one.created_at, one.updated_at)\n        self.assertEqual(two.created_at, two.updated_at)\n        self.assertNotEqual(one.created_at, two.created_at)\n        self.assertNotEqual(one.updated_at, two.updated_at)\n\n    def test_str(self):\n        \"\"\"WORKS\n        test of str\"\"\"\n        pepito = BaseModel()\n        string = \"[BaseModel] ({}) {}\".format(pepito.id, pepito.__dict__)\n        self.assertEqual(string, str(pepito))\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"59313444","text":"from orbis import app\nfrom orbis.libs.files_lib import build_file_name\nfrom orbis.libs.files_lib import get_timestamp\n\nimport collections\nimport os\nimport csv\n\n\ndef run(yaml_config: dict, data: dict, results: dict):\n app.logger.info(\"Saving results as csv.\")\n\n file_name = build_file_name(\"collected_results.csv\", yaml_config, raw=True)\n\n response = collections.OrderedDict()\n response[\"date\"] = get_timestamp()\n response[\"name\"] = yaml_config[\"file_name\"]\n\n response[\"macro_precision\"] = results[\"binary_classification\"][\"macro\"][\"precision\"]\n response[\"macro_recall\"] = results[\"binary_classification\"][\"macro\"][\"recall\"]\n response[\"macro_f1_score\"] = results[\"binary_classification\"][\"macro\"][\"f1_score\"]\n\n response[\"micro_precision\"] = results[\"binary_classification\"][\"micro\"][\"precision\"]\n response[\"micro_recall\"] = results[\"binary_classification\"][\"micro\"][\"recall\"]\n response[\"micro_f1_score\"] = results[\"binary_classification\"][\"micro\"][\"f1_score\"]\n\n response[\"has_score\"] = results[\"has_score\"]\n response[\"no_score\"] = results[\"no_score\"]\n response[\"empty_responses\"] = results[\"empty_responses\"]\n\n response[\"aggregator_name\"] = yaml_config[\"aggregator\"][\"service\"][\"name\"]\n response[\"aggregator_profile\"] = yaml_config[\"aggregator\"][\"service\"].get(\"profile\", \"None\")\n response[\"aggregator_limit\"] = yaml_config[\"aggregator\"][\"service\"].get(\"limit\", \"None\")\n response[\"aggregator_location\"] = yaml_config[\"aggregator\"][\"service\"][\"location\"]\n response[\"aggregator_data_set\"] = yaml_config[\"aggregator\"][\"input\"][\"data_set\"][\"name\"]\n\n response[\"evaluator_name\"] = yaml_config[\"evaluator\"][\"name\"]\n response[\"scorer_name\"] = yaml_config[\"scorer\"][\"name\"]\n\n response[\"entities\"] = \" \".join([e for e in yaml_config[\"scorer\"][\"entities\"]])\n response[\"mapping\"] = yaml_config[\"aggregator\"][\"input\"].get(\"mappings\", \"None\")\n response[\"lense\"] = yaml_config[\"aggregator\"][\"input\"].get(\"lenses\", \"None\")\n response[\"filter\"] = yaml_config[\"aggregator\"][\"input\"].get(\"filters\", \"None\")\n\n header = [key for key in response.keys()]\n values = [value for value in response.values()]\n\n if not os.path.isfile(file_name):\n with open(file_name, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t', quotechar=\"'\")\n writer.writerow(header)\n\n with open(file_name, 'a', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t', quotechar=\"'\")\n writer.writerow(values)\n\n app.logger.info(\"Finished saving results as csv.\")\n","sub_path":"src/orbis/plugins/savors/list_results/list_results.py","file_name":"list_results.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"25717633","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------------\nfrom __future__ import print_function\nfrom __future__ import division\nimport argparse\nimport collections\nimport six\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='translate.py')\n\nparser.add_argument('-ans', required=True,\n help='Path to answers')\nparser.add_argument('-pred', required=True,\n help='Path to predictions')\nparser.add_argument('-output_ranked_r2',\n help='Path to write predictions ranked by R-2.')\nparser.add_argument('-source',\n help='If output ranked prediction, this path points to the input.')\nopt = parser.parse_args()\n\ndef _ngrams(words, n):\n queue = collections.deque(maxlen=n)\n for w in words:\n queue.append(w)\n if len(queue) == n:\n yield tuple(queue)\n\ndef _ngram_counts(words, n):\n return collections.Counter(_ngrams(words, n))\n\ndef _ngram_count(words, n):\n return max(len(words) - n + 1, 0)\n\ndef _counter_overlap(counter1, counter2):\n result = 0\n for k, v in six.iteritems(counter1):\n result += min(v, counter2[k])\n return result\n\ndef _safe_divide(numerator, denominator):\n if denominator > 0:\n return numerator / denominator\n else:\n return 0\n\ndef _safe_f1(matches, recall_total, precision_total, alpha=1):\n recall_score = _safe_divide(matches, recall_total)\n precision_score = _safe_divide(matches, precision_total)\n denom = (1.0 - alpha) * precision_score + alpha * recall_score\n if denom > 0.0:\n return (precision_score * recall_score) / denom\n else:\n return 0.0\n\ndef rouge_n(peer, models, n, alpha=1):\n \"\"\"\n Compute the ROUGE-N score of a peer with respect to one or more models, for\n a given value of `n`.\n \"\"\"\n matches = 0\n recall_total = 0\n peer_counter = _ngram_counts(peer, n)\n for model in models:\n model_counter = _ngram_counts(model, n)\n matches += _counter_overlap(peer_counter, model_counter)\n recall_total += _ngram_count(model, n)\n precision_total = len(models) * _ngram_count(peer, n)\n return _safe_f1(matches, recall_total, precision_total, alpha)\n\ndef rouge_1(peer, models, alpha=1):\n \"\"\"\n Compute the ROUGE-1 (unigram) score of a peer with respect to one or more\n models.\n \"\"\"\n return rouge_n(peer, models, 1, alpha)\n\ndef rouge_2(peer, models, alpha=1):\n \"\"\"\n Compute the ROUGE-2 (bigram) score of a peer with respect to one or more\n models.\n \"\"\"\n return rouge_n(peer, models, 2, alpha)\n\ndef rouge_3(peer, models, alpha=1):\n \"\"\"\n Compute the ROUGE-3 (trigram) score of a peer with respect to one or more\n models.\n \"\"\"\n return rouge_n(peer, models, 3, alpha)\n\ndef lcs(a, b):\n \"\"\"\n Compute the length of the longest common subsequence between two sequences.\n\n Time complexity: O(len(a) * len(b))\n Space complexity: O(min(len(a), len(b)))\n \"\"\"\n # This is an adaptation of the standard LCS dynamic programming algorithm\n # tweaked for lower memory consumption.\n # Sequence a is laid out along the rows, b along the columns.\n # Minimize number of columns to minimize required memory\n if len(a) < len(b):\n a, b = b, a\n # Sequence b now has the minimum length\n # Quit early if one sequence is empty\n if len(b) == 0:\n return 0\n # Use a single buffer to store the counts for the current row, and\n # overwrite it on each pass\n row = [0] * len(b)\n for ai in a:\n left = 0\n diag = 0\n for j, bj in enumerate(b):\n up = row[j]\n if ai == bj:\n value = diag + 1\n else:\n value = max(left, up)\n row[j] = 
value\n left = value\n diag = up\n # Return the last cell of the last row\n return left\n\ndef rouge_l(peer, models, alpha=1):\n \"\"\"\n Compute the ROUGE-L score of a peer with respect to one or more models.\n \"\"\"\n matches = 0\n recall_total = 0\n for model in models:\n matches += lcs(model, peer)\n recall_total += len(model)\n precision_total = len(models) * len(peer)\n return _safe_f1(matches, recall_total, precision_total, alpha)\n\nif __name__ == \"__main__\":\n answers = []\n for line in open(opt.ans).readlines():\n line_split = line.decode('utf8').strip()\n answers.append([line_split])\n predictions = []\n for line in open(opt.pred).readlines():\n line_split = line.decode('utf8').strip()\n predictions.append(line_split)\n rouge_ones = []\n rouge_twos = []\n rouge_ls = []\n for pred_id in range(len(predictions)):\n rouge_ones.append(rouge_1(predictions[pred_id], answers[pred_id]))\n rouge_twos.append(rouge_2(predictions[pred_id], answers[pred_id]))\n rouge_ls.append(rouge_l(predictions[pred_id], answers[pred_id]))\n print(\"R-1: {:.4f}\".format(sum(rouge_ones)/len(predictions)))\n print(\"R-2: {:.4f}\".format(sum(rouge_twos)/len(predictions)))\n print(\"R-L: {:.4f}\".format(sum(rouge_ls)/len(predictions)))\n if not opt.output_ranked_r2: exit()\n if opt.source:\n sources = []\n for line in open(opt.source).readlines():\n line_split = line.decode('utf8').strip().split('\\t')\n sources.append(line_split[0])\n ranked_ids = np.argsort(rouge_twos)[::-1]\n with open(opt.output_ranked_r2, 'w') as ranked_pred_file:\n ranked_pred_file.write(\"l_id\\trouge_2F\\tpred\\tgold\\torg\\n\")\n for ranked_id in ranked_ids:\n ranked_pred_file.write(\"{}\".format(ranked_ids[ranked_id]+1))\n ranked_pred_file.write('\\t')\n ranked_pred_file.write(\"{:.4f}\".format(rouge_twos[ranked_id]))\n ranked_pred_file.write('\\t')\n ranked_pred_file.write((''.join(predictions[ranked_id])).encode('utf8'))\n ranked_pred_file.write('\\t')\n ranked_pred_file.write((''.join(answers[ranked_id][0])).encode('utf8'))\n ranked_pred_file.write('\\t')\n if opt.source is not None:\n ranked_pred_file.write((''.join(sources[ranked_id])).encode('utf8'))\n else:\n ranked_pred_file.write(u'-')\n ranked_pred_file.write('\\n')\n","sub_path":"get_python_rouge.py","file_name":"get_python_rouge.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"106932018","text":"\"\"\"\nMy Chew (candy) distribution problem\n\"\"\"\n\nfront = rear = None\n\nclass Node:\n    def __init__(self, data, n=None, p=None):\n        self.data = data\n        self.next = n\n        self.prev = p\n\ndef isEmpty():\n    return front == None\n\ndef enQ(data):\n    global front, rear\n    # create a new node (object); think of it as storing that node's address\n    newNode = Node(data)\n    if isEmpty():\n        front = newNode\n    else:\n        rear.next = newNode\n    rear = newNode\n\ndef deQ():\n    global front, rear\n    if isEmpty():\n        print(\"Empty\")\n        return None\n    data = front.data\n    front = front.next\n    if isEmpty():\n        rear = None\n    return data\n\nmyzzu = 20\nperson = 0\nget = 1\nwhile myzzu > 0:\n    person += 1\n    print(f\"Person {person} arrives\")\n    enQ([person,get])\n    prev_person, prev_get = deQ()\n    myzzu = myzzu - prev_get\n    print(f\"Person {prev_person} takes {prev_get} piece(s).\")\n    print(f\"{myzzu} piece(s) left\")\n    if myzzu < 1:\n        print(f\"Person {prev_person} gets the last one\")\n        break\n    else:\n        enQ([prev_person,prev_get+1])\n    ","sub_path":"OnlineJudge/SWExpertAcademy/Example/20190225/example4.py","file_name":"example4.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"351436017","text":"import numpy as np\nimport pylab as py\nfrom functions import *\nfrom flux_box import flux\nfrom math import factorial\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib\nfrom matplotlib import cm\nfrom multiprocessing import Process\nimport multiprocessing as mp\n\n#Builds a labelmap for various moments up to n=10\nlabelmap = {(0,0): \"eD\", (0,1): \"eQ\", (0,2): \"eO\",\n            (1,0): \"mD\", (1,1): \"mQ\", (1,2): \"mO\"}\nfor n in range(3, 10):\n    labelmap[(0,n)] = \"e\" + str(2**(n+1))\n    labelmap[(1,n)] = \"m\" + str(2**(n+1))\n\ndef set_lim(xmin, xmax, xvals, yvals, prev = [0]):\n    \"\"\"set xlimit and ylimit of current plot\"\"\"\n    ymax = max(((xvals>xmin) & (xvals<xmax))*yvals)\n    if ymax > max(prev):\n        py.ylim([0,ymax*1.33])\n    return ymax\n\nclass modes:\n    def __init__(self, anm, index, direc, out_direc=\"/output/\"):\n        self.anm = anm\n        self.index = index\n        self.I,self.Nfreq,self.J,self.K = anm.shape\n        out_direc = direc + out_direc\n\n        #compute field incident intensity at center\n        (Exr_mid, Exi_mid, Eyr_mid, Eyi_mid, Ezr_mid, Ezi_mid) = [np.zeros(self.Nfreq) for i in range(6)]\n        norms = {\"rE_mid_x\": Exr_mid, \"iE_mid_x\": Exi_mid,\n                 \"rE_mid_y\": Eyr_mid, \"iE_mid_y\": Eyi_mid,\n                 \"rE_mid_z\": Ezr_mid, \"iE_mid_z\": Ezi_mid}\n        for fname in norms:\n            filename = \"{0}/{1}.dat\".format(out_direc,fname)\n            datafile = open(filename, 'rb')\n            data = np.fromfile(datafile)\n            norms[fname] = data\n        Ex_norm = norms[\"rE_mid_x\"] + 1j*norms[\"iE_mid_x\"]\n        Ey_norm = norms[\"rE_mid_y\"] + 1j*norms[\"iE_mid_y\"]\n        Ez_norm = norms[\"rE_mid_z\"] + 1j*norms[\"iE_mid_z\"]\n        self.Enorm = (np.abs(Ex_norm)**2 + np.abs(Ey_norm)**2 + np.abs(Ez_norm)**2)**.5\n\n        #compute flux through the 6 box faces\n        f = flux(direc + \"/input.txt\")\n        fcen = f.D[\"freq\"]\n        fwid = f.D[\"fwidth\"]\n        self.wav = 1000/np.linspace(fcen-fwid/2,fcen+fwid/2,self.Nfreq)\n\n        files = {\"front\": \"flux_front.dat\", \"back\": \"flux_back.dat\",\n                 \"left\": \"flux_left.dat\",\"right\": \"flux_right.dat\",\n                 \"top\": \"flux_top.dat\", \"bottom\": \"flux_bottom.dat\"}\n        norm_file = \"flux_norm.dat\"\n\n        self.tot = np.zeros(self.Nfreq)\n        for k in files:\n            y = f.load_flux(out_direc + files[k])\n            self.tot += np.abs(y)\n            # if np.average(y) > 0:\n            #     self.tot += y\n            # else:\n            #     self.tot -= y\n        self.norm = f.load_flux(out_direc + norm_file)\n\n    def plot_box_scattering(self, area, label=\"Box Flux\"):\n        \"\"\"plot the scattering cross-section calculated through the box with given area in micron^2\"\"\"\n        py.xlabel(\"Wavelength (nm)\")\n        py.ylabel(\"Scattering Cross Section ($\\\\mu m^2$)\")\n        yval = area*self.tot/self.norm\n        py.plot(self.wav, yval, linewidth=1.5, label=label)\n        set_lim(200,1000,self.wav,yval)\n\n    def get_freq_index(self, wavelength):\n        \"\"\"return freq_index closest to a given wavelength\"\"\"\n        return min(enumerate(np.abs(self.wav-wavelength)), key = lambda p: p[1])[0]\n\n\n    def plot_angular(self, freq_index, r, phi, thetapts=100):\n        \"\"\"Plot field 2d angular distribution at radius r, angle phi\"\"\"\n        k = 2*np.pi*self.index/self.wav[freq_index]\n\n        def E(theta):\n            retE = np.zeros(3, dtype=complex)\n            for n in range(1,self.anm.shape[2]+1):\n                for m in range(-n, n+1):\n                    factor = 1j**(n+2*m-1)*((2*n+1)*factorial(n-m)/factorial(n+m))**.5\n                    retE += factor*self.anm[0,freq_index,n-1,m+n]*N(n,m,theta,phi,r,k)\n                    retE += factor*self.anm[1,freq_index,n-1,m+n]*M(n,m,theta,phi,r,k)\n            # retE *= k**2\n            return retE\n\n        def E2(theta):\n            Eval = E(theta)\n            return np.dot(Eval, np.conj(Eval)).real\n\n        theta = np.linspace(0.01,2*np.pi-0.01, thetapts)\n
        rval = [E2(th) for th in theta]\n\n        ax = py.subplot(111, projection='polar')\n        py.plot(theta,rval)\n        ax.grid(True)\n\n        py.title(\"Angular Distribution of $|\\\\mathbf{E}|^2$\", fontsize=18)\n\n    def plot_angular_3d(self, freq_index, r, pts=30, axis = True):\n        \"\"\"Plot field 3d angular distribution at radius r\"\"\"\n        k = 2*np.pi/self.wav[freq_index]\n\n        ax = py.subplot2grid((1,1),(0,0), projection='3d')\n        if not axis:\n            ax.axis('off')\n\n        theta = np.linspace(0.01,np.pi-.01,pts)\n        phi = np.linspace(0,2*np.pi,pts)\n\n        th,ph = np.meshgrid(theta,phi)\n        shape = th.shape\n        R = np.zeros(shape)\n        for i in range(shape[0]):\n            for j in range(shape[1]):\n                retE = np.zeros(3, dtype=complex)\n                for n in range(1,self.anm.shape[2]+1):\n                    for m in range(-n, n+1):\n                        # if n != 2:\n                        #     continue\n                        factor = 1j**(n+2*m-1)*((2*n+1)*factorial(n-m)/factorial(n+m))**.5\n                        retE += factor*self.anm[0,freq_index,n-1,m+n]*N(n,m,th[i,j],ph[i,j],r,k)\n                        retE += factor*self.anm[1,freq_index,n-1,m+n]*M(n,m,th[i,j],ph[i,j],r,k)\n\n                R[i,j] = np.dot(retE, np.conj(retE)).real\n        R /= np.max(R)\n        X = R*np.sin(th)*np.cos(ph)\n        Y = R*np.sin(th)*np.sin(ph)\n        Z = R*np.cos(th)\n        colors = np.zeros((shape[0], shape[1], 4))\n        for i in range(shape[0]):\n            for j in range(shape[1]):\n                colors[i,j,:] = cm.rainbow(R[i,j])\n        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,shade=False, facecolors=colors,linewidth=.1, edgecolors='#000000')\n        surf.set_edgecolor('k')\n\n        a=1.5\n        py.xlim([-a,a])\n        py.ylim([-a,a])\n        ax.set_zlim(-a,a)\n        cset = ax.contourf(X,Y,Z, zdir='x', offset=-a, cmap=cm.coolwarm)\n        cset = ax.contourf(X,Y,Z, zdir='y', offset=a, cmap=cm.coolwarm)\n        ax.set_xlim3d(-a, a)\n        ax.set_ylim3d(-a, a)\n        ax.set_zlim3d(-a, a)\n\n    def get_E(self, r, theta, phi, freq_index, Nmax = None):\n        \"\"\"Determine the E field at position (r,theta,phi) using up to n=Nmax\"\"\"\n        Eval = 0 + 0j\n        if Nmax:\n            J = Nmax\n        else:\n            J = self.J\n\n        for j in range(J):\n            n = j + 1\n            for k in range(2*n+1):\n                m = -n + k\n                Enm = self.Enorm[freq_index]*1j**(n+2*m-1)/(2*np.pi**.5)*((2*n+1)*factorial(n-m)/factorial(n+m))**.5\n                knum = 2*np.pi*self.index/self.wav[freq_index]\n                Eval += knum**2*Enm*self.anm[0,freq_index,j,k]*N(n,m,theta,phi,r,knum)\n                Eval += knum**2*Enm*self.anm[1,freq_index,j,k]*M(n,m,theta,phi,r,knum)\n\n        return Eval\n\n    def plot_anm(self, Nmax = None):\n        \"\"\"Plot all moments up to n=Nmax, return total sum across these moments\"\"\"\n        tot = np.zeros(self.Nfreq)\n        if Nmax:\n            J = Nmax\n        else:\n            J = self.J\n\n        prev = [0]\n        for i in range(self.I):\n            for j in range(J):\n                y = self.index**3*1000*(2*np.pi/self.wav)**2*(j+1)*(j+2)*np.sum(abs(self.anm[i,:,j,:])**2,axis=1)\n                tot += y\n                py.plot(self.wav,y, label = labelmap[(i,j)], linewidth=1.5)\n                ymax = set_lim(200,1000,self.wav,y,prev=prev)\n                prev.append(ymax)\n\n        py.xlabel(\"Wavelength (nm)\")\n        py.ylabel(\"Scattering Cross Section ($\\\\mu m^2$)\")\n        return tot\n\n    #note that r isn't used with analytic integral\n    def scattered_flux(self, r, area, Nmax = None):\n        \"\"\"Plot all moments up to n=Nmax, return total sum across these moments\"\"\"\n        flux = np.zeros(self.Nfreq)\n        if Nmax:\n            J = Nmax\n        else:\n            J = self.J\n\n        prev = [0]\n        for j in range(J):\n            n = j + 1\n            temp_e = np.zeros(self.Nfreq)\n            temp_m = np.zeros(self.Nfreq)\n            for k in range(2*n+1):\n                m = -n + k\n                for freq_index in range(self.Nfreq):\n                    Enm = self.Enorm[freq_index]*1j**(n+2*m-1)/(2*np.pi**.5)*((2*n+1)*factorial(n-m)/factorial(n+m))**.5\n                    knum = 2*np.pi*self.index/self.wav[freq_index]\n                    factor = 
knum**4*np.abs(Enm)**2*np.abs(self.anm[0,freq_index,j,k])**2\n # integ = compute_norm(n,m,r,knum,'e')*r**2\n integ = n*(n+1)/np.abs(Enm)**2/knum**2*self.Enorm[freq_index]**2\n temp_e[freq_index] += factor*integ\n factor = knum**4*np.abs(Enm)**2*np.abs(self.anm[1,freq_index,j,k])**2\n # integ = compute_norm(n,m,r,knum,'m')*r**2\n integ = n*(n+1)/np.abs(Enm)**2/knum**2*self.Enorm[freq_index]**2\n temp_m[freq_index] += factor*integ\n print(n,m) \n y1 = temp_e/self.norm*area*self.index\n y2 = temp_m/self.norm*area*self.index\n flux += (y1+y2)\n\n py.plot(self.wav,y1, label = labelmap[(0,j)], linewidth=1.5)\n py.plot(self.wav,y2, label = labelmap[(1,j)], linewidth=1.5)\n ymax = set_lim(200,1000,self.wav,y1,prev=prev)\n prev.append(ymax)\n ymax = set_lim(200,1000,self.wav,y2,prev=prev)\n prev.append(ymax)\n\n py.xlabel(\"Wavelength (nm)\")\n py.ylabel(\"Scattering Cross Section ($\\mu m^2$)\")\n\n y = flux\n return y\n\n #area could be made part of the class\n def partial_flux(self, r, area, theta1, theta2, phi1, phi2, Nmax = None):\n \"\"\"Plot total scatering cross-section through a given part of the sphere\"\"\"\n flux = np.zeros(self.Nfreq)\n output = mp.Queue()\n processes = []\n cores = 8\n # for freq_index in range(self.Nfreq):\n # def E_sq(theta,phi):\n # Eval = self.get_E(r,theta,phi,freq_index, Nmax)\n # return np.dot(Eval, np.conj(Eval)).real\n # flux[freq_index] = double_integral(E_sq,theta1,theta2,phi1,phi2)\n # print(freq_index)\n\n for i in range(int(self.Nfreq/cores)):\n for c in range(cores):\n freq_index = i*cores + c\n def E_sq(theta,phi):\n Eval = self.get_E(r,theta,phi,freq_index, Nmax)\n return np.dot(Eval, np.conj(Eval)).real\n p = Process(target=double_integral_parallel, args=(E_sq,theta1,theta2,phi1,phi2,freq_index,output))\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n while not output.empty():\n out = output.get()\n index = out[0]\n flux_val = out[1]\n flux[index] = flux_val*r**2\n processes = []\n\n scat = flux/self.norm*self.index*area\n\n ymax = set_lim(200,1000,self.wav,scat)\n py.xlabel(\"Wavelength (nm)\")\n py.ylabel(\"Scattering Cross Section ($\\mu m^2$)\")\n py.plot(self.wav, scat, label=\"Partial Scattering\", linewidth=1.5)\n\n return scat\n\nif __name__ == \"__main__\":\n anm = np.load(\"mode_output.npy\")\n index = 1.33\n direc = \"/home/john/research/mymeep/projects/silver_nanosphere/\"\n output = \"output/\"\n area = 0.03**2\n wavelength = 700\n\n anm = np.nan_to_num(anm)/10**6 #converts to micron^2\n A = modes(anm,index,direc,output)\n freq_index = A.get_freq_index(wavelength)\n\n # py.figure(6)\n # anm = np.load(\"cluster_azimuthal.npy\")\n # output = \"output_azimuthal/\"\n # anm = np.nan_to_num(anm)/10**6 #converts to micron^2\n # A = modes(anm,index,direc,output)\n # A.plot_box_scattering(area, \"Azi\")\n # anm = np.load(\"cluster_radial.npy\")\n # output = \"output_radial/\"\n # anm = np.nan_to_num(anm)/10**6 #converts to micron^2\n # A = modes(anm,index,direc,output)\n # A.plot_box_scattering(area, \"Rad\")\n # anm = np.load(\"cluster_linear.npy\")\n # output = \"output_linear/\"\n # anm = np.nan_to_num(anm)/10**6 #converts to micron^2\n # A = modes(anm,index,direc,output)\n # A.plot_box_scattering(area, \"Lin\")\n # py.legend()\n\n py.figure(2)\n A.plot_angular_3d(freq_index,10000)\n\n py.figure(4)\n tot = A.scattered_flux(5000,area, Nmax=2)\n py.legend()\n py.xlim((200,1000))\n\n # py.figure(6)\n # A.partial_flux(10000,area,0,np.pi,0,2*np.pi)\n # A.plot_box_scattering(area)\n # py.legend()\n # py.xlim((200,1000))\n\n 
py.figure(5)\n py.plot(A.wav,tot, linewidth=1.5, label=\"Sum of Moments\")\n py.xlabel(\"Wavelength (nm)\")\n py.ylabel(\"Total Scattering Cross Section ($\\mu m^2$)\")\n A.plot_box_scattering(area)\n set_lim(200,1000,A.wav,tot)\n py.legend()\n\n\n py.show()\n","sub_path":"post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":12833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"589022354","text":"from django.urls import path\nfrom . import views\nfrom halls import views as hall_views\nurlpatterns = [\n path('edit/', views.edit, name='review-edit'),\n path('write/', views.write, name='review-write'),\n path('delete/', views.delete, name='review-delete'),\n path('edit//photos', views.review_photos, name='review-photos'),\n path('report/', views.report, name='report'),\n]\n","sub_path":"honesthalls/reviews/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"310116743","text":"# coding=UTF-8\n# **********************************************************************\n# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved\n# written by zen warriors, do not modify!\n# **********************************************************************\n\n\nfrom cobra.mit.meta import ClassMeta\nfrom cobra.mit.meta import StatsClassMeta\nfrom cobra.mit.meta import CounterMeta\nfrom cobra.mit.meta import PropMeta\nfrom cobra.mit.meta import Category\nfrom cobra.mit.meta import SourceRelationMeta\nfrom cobra.mit.meta import NamedSourceRelationMeta\nfrom cobra.mit.meta import TargetRelationMeta\nfrom cobra.mit.meta import DeploymentPathMeta, DeploymentCategory\nfrom cobra.model.category import MoCategory, PropCategory, CounterCategory\nfrom cobra.mit.mo import Mo\n\n\n# ##################################################\nclass Job(Mo):\n \"\"\"\n The configuration job.\n\n \"\"\"\n\n meta = ClassMeta(\"cobra.model.config.Job\")\n\n meta.moClassName = \"configJob\"\n meta.rnFormat = \"run-%(name)s\"\n meta.category = MoCategory.REGULAR\n meta.label = \"Job Instance\"\n meta.writeAccessMask = 0x1\n meta.readAccessMask = 0x3\n meta.isDomainable = False\n meta.isReadOnly = False\n meta.isConfigurable = True\n meta.isDeletable = False\n meta.isContextRoot = False\n\n meta.childClasses.add(\"cobra.model.tag.Tag\")\n meta.childClasses.add(\"cobra.model.fault.Counts\")\n meta.childClasses.add(\"cobra.model.config.ImportP\")\n meta.childClasses.add(\"cobra.model.config.RollbackP\")\n meta.childClasses.add(\"cobra.model.config.ExportP\")\n meta.childClasses.add(\"cobra.model.aaa.RbacAnnotation\")\n meta.childClasses.add(\"cobra.model.fault.Inst\")\n meta.childClasses.add(\"cobra.model.health.Inst\")\n meta.childClasses.add(\"cobra.model.aaa.DomainRolesTuple\")\n meta.childClasses.add(\"cobra.model.config.SubJob\")\n meta.childClasses.add(\"cobra.model.fault.Delegate\")\n meta.childClasses.add(\"cobra.model.tag.Annotation\")\n\n meta.childNamesAndRnPrefix.append((\"cobra.model.tag.Annotation\", \"annotationKey-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.aaa.DomainRolesTuple\", \"domainroles\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.config.ImportP\", \"configimp-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.config.ExportP\", \"configexp-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.aaa.RbacAnnotation\", \"rbacDom-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.tag.Tag\", \"tagKey-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.fault.Counts\", \"fltCnts\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.config.RollbackP\", \"snprlb-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.fault.Inst\", \"fault-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.health.Inst\", \"health\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.config.SubJob\", \"job-\"))\n meta.childNamesAndRnPrefix.append((\"cobra.model.fault.Delegate\", \"fd-\"))\n\n meta.parentClasses.add(\"cobra.model.config.JobCont\")\n\n meta.superClasses.add(\"cobra.model.naming.NamedObject\")\n meta.superClasses.add(\"cobra.model.pol.Obj\")\n meta.superClasses.add(\"cobra.model.pol.Comp\")\n\n meta.rnPrefixes = [\n ('run-', True),\n ]\n\n prop = PropMeta(\"str\", \"ack\", \"ack\", 16588, PropCategory.REGULAR)\n prop.label = \"Triggers job deletion\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.defaultValue = False\n prop.defaultValueStr = \"no\"\n prop._addConstant(\"no\", None, False)\n prop._addConstant(\"yes\", 
None, True)\n meta.props.add(\"ack\", prop)\n\n prop = PropMeta(\"str\", \"annotation\", \"annotation\", 37299, PropCategory.REGULAR)\n prop.label = \"Annotation. Suggested format orchestrator:value\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 128)]\n prop.regex = ['[a-zA-Z0-9_.:-]+']\n meta.props.add(\"annotation\", prop)\n\n prop = PropMeta(\"str\", \"childAction\", \"childAction\", 4, PropCategory.CHILD_ACTION)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"deleteAll\", \"deleteall\", 16384)\n prop._addConstant(\"deleteNonPresent\", \"deletenonpresent\", 8192)\n prop._addConstant(\"ignore\", \"ignore\", 4096)\n meta.props.add(\"childAction\", prop)\n\n prop = PropMeta(\"str\", \"decryptErrors\", \"decryptErrors\", 21514, PropCategory.REGULAR)\n prop.label = \"Secure field decryption errors\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"decryptErrors\", prop)\n\n prop = PropMeta(\"str\", \"descr\", \"descr\", 5582, PropCategory.REGULAR)\n prop.label = \"Description\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 128)]\n prop.regex = ['[a-zA-Z0-9\\\\!#$%()*,-./:;@ _{|}~?&+]+']\n meta.props.add(\"descr\", prop)\n\n prop = PropMeta(\"str\", \"details\", \"details\", 444, PropCategory.REGULAR)\n prop.label = \"Job Details\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"details\", prop)\n\n prop = PropMeta(\"str\", \"dn\", \"dn\", 1, PropCategory.DN)\n prop.label = \"None\"\n prop.isDn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"dn\", prop)\n\n prop = PropMeta(\"str\", \"executeTime\", \"executeTime\", 20143, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"executeTime\", prop)\n\n prop = PropMeta(\"str\", \"extMngdBy\", \"extMngdBy\", 39438, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"undefined\"\n prop._addConstant(\"msc\", \"msc\", 1)\n prop._addConstant(\"undefined\", \"undefined\", 0)\n meta.props.add(\"extMngdBy\", prop)\n\n prop = PropMeta(\"str\", \"fileName\", \"fileName\", 16238, PropCategory.REGULAR)\n prop.label = \"Export Config File name\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"fileName\", prop)\n\n prop = PropMeta(\"str\", \"lastStepDescr\", \"lastStepDescr\", 20424, PropCategory.REGULAR)\n prop.label = \"Last executed workflow step time\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"lastStepDescr\", prop)\n\n prop = PropMeta(\"str\", \"lastStepIndex\", \"lastStepIndex\", 20425, PropCategory.REGULAR)\n prop.label = \"Last executed workflow step index\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"lastStepIndex\", prop)\n\n prop = PropMeta(\"str\", \"lastStepTime\", \"lastStepTime\", 23299, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"lastStepTime\", prop)\n\n prop = PropMeta(\"str\", \"lcOwn\", \"lcOwn\", 9, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"local\"\n prop._addConstant(\"implicit\", \"implicit\", 4)\n prop._addConstant(\"local\", \"local\", 0)\n prop._addConstant(\"policy\", \"policy\", 1)\n 
prop._addConstant(\"replica\", \"replica\", 2)\n prop._addConstant(\"resolveOnBehalf\", \"resolvedonbehalf\", 3)\n meta.props.add(\"lcOwn\", prop)\n\n prop = PropMeta(\"str\", \"modTs\", \"modTs\", 7, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"never\"\n prop._addConstant(\"never\", \"never\", 0)\n meta.props.add(\"modTs\", prop)\n\n prop = PropMeta(\"str\", \"monPolDn\", \"monPolDn\", 13803, PropCategory.REGULAR)\n prop.label = \"Monitoring policy attached to this observable object\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"monPolDn\", prop)\n\n prop = PropMeta(\"str\", \"name\", \"name\", 5994, PropCategory.REGULAR)\n prop.label = \"Name\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n prop.range = [(1, 64)]\n prop.regex = ['[a-zA-Z0-9_.:-]+']\n meta.props.add(\"name\", prop)\n\n prop = PropMeta(\"str\", \"nameAlias\", \"nameAlias\", 28417, PropCategory.REGULAR)\n prop.label = \"Name alias\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 63)]\n prop.regex = ['[a-zA-Z0-9_.-]+']\n meta.props.add(\"nameAlias\", prop)\n\n prop = PropMeta(\"str\", \"operSt\", \"operSt\", 445, PropCategory.REGULAR)\n prop.label = \"Operational State\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"pending\"\n prop._addConstant(\"fail-no-data\", \"fail-no-data\", 7)\n prop._addConstant(\"fail-no-retry\", \"fail-no-retry\", 6)\n prop._addConstant(\"failed\", \"failed\", 5)\n prop._addConstant(\"pending\", \"pending\", 0)\n prop._addConstant(\"retry\", \"retry\", 8)\n prop._addConstant(\"running\", \"running\", 1)\n prop._addConstant(\"success\", \"success\", 2)\n prop._addConstant(\"success-with-warnings\", \"success-with-warnings\", 3)\n prop._addConstant(\"timeout\", \"timeout\", 4)\n meta.props.add(\"operSt\", prop)\n\n prop = PropMeta(\"str\", \"retriesUsed\", \"retriesUsed\", 446, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"retriesUsed\", prop)\n\n prop = PropMeta(\"str\", \"rn\", \"rn\", 2, PropCategory.RN)\n prop.label = \"None\"\n prop.isRn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"rn\", prop)\n\n prop = PropMeta(\"str\", \"status\", \"status\", 3, PropCategory.STATUS)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"created\", \"created\", 2)\n prop._addConstant(\"deleted\", \"deleted\", 8)\n prop._addConstant(\"modified\", \"modified\", 4)\n meta.props.add(\"status\", prop)\n\n prop = PropMeta(\"str\", \"totalStepCount\", \"totalStepCount\", 20426, PropCategory.REGULAR)\n prop.label = \"Number of total workflow steps in the job\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"totalStepCount\", prop)\n\n prop = PropMeta(\"str\", \"type\", \"type\", 20423, PropCategory.REGULAR)\n prop.label = \"Job type\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 1\n prop.defaultValueStr = \"export\"\n prop._addConstant(\"export\", \"export\", 1)\n prop._addConstant(\"import\", \"import\", 0)\n prop._addConstant(\"import-ids\", \"import-ids\", 5)\n prop._addConstant(\"rollback\", \"rollback\", 2)\n prop._addConstant(\"snapshot-download\", \"snapshot-download\", 4)\n prop._addConstant(\"snapshot-upload\", \"snapshot-upload\", 3)\n meta.props.add(\"type\", prop)\n\n prop 
= PropMeta(\"str\", \"uid\", \"uid\", 8, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"uid\", prop)\n\n prop = PropMeta(\"str\", \"userName\", \"userName\", 20422, PropCategory.REGULAR)\n prop.label = \"User who triggered the job\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"userName\", prop)\n\n meta.namingProps.append(getattr(meta.props, \"name\"))\n\n def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):\n namingVals = [name]\n Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)\n\n\n\n# End of package file\n# ##################################################\n","sub_path":"venv/Lib/site-packages/cobra/modelimpl/config/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":11299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"332400208","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 4 15:27:53 2019\r\n\r\n@author: HOME\r\n\"\"\"\r\n\r\na = \"0.255.56.1\"\r\n\r\ndef is_valid_IP(strng):\r\n split_string = strng.split('.')\r\n string_list = []\r\n if len(split_string) > 4 or len(split_string) < 4:\r\n return False\r\n else:\r\n for i in range(len(split_string)):\r\n temp = split_string[i]\r\n if len(temp) > 1 and temp[0] == '0':\r\n return False\r\n elif temp.isdigit():\r\n string_list.append(int(temp))\r\n else:\r\n return False\r\n for j in range(len(string_list)):\r\n if string_list[j] > 255 or string_list[j] < 0:\r\n return False\r\n return True\r\n \r\nis_valid_IP(a)\r\n\r\n'''\r\ndef is_valid_IP(strng):\r\n lst = strng.split('.')\r\n passed = 0\r\n for sect in lst:\r\n if sect.isdigit():\r\n if sect[0] != '0':\r\n if 0 < int(sect) <= 255:\r\n passed += 1\r\n return passed == 4\r\n'''","sub_path":"CodeWars/ip_validation.py","file_name":"ip_validation.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"100935405","text":"class Solution(object):\n def rob(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # Max money can get from nums[:i-1] houses.\n dp = [0] * (len(nums) + 2)\n for i in range(2, len(nums)+2):\n dp[i] = max(nums[i-2] + dp[i-2], dp[i-1])\n return dp[-1]\n\n","sub_path":"python2/l0198_house_robber.py","file_name":"l0198_house_robber.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"176975178","text":"###################################################################################\r\n# Author: Ricardo Pereira\r\n# Date: 29-03-2021\r\n# Last Modified data: 01-04-2021\r\n# Abstract: SiamRPN: training data preparation\r\n# Adapted from arbitularov (https://github.com/arbitularov/SiamRPN-PyTorch)\r\n###################################################################################\r\n\r\nimport os\r\nimport sys\r\nimport cv2\r\nimport time\r\nimport random\r\nimport numpy as np\r\n\r\n\r\nimport torch\r\nimport torch.nn\r\nfrom torch.utils.data import Dataset\r\nfrom torchvision import datasets, transforms, utils\r\n\r\nfrom PIL import Image, ImageOps, ImageStat, ImageDraw\r\nfrom config import config\r\n\r\n\r\nclass Anchor_Boxes(object):\r\n\tdef __init__(self):\r\n\t\tself.scales \t\t= config.anchor_scales\t\t# [8,]\r\n\t\tself.ratios \t\t= config.anchor_ratios\t\t# [0.33, 0.5, 1, 2, 3]\r\n\t\tself.anchor_num\t\t= config.anchor_num\t\t\t# 5\r\n\t\tself.base_size\t\t= config.anchor_base_size\t# 8\r\n\t\tself.score_size\t\t= config.score_size\t\t\t# 17\r\n\t\tself.total_stride\t= config.total_stride\t\t# 12\r\n\t\tself.anchors \t\t= self.generate_anchors()\r\n\r\n\tdef generate_anchors(self):\r\n\t\tanchor = np.zeros((self.anchor_num, 4), dtype = np.float32) # shape = (5,4)\r\n\t\tsize \t= self.base_size * self.base_size \t\t\t\t\t # size = 64\r\n\t\tcount \t= 0\r\n\r\n\t\tfor ratio in self.ratios:\r\n\t\t\tws = int(np.sqrt(size / ratio)) # 13, 11, 8, 5, 4\r\n\t\t\ths = int(ws * ratio)\t\t\t# 4, 5, 8, 10, 12\r\n\t\t\tfor scale in self.scales:\r\n\t\t\t\twws = ws * scale \t\t\t# 104, 88, 64, 40, 32\r\n\t\t\t\thhs = hs * scale \t\t\t# 32, 40, 64, 80, 96\r\n\t\t\t\tanchor[count, 0] = 0\r\n\t\t\t\tanchor[count, 1] = 0\r\n\t\t\t\tanchor[count, 2] = wws\r\n\t\t\t\tanchor[count, 3] = hhs\r\n\t\t\t\tcount += 1\r\n\t\t\r\n\t\tanchor = np.tile(anchor, self.score_size * self.score_size).reshape((-1,4)) # (1445, 4)\r\n\t\tori \t= 25\r\n\t\txx, yy = np.meshgrid([ori + self.total_stride * dx for dx in range(self.score_size)], # (17,17)\r\n\t\t\t\t\t\t\t [ori + self.total_stride * dy for dy in range(self.score_size)]) # (17,17)\r\n\t\txx, yy = np.tile(xx.flatten(), (self.anchor_num, 1)).flatten(), \\\r\n\t\t\t\t np.tile(yy.flatten(), (self.anchor_num, 1)).flatten()\r\n\t\tanchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32) # (1445, 4)\r\n\r\n\t\treturn anchor\r\n\r\n\r\n\tdef pos_neg_anchors(self, bbox):\r\n\t\tnorm_anchors = self.anchors_normalization(self.anchors, bbox)\r\n\t\tiou \t\t = self.compute_IoU(self.anchors, bbox).flatten()\r\n\r\n\t\tpos_index = np.where(iou >= config.pos_threshold)[0][:config.num_max_pos]\r\n\t\tneg_index = np.random.choice(np.where(iou < config.neg_threshold)[0], config.num_max_neg, replace = False)\r\n\r\n\t\tlabel \t = np.ones_like(iou) * (- 1)\r\n\t\tlabel[pos_index] = 1\r\n\t\tlabel[neg_index] = 0\r\n\r\n\t\treturn norm_anchors, label\r\n\r\n\r\n\t# Paper's Eq. 
3\r\n\tdef anchors_normalization(self, anchors, gt_bbox):\r\n\t\tnorm_anchors = np.zeros_like(anchors, dtype = np.float32)\r\n\t\tnorm_anchors[:,0] = (gt_bbox[0] - anchors[:,0]) / (anchors[:,2] + 1e-6)\r\n\t\tnorm_anchors[:,1] = (gt_bbox[1] - anchors[:,1]) / (anchors[:,3] + 1e-6)\r\n\t\tnorm_anchors[:,2] = np.log((gt_bbox[2] + 1e-6) / (anchors[:,2] + 1e-6))\r\n\t\tnorm_anchors[:,3] = np.log((gt_bbox[3] + 1e-6) / (anchors[:,3] + 1e-6))\r\n\r\n\t\treturn norm_anchors\r\n\r\n\r\n\r\n\r\n\tdef compute_IoU(self, anchors, bbox):\r\n\t\tif np.array(bbox).ndim == 1:\r\n\t\t\tbbox = np.array(bbox)[None, :] # shape = (1, 4)\r\n\t\telse:\r\n\t\t\tbbox = np.array(bbox)\r\n\t\tgt_bbox = np.tile(bbox.reshape(1,-1), (anchors.shape[0], 1))\t# shape = (1445, 4)\r\n\r\n\t\t# Transform cx, cy, w, h => (x1,y1) (x2,y2)\r\n\t\tanchor_x1 = anchors[:, 0] - anchors[:, 2] / 2 + 0.5\r\n\t\tanchor_y1 = anchors[:, 1] - anchors[:, 3] / 2 + 0.5\r\n\t\tanchor_x2 = anchors[:, 0] + anchors[:, 2] / 2 - 0.5\r\n\t\tanchor_y2 = anchors[:, 1] + anchors[:, 3] / 2 - 0.5\r\n\r\n\t\tgt_x1 = gt_bbox[:, 0] - gt_bbox[:, 2] / 2 + 0.5\r\n\t\tgt_y1 = gt_bbox[:, 1] - gt_bbox[:, 3] / 2 + 0.5\r\n\t\tgt_x2 = gt_bbox[:, 0] + gt_bbox[:, 2] / 2 - 0.5\r\n\t\tgt_y2 = gt_bbox[:, 1] + gt_bbox[:, 3] / 2 - 0.5\r\n\r\n\t\t# Edges values\r\n\t\txmax = np.max([anchor_x1, gt_x1], axis=0)\r\n\t\tymax = np.max([anchor_y1, gt_y1], axis=0)\r\n\t\txmin = np.min([anchor_x2, gt_x2], axis=0)\r\n\t\tymin = np.min([anchor_y2, gt_y2], axis=0)\r\n\r\n\t\t# Intersection\r\n\t\tinter_area = np.max([xmin - xmax, np.zeros(xmax.shape)], axis=0) * \\\r\n\t\t\t\t\t np.max([ymin - ymax, np.zeros(ymax.shape)], axis=0)\r\n\r\n\t\t# Area of prediction and ground-truth\r\n\t\tarea_anchor = (anchor_x2 - anchor_x1) * (anchor_y2 - anchor_y1)\r\n\t\tarea_gt \t= (gt_x2 - gt_x1) * (gt_y2 - gt_y1)\r\n\r\n\t\t# Intersection over union\r\n\t\tiou = inter_area / (area_anchor + area_gt - inter_area + 1e-6)\r\n\r\n\t\treturn iou\r\n\r\n\r\n\r\n\r\n# Computes and stores the average and current value\r\nclass AverageMeter(object):\r\n\tdef __init__(self):\r\n\t\tself.reset()\r\n\r\n\tdef reset(self):\r\n\t\tself.val \t= 0\r\n\t\tself.avg \t= 0\r\n\t\tself.sum \t= 0\r\n\t\tself.count \t= 0\r\n\r\n\tdef update(self, val, n = 1):\r\n\t\tself.val \t= val\r\n\t\tself.sum \t+= val * n\r\n\t\tself.count \t+= n\r\n\t\tself.avg \t= self.sum / self.count\r\n\r\n\r\n\r\nclass TrainDataLoader(Dataset):\r\n\tdef __init__(self, data_path, check = False):\r\n\t\tself.max_inter \t\t= config.max_inter\r\n\t\tself.data_path \t\t= data_path\r\n\t\tself.ret \t \t\t= {}\r\n\t\tself.count \t\t\t= 0\r\n\t\tself.tmp_dir\t\t= 'tmp/visualization'\r\n\t\tself.check\t\t\t= check\r\n\t\tself.gen_anchors \t= Anchor_Boxes()\r\n\t\tself.anchors \t\t= self.gen_anchors.anchors\r\n\t\tself.ret['anchors'] = self.anchors\r\n\t\tself.sub_class_dir \t= [sub_class_dir for sub_class_dir in os.listdir(self.data_path) if os.path.isdir(os.path.join(self.data_path, sub_class_dir))]\r\n\r\n\t\tif not os.path.isdir(self.tmp_dir):\r\n\t\t\tos.makedirs(self.tmp_dir)\r\n\r\n\r\n\t# Function to pick template and detection images as well their GT\r\n\tdef VOT_pick_img_pairs(self, index_of_subclass):\r\n\t\tassert index_of_subclass < len(self.sub_class_dir), 'index_of_subclass should less than total classes'\r\n\t\t\r\n\t\t# ------------- Images Path ------------- #\r\n\t\tsub_class_dir_basename \t= self.sub_class_dir[index_of_subclass] # Gymnastics\r\n\t\tsub_class_dir_path \t\t= os.path.join(self.data_path, sub_class_dir_basename) # 
..\\Gymnastics\r\n\t\tsub_class_img_name \t\t= [img_name for img_name in os.listdir(sub_class_dir_path) if not img_name.find('.jpg') == -1]\r\n\t\tsub_class_img_name \t\t= sorted(sub_class_img_name) # 000001.jpg ...\r\n\t\tsub_class_img_num \t\t= len(sub_class_img_name) # 207\r\n\t\tsub_class_gt_name \t\t= 'groundtruth.txt'\r\n\r\n\t\tstatus = True\r\n\t\twhile status:\r\n\t\t\tif self.max_inter >= sub_class_img_num-1:\r\n\t\t\t\tself.max_inter = sub_class_img_num//2\r\n\r\n\t\t\t#template_index = np.clip(random.choice(range(0, max(1, sub_class_img_num - self.max_inter))), 0, sub_class_img_num-1)\r\n\t\t\t#detection_index= np.clip(random.choice(range(1, max(2, self.max_inter))) + template_index, 0, sub_class_img_num-1)\r\n\t\t\ttemplate_index = 50\r\n\t\t\tdetection_index = 113\r\n\r\n\t\t\ttemplate_img_path \t= os.path.join(sub_class_dir_path, sub_class_img_name[template_index])\r\n\t\t\tdetection_img_path \t= os.path.join(sub_class_dir_path, sub_class_img_name[detection_index])\r\n\t\t\tgt_path\t\t\t\t= os.path.join(sub_class_dir_path, sub_class_gt_name)\r\n\r\n\t\t# ------------- Labels ------------- #\r\n\t\t\twith open(gt_path, 'r') as f:\r\n\t\t\t\tgt_lines \t= f.readlines()\r\n\t\t\ttemplate_gt \t= [abs(int(float(i))) for i in gt_lines[template_index].strip('\\n').split(',')[:4]]\r\n\t\t\tdetection_gt \t= [abs(int(float(i))) for i in gt_lines[detection_index].strip('\\n').split(',')[:4]]\r\n\r\n\t\t\tif template_gt[2]*template_gt[3]*detection_gt[2]*detection_gt[3] != 0:\r\n\t\t\t\tstatus = False\r\n\t\t\telse:\r\n\t\t\t\tprint('Warning: encounter object missing, reinitializing...')\r\n\r\n\t\t# ------------- Save Template and Detection info ------------- #\r\n\t\tself.ret['template_img_idx'] \t\t= template_index\r\n\t\tself.ret['detection_img_idx']\t\t= detection_index\r\n\t\tself.ret['template_img_path']\t\t= template_img_path\r\n\t\tself.ret['detection_img_path']\t\t= detection_img_path\r\n\t\tself.ret['template_target_x1y1wh'] \t= template_gt\r\n\t\tself.ret['detection_target_x1y1wh']\t= detection_gt\r\n\t\ttemplate_x1y1wh, detection_x1y1wh \t= template_gt.copy(), detection_gt.copy()\r\n\t\tself.ret['template_target_xywh']\t= np.array([template_x1y1wh[0]+template_x1y1wh[2]//2, template_x1y1wh[1]+template_x1y1wh[3]//2, template_x1y1wh[2], template_x1y1wh[3]], np.float32)\r\n\t\tself.ret['detection_target_xywh']\t= np.array([detection_x1y1wh[0]+detection_x1y1wh[2]//2, detection_x1y1wh[1]+detection_x1y1wh[3]//2, detection_x1y1wh[2], detection_x1y1wh[3]], np.float32)\r\n\r\n\t\tif self.check:\r\n\t\t\tcheck_dir_path = os.path.join(self.tmp_dir, '0_check_template_detection_bb')\r\n\t\t\tif not os.path.exists(check_dir_path):\r\n\t\t\t\tos.makedirs(check_dir_path)\r\n\r\n\t\t\ttemplate_img \t= Image.open(self.ret['template_img_path'])\r\n\t\t\tx,y,w,h \t\t= self.ret['template_target_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= ImageDraw.Draw(template_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=2, fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_template_img.jpg'.format(self.count))\r\n\t\t\ttemplate_img.save(save_path)\r\n\r\n\t\t\tdetection_img \t= Image.open(self.ret['detection_img_path'])\r\n\t\t\tx,y,w,h \t\t= self.ret['detection_target_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= ImageDraw.Draw(detection_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], 
width=2, fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_detection_img.jpg'.format(self.count))\r\n\t\t\tdetection_img.save(save_path)\r\n\r\n\t\r\n\tdef VOT_sub_class_img_pairs(self, img_index):\r\n\t\t# ------------- Images Path ------------- #\r\n\t\tsub_class_dir_basename \t= 'Gymnastics' # Gymnastics\r\n\t\tsub_class_dir_path \t\t= os.path.join(self.data_path, sub_class_dir_basename) # ..\\Gymnastics\r\n\t\tsub_class_img_name \t\t= [img_name for img_name in os.listdir(sub_class_dir_path) if not img_name.find('.jpg') == -1]\r\n\t\tsub_class_img_name \t\t= sorted(sub_class_img_name) # 000001.jpg ...\r\n\t\tsub_class_img_num \t\t= len(sub_class_img_name) # 207\r\n\t\tsub_class_gt_name \t\t= 'groundtruth.txt'\r\n\r\n\t\tstatus = True\r\n\t\twhile status:\r\n\r\n\t\t\ttemplate_index = 0\r\n\t\t\tdetection_index = img_index\r\n\r\n\t\t\ttemplate_img_path \t= os.path.join(sub_class_dir_path, sub_class_img_name[template_index])\r\n\t\t\tdetection_img_path \t= os.path.join(sub_class_dir_path, sub_class_img_name[detection_index])\r\n\t\t\tgt_path\t\t\t\t= os.path.join(sub_class_dir_path, sub_class_gt_name)\r\n\r\n\t\t# ------------- Labels ------------- #\r\n\t\t\twith open(gt_path, 'r') as f:\r\n\t\t\t\tgt_lines \t= f.readlines()\r\n\t\t\ttemplate_gt \t= [abs(int(float(i))) for i in gt_lines[template_index].strip('\\n').split(',')[:4]]\r\n\t\t\tdetection_gt \t= [abs(int(float(i))) for i in gt_lines[detection_index].strip('\\n').split(',')[:4]]\r\n\r\n\t\t\tif template_gt[2]*template_gt[3]*detection_gt[2]*detection_gt[3] != 0:\r\n\t\t\t\tstatus = False\r\n\t\t\telse:\r\n\t\t\t\tprint('Warning: encounter object missing, reinitializing...')\r\n\r\n\t\t# ------------- Save Template and Detection info ------------- #\r\n\t\tself.ret['template_img_idx'] \t\t= template_index\r\n\t\tself.ret['detection_img_idx']\t\t= detection_index\r\n\t\tself.ret['template_img_path']\t\t= template_img_path\r\n\t\tself.ret['detection_img_path']\t\t= detection_img_path\r\n\t\tself.ret['template_target_x1y1wh'] \t= template_gt\r\n\t\tself.ret['detection_target_x1y1wh']\t= detection_gt\r\n\t\ttemplate_x1y1wh, detection_x1y1wh \t= template_gt.copy(), detection_gt.copy()\r\n\t\tself.ret['template_target_xywh']\t= np.array([template_x1y1wh[0]+template_x1y1wh[2]//2, template_x1y1wh[1]+template_x1y1wh[3]//2, template_x1y1wh[2], template_x1y1wh[3]], np.float32)\r\n\t\tself.ret['detection_target_xywh']\t= np.array([detection_x1y1wh[0]+detection_x1y1wh[2]//2, detection_x1y1wh[1]+detection_x1y1wh[3]//2, detection_x1y1wh[2], detection_x1y1wh[3]], np.float32)\r\n\r\n\t\tif self.check:\r\n\t\t\tcheck_dir_path = os.path.join(self.tmp_dir, '0_check_template_detection_bb')\r\n\t\t\tif not os.path.exists(check_dir_path):\r\n\t\t\t\tos.makedirs(check_dir_path)\r\n\r\n\t\t\ttemplate_img \t= Image.open(self.ret['template_img_path'])\r\n\t\t\tx,y,w,h \t\t= self.ret['template_target_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= ImageDraw.Draw(template_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=2, fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_template_img.jpg'.format(self.count))\r\n\t\t\ttemplate_img.save(save_path)\r\n\r\n\t\t\tdetection_img \t= Image.open(self.ret['detection_img_path'])\r\n\t\t\tx,y,w,h \t\t= self.ret['detection_target_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= 
ImageDraw.Draw(detection_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=2, fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_detection_img.jpg'.format(self.count))\r\n\t\t\tdetection_img.save(save_path)\r\n\r\n\r\n\t# Function to pre-process template and detection images\r\n\tdef imgs_pre_processing (self):\r\n\r\n\t\tdef window_size(bbox, size_z, size_x, context_amount):\r\n\t\t\tcx, cy, w, h = bbox\r\n\r\n\t\t\t# Paper's Eqs. 12 and 15\r\n\t\t\twc_xz \t= w + context_amount * (w + h) \t# w + p, where p = (w+h)/2\r\n\t\t\thc_xz \t= h + context_amount * (w + h)\t# h + p, where p = (w+h)/2\r\n\t\t\ts_z \t= int(np.sqrt(wc_xz * hc_xz))\t# s_z = A\r\n\t\t\tscale_z\t= size_z / s_z\r\n\t\t\t#s_x \t= s_z * size_x / size_z # -> approx 2*A \r\n\t\t\ts_x \t= s_z * 2\t# 2*A\r\n\r\n\t\t\treturn s_z, s_x, scale_z\r\n\r\n\t\t# ------------- Template ------------- #\r\n\t\ttemplate_img \t\t= Image.open(self.ret['template_img_path'])\r\n\t\ttemplate_img \t\t= np.array(template_img)\r\n\t\ttemplate_img_mean \t= np.mean(template_img, axis=(0, 1))\r\n\r\n\t\ts_z, s_x, scale = window_size(self.ret['template_target_xywh'],\r\n\t\t\tconfig.template_img_size, config.detection_img_size, config.context)\r\n\r\n\t\ttemplate_crop_img, scale_z = self.crop_and_pad(template_img, self.ret['template_target_xywh'],\r\n\t\t\tconfig.template_img_size, s_z, 'Template', template_img_mean)\r\n\r\n\t\tself.ret['template_crop_img'] = template_crop_img\r\n\r\n\t\tif self.check:\r\n\t\t\tcheck_dir_path = os.path.join(self.tmp_dir, '1_check_template_detection_bb_in padding')\r\n\t\t\tif not os.path.exists(check_dir_path):\r\n\t\t\t\tos.makedirs(check_dir_path)\r\n\r\n\t\t\ttemplate_img = Image.fromarray(self.ret['template_crop_img'].copy(),'RGB')\r\n\t\t\tsave_path \t = os.path.join(check_dir_path, 'idx_{:04d}_template_cropped_resized.jpg'.format(self.count))\r\n\t\t\ttemplate_img.save(save_path)\r\n\r\n\t\t# ------------- Detection ------------- #\r\n\t\tdetection_img \t \t= Image.open(self.ret['detection_img_path'])\r\n\t\tdetection_img \t\t= np.array(detection_img)\r\n\t\tdetection_img_mean\t= np.mean(detection_img, axis=(0, 1))\r\n\t\tcx, cy, w, h \t\t= self.ret['detection_target_xywh']\r\n\r\n\t\tdetection_crop_img, scale_x = self.crop_and_pad(detection_img, self.ret['detection_target_xywh'],\r\n\t\t\tconfig.detection_img_size, s_x, 'Detection', detection_img_mean)\r\n\r\n\t\tsize_x \t= config.detection_img_size\r\n\t\tw_x \t= w * scale_x\r\n\t\th_x \t= h * scale_x\r\n\r\n\t\tx1, y1 \t= int(round((size_x + 1) / 2 - w_x / 2)), int(round((size_x + 1) / 2 - h_x / 2))\r\n\t\tx2, y2 \t= int(round((size_x + 1) / 2 + w_x / 2)), int(round((size_x + 1) / 2 + h_x / 2))\r\n\t\tcx \t\t= int(round(x1 + w_x / 2))\r\n\t\tcy \t\t= int(round(y1 + h_x / 2))\r\n\r\n\r\n\t\tself.ret['detection_crop_img'] \t\t\t= detection_crop_img\r\n\t\tself.ret['detection_crop_resized_xywh']\t= np.array((cx, cy, w_x, h_x), dtype = np.int16)\r\n\r\n\r\n\t\tif self.check:\r\n\t\t\tdetection_img = Image.fromarray(self.ret['detection_crop_img'].copy(),'RGB')\r\n\t\t\tsave_path \t = os.path.join(check_dir_path, 'idx_{:04d}_detection_padding_resized.jpg'.format(self.count))\r\n\t\t\tdetection_img.save(save_path)\r\n\r\n\t\t\tx, y, w, h \t= self.ret['detection_crop_resized_xywh'].copy()\r\n\t\t\tx1,y1,x2,y2 \t= int(x-w//2), int(y-h//2), int(x+w//2), int(y+h//2)\r\n\t\t\tdraw \t\t\t= ImageDraw.Draw(detection_img)\r\n\t\t\tdraw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=2, 
fill='red')\r\n\t\t\tsave_path \t\t= os.path.join(check_dir_path, 'idx_{:04d}_detection_padding_resized_bb.jpg'.format(self.count))\r\n\t\t\tdetection_img.save(save_path)\r\n\r\n\r\n\tdef crop_and_pad(self, img, bbox, model_sz, original_sz, img_type, img_mean = None):\r\n\r\n\t\tdef round_up(value):\r\n\t\t\treturn round(value + 1e-6 + 1000) - 1000\r\n\r\n\t\tcx, cy, w, h \t= bbox\r\n\t\timg_h, img_w, k = img.shape\r\n\t\t\r\n\t\txmin = cx - (original_sz - 1) / 2\r\n\t\txmax = xmin + original_sz - 1\r\n\t\tymin = cy - (original_sz - 1) / 2\r\n\t\tymax = ymin + original_sz - 1\r\n\r\n\t\tleft \t= int(round_up(max(0., -xmin)))\r\n\t\ttop \t= int(round_up(max(0., -ymin)))\r\n\t\tright \t= int(round_up(max(0., xmax - img_w + 1)))\r\n\t\tbottom \t= int(round_up(max(0., ymax - img_h + 1)))\r\n\r\n\t\txmin = int(round_up(xmin + left))\r\n\t\txmax = int(round_up(xmax + left))\r\n\t\tymin = int(round_up(ymin + top))\r\n\t\tymax = int(round_up(ymax + top))\r\n\r\n\t\tif any([top, bottom, left, right]):\r\n\t\t\tret_img = np.zeros((img_h + top + bottom, img_w + left + right, k), np.uint8)\r\n\t\t\tret_img[top:top + img_h, left:left + img_w, :] = img\r\n\t\t\tif top: \r\n\t\t\t\tret_img[0:top, left:left + img_w, :] = img_mean\r\n\t\t\tif bottom:\r\n\t\t\t\tret_img[img_h + top:, left:left + img_w, :] = img_mean\r\n\t\t\tif left:\r\n\t\t\t\tret_img[:, 0:left, :] = img_mean\r\n\t\t\tif right:\r\n\t\t\t\tret_img[:, img_w + left:, :] = img_mean\r\n\t\t\timg_patch_original = ret_img[int(ymin):int(ymax + 1), int(xmin):int(xmax + 1), :]\r\n\t\telse:\r\n\t\t\timg_patch_original = img[int(ymin):int(ymax + 1), int(xmin):int(xmax + 1), :]\r\n\r\n\t\tif not np.array_equal(model_sz, original_sz):\r\n\t\t\timg_patch = cv2.resize(img_patch_original, (model_sz, model_sz))\r\n\t\telse:\r\n\t\t\timg_patch = img_patch_original\r\n\r\n\t\tscale = model_sz / img_patch_original.shape[0]\r\n\r\n\t\treturn img_patch, scale\r\n\r\n\r\n\tdef pick_pos_neg_anchors(self):\r\n\t\tnorm_anchors, pos_neg_anchors = self.gen_anchors.pos_neg_anchors(self.ret['detection_crop_resized_xywh'])\r\n\r\n\t\tself.ret['norm_anchors'] \t= norm_anchors\r\n\t\tself.ret['pos_neg_anchors']\t= pos_neg_anchors\r\n\r\n\t\tif self.check:\r\n\t\t\tcheck_dir_path = os.path.join(self.tmp_dir, '2_check_anchor_boxes')\r\n\t\t\tif not os.path.exists(check_dir_path):\r\n\t\t\t\tos.makedirs(check_dir_path)\r\n\r\n\r\n\t\t\tdetection_img = Image.fromarray(self.ret['detection_crop_img'].copy(),'RGB')\r\n\t\t\tdetection_img_all_anchors = detection_img.copy()\r\n\t\t\tdraw \t\t = ImageDraw.Draw(detection_img_all_anchors)\r\n\t\t\tx, y, w, h \t = self.ret['detection_crop_resized_xywh'].copy()\r\n\r\n\t\t\t# ------------- Draw all generated Anchor Boxes ------------- #\r\n\t\t\t# Transform anchors cx, cy, w, h => (x1,y1) (x2,y2)\r\n\t\t\tanchor_x1 = self.anchors[:, 0] - self.anchors[:, 2] / 2 + 0.5\r\n\t\t\tanchor_y1 = self.anchors[:, 1] - self.anchors[:, 3] / 2 + 0.5\r\n\t\t\tanchor_x2 = self.anchors[:, 0] + self.anchors[:, 2] / 2 - 0.5\r\n\t\t\tanchor_y2 = self.anchors[:, 1] + self.anchors[:, 3] / 2 - 0.5\r\n\r\n\t\t\tfor idx in range(self.anchors.shape[0]):\r\n\t\t\t\tan_x1, an_y1, an_x2, an_y2 = anchor_x1[idx], anchor_y1[idx], anchor_x2[idx], anchor_y2[idx]\r\n\t\t\t\tdraw.line([(an_x1, an_y1), (an_x2, an_y1), (an_x2, an_y2), (an_x1, an_y2), (an_x1, an_y1)], width=1, fill='blue')\r\n\r\n\t\t\tsave_path = os.path.join(check_dir_path, 'idx_{:04d}_detection_all_anchor_boxes.jpg'.format(self.count))\r\n\t\t\tdetection_img_all_anchors.save(save_path)\r\n\r\n\t\t\t# 
------------- Draw positive and negative Anchor Boxes ------------- #\r\n\t\t\tdetection_img_pos_neg_anchors = detection_img.copy()\r\n\t\t\tdraw = ImageDraw.Draw(detection_img_pos_neg_anchors)\r\n\r\n\t\t\tanchor_labels = self.ret['pos_neg_anchors']\r\n\t\t\tpos_index = np.where(anchor_labels == 1)[0]\r\n\t\t\tneg_index = np.where(anchor_labels == 0)[0]\r\n\r\n\t\t\tfor idx, pos_idx in enumerate(pos_index):\r\n\t\t\t\tan_x1, an_y1, an_x2, an_y2 = anchor_x1[pos_idx], anchor_y1[pos_idx], anchor_x2[pos_idx], anchor_y2[pos_idx]\r\n\t\t\t\tdraw.line([(an_x1, an_y1), (an_x2, an_y1), (an_x2, an_y2), (an_x1, an_y2), (an_x1, an_y1)], width=1, fill='green')\r\n\t\t\tsave_path = os.path.join(check_dir_path, 'idx_{:04d}_detection_pos_anchor_boxes.jpg'.format(self.count))\r\n\t\t\tdetection_img_pos_neg_anchors.save(save_path)\r\n\r\n\t\t\tfor idx, neg_idx in enumerate(neg_index):\r\n\t\t\t\tan_x1, an_y1, an_x2, an_y2 = anchor_x1[neg_idx], anchor_y1[neg_idx], anchor_x2[neg_idx], anchor_y2[neg_idx]\r\n\t\t\t\tdraw.line([(an_x1, an_y1), (an_x2, an_y1), (an_x2, an_y2), (an_x1, an_y2), (an_x1, an_y1)], width=1, fill='red')\r\n\t\t\tsave_path = os.path.join(check_dir_path, 'idx_{:04d}_detection_pos_neg_anchor_boxes.jpg'.format(self.count))\r\n\t\t\tdetection_img_pos_neg_anchors.save(save_path)\r\n\r\n\r\n\tdef transform(self):\r\n\t\ttransform \t\t = transforms.Compose([transforms.ToTensor()])\r\n\r\n\t\ttemplate_tensor = transform(self.ret['template_crop_img'].copy())\r\n\t\tdetection_tensor = transform(self.ret['detection_crop_img'].copy())\r\n\r\n\t\tself.ret['template_tensor'] \t\t= template_tensor #shape = [1, 3, 127, 127]\r\n\t\tself.ret['detection_tensor'] \t\t= detection_tensor\r\n\t\tself.ret['norm_anchors_tensor']\t \t= torch.Tensor(self.ret['norm_anchors'])\r\n\t\tself.ret['pos_neg_anchors_tensor']\t= torch.Tensor(self.ret['pos_neg_anchors'])\r\n\r\n\r\n\tdef __len__(self):\r\n\t\treturn(207)\r\n\t\t#return len(self.sub_class_dir)\r\n\r\n\tdef __getitem__(self, index):\r\n\t\tself.VOT_sub_class_img_pairs(index)\r\n\t\tself.imgs_pre_processing()\r\n\t\tself.pick_pos_neg_anchors()\r\n\t\tself.transform()\r\n\t\tself.count += 1\r\n\t\t\r\n\t\treturn self.ret['template_tensor'], self.ret['detection_tensor'], self.ret['norm_anchors_tensor'], self.ret['pos_neg_anchors_tensor']\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tdataset_path = 'S:\\\\Datasets\\\\VOT2013\\\\Data\\\\'\r\n\r\n\ttrain_loader = TrainDataLoader(dataset_path, check = True)\r\n\tindex_list \t = range(train_loader.__len__())\r\n\tfor i in range(1):\r\n\t\tprint('\\nImage ' + str(i))\r\n\t\ttrain_loader.__getitem__(random.choice(index_list))\r\n\t\r\n\r\n\r\n","sub_path":"data_train.py","file_name":"data_train.py","file_ext":"py","file_size_in_byte":21159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"522812235","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPeekaboo Download\n\"\"\"\n\nfrom scrappeekaboo import ScrapPeekaboo\nfrom dbmanager import DBManager\nfrom filemanager import FileManager\n\n\n# first run\ndef setup():\n db = DBManager()\n db.reset()\n\n\n# get album links\ndef get_album_links():\n db = DBManager()\n scrap = ScrapPeekaboo()\n scrap.db = db\n scrap.get_album_links()\n\n\n# get sources for files, comments ect\ndef get_album_content():\n db = DBManager()\n scrap = ScrapPeekaboo()\n scrap.db = db\n scrap.get_album_content(db.get_album_links())\n\n\n# download files\ndef download_files():\n db = DBManager()\n fm = FileManager()\n scrap = ScrapPeekaboo(fm)\n scrap.db = db\n scrap.download_all_files()\n\n\n\n scrap.get_album_content(db.get_album_links())\n\n\n# view database\ndef view():\n db = DBManager()\n db.sel(\"SELECT * FROM File \")\n db.sel(\"SELECT * FROM Comment \")\n\n\n# view()\n# get_album_links()\n# get_album_content()\ndownload_files()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"563207986","text":"\"\"\"Unit tests for the data model.\"\"\"\n\nimport json\nimport unittest\n\n\nclass DataModelTest(unittest.TestCase):\n \"\"\"Unit tests for the data model.\"\"\"\n\n def setUp(self):\n with open(\"datamodel.json\") as datamodel_json:\n self.datamodel = json.load(datamodel_json)\n\n def test_top_level_keys(self):\n \"\"\"Test that the top level keys are correct.\"\"\"\n self.assertEqual(set([\"metrics\", \"subjects\", \"sources\"]), set(self.datamodel.keys()))\n\n def test_metrics_have_sources(self):\n \"\"\"Test that each metric has one or more sources.\"\"\"\n for metric in self.datamodel[\"metrics\"].values():\n self.assertTrue(len(metric[\"sources\"]) >= 1)\n\n def test_source_parameter_metrics(self):\n \"\"\"Test that the metrics listed for source parameters are metrics supported by the source.\"\"\"\n for source_id, source in self.datamodel[\"sources\"].items():\n for parameter in source[\"parameters\"].values():\n for metric in parameter[\"metrics\"]:\n self.assertTrue(source_id in self.datamodel[\"metrics\"][metric][\"sources\"])\n\n def test_metric_source_parameters(self):\n \"\"\"Test that the sources have at least one parameter for each metric supported by the source.\"\"\"\n for metric_id, metric in self.datamodel[\"metrics\"].items():\n parameter_metrics = []\n for source in metric[\"sources\"]:\n for parameter in self.datamodel[\"sources\"][source][\"parameters\"].values():\n parameter_metrics.extend(parameter[\"metrics\"])\n self.assertTrue(metric_id in parameter_metrics)\n\n def test_multiple_choice_paramters(self):\n \"\"\"Test that multiple choice parameters have both a default value and a list of options.\"\"\"\n for source in self.datamodel[\"sources\"].values():\n for parameter in source[\"parameters\"].values():\n if parameter[\"type\"] == \"multiple_choice\":\n self.assertTrue(\"default_value\" in parameter)\n self.assertTrue(\"values\" in parameter)\n\n def test_addition(self):\n \"\"\"Test each metric had its addition defined correctly.\"\"\"\n for metric in self.datamodel[\"metrics\"].values():\n self.assertTrue(metric[\"addition\"] in (\"max\", \"min\", \"sum\"))\n\n def test_mandatory_parameters(self):\n \"\"\"Test that each metric has a mandatory field with true or false value.\"\"\"\n for source_id, source in self.datamodel[\"sources\"].items():\n for parameter_id, parameter_values in source[\"parameters\"].items():\n self.assertTrue(\n \"mandatory\" in parameter_values,\n f\"The parameter '{parameter_id}' of source '{source_id}' has no 'mandatory' field\")\n self.assertTrue(\n parameter_values[\"mandatory\"] in (True, False),\n f\"The 'mandatory' field of parameter '{parameter_id}' of source '{source_id}' is neither \"\n \"true nor false\")\n\n def test_default_source(self):\n \"\"\"Test that each metric has a default source, and that the default source is listed as possible source.\"\"\"\n for metric in self.datamodel[\"metrics\"].values():\n self.assertTrue(metric[\"default_source\"] in metric[\"sources\"])\n","sub_path":"components/server/tests/unittests/test_datamodel.py","file_name":"test_datamodel.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"506023567","text":"\"\"\"\nGraphic shows a sample comparison of contribution maps for SAI vs. SNR of TREFHT\n\nAuthor : Zachary M. Labe\nDate : 11 August 2022\nVersion : 1 - testing ANN architectures for calculating years since SAI\n\"\"\"\n\n### Import packages\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as c\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport cmocean\nimport cmasher as cmr\nimport numpy as np\nimport calc_Utilities as UT\nimport calc_Stats as dSS\nimport calc_dataFunctions as df\n\n### Plotting defaults \nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n###############################################################################\n###############################################################################\n###############################################################################\n### Data preliminaries \nletters = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"o\",\"p\"]\nreg_nameq = ['Globe','NH','SH','Arctic','Antarctic','narrowTropics','SEAsia','NorthAfrica','Amazon']\nlabels = ['Globe','N. Hemisphere','S. Hemisphere','Arctic','Antarctic','Tropics','Southeast Asia','Central Africa','Amazon']\nregionboxes = ['Arctic','Antarctic','narrowTropics','SEAsia','NorthAfrica','Amazon']\ndirectorydata = '/Users/zlabe/Documents/Research/SolarIntervention/Data/'\ndirectoryfigure = '/Users/zlabe/Documents/Research/SolarIntervention/Figures/'\n###############################################################################\n###############################################################################\nmodelGCMs = ['ARISE']\ndatasetsingle = ['ARISE']\nseasons = ['annual']\nmonthlychoice = seasons[0]\n###############################################################################\n###############################################################################\nland_only = True\nocean_only = False\nravelyearsbinary = False\nravelbinary = False\nlensalso = True\nrandomalso = False\nravel_modelens = False\nravelmodeltime = False\ntimeper = 'historical'\nshuffletype = 'GAUSS'\n###############################################################################\n###############################################################################\nyearsall = np.arange(2035,2069+1,1)\nyearsarise = np.arange(2035,2069+1,1)\nnumOfEns = 10\ndataset_obs = 'ERA5'\n###############################################################################\n###############################################################################\nnum_of_class = len(modelGCMs)\nensTypeExperi = 'ENS'\n###############################################################################\n###############################################################################\n############################################################################### \n### Read in data\nreg_name = 'Globe'\nlat_bounds,lon_bounds = UT.regions(reg_name)\n\ncont = np.load(directorydata + 'TREFHT-SAI_period2Contributions.npz')\ncontribution = cont['cont'][:]\nlats = cont['lat'][:]\nlons = cont['lon'][:]\nsnr = np.load(directorydata + 'TREFHT-SAI_period2SNR.npz')\nsnrall = snr['snr']\n\nlon2,lat2 = np.meshgrid(lons,lats)\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Graphs\ndef adjust_spines(ax, spines):\n for 
loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([])\ndef setcolor(x, color):\n for m in x:\n for t in x[m][1]:\n t.set_color(color)\n \nfig = plt.figure(figsize=(9,3))\nax = plt.subplot(121)\n\nvar = contribution\nlimit = np.arange(-0.005,0.00501,0.0001)\nbarlim = np.round(np.arange(-0.005,0.00501,0.005),3)\nlabel = r'\\textbf{SAI-1.5 Temperature [Input$\\times$Weights]}'\n\nm = Basemap(projection='robin',lon_0=0,resolution='l',area_thresh=10000)\nm.drawcoastlines(color='darkgrey',linewidth=0.4)\n\nparallels = np.arange(-90,91,30)\nmeridians = np.arange(-180,180,60)\npar=m.drawparallels(parallels,labels=[False,False,False,False],linewidth=0.3,\n color='w',fontsize=4,zorder=40)\nmer=m.drawmeridians(meridians,labels=[False,False,False,False],linewidth=0.3,\n fontsize=4,color='w',zorder=40)\n\ncircle = m.drawmapboundary(fill_color='dimgray',color='dimgray',\n linewidth=1)\ncircle.set_clip_on(False)\n\ncs1 = m.contourf(lon2,lat2,var,limit,extend='both',latlon=True)\n\ncs1.set_cmap(cmocean.cm.balance)\nm.drawlsmask(land_color=(0,0,0,0),ocean_color='dimgray',lakes=False,zorder=11)\n\ncbar1 = plt.colorbar(cs1,orientation='horizontal',\n extend='both',extendfrac=0.07,drawedges=False,\n fraction=0.03,pad=0.06)\ncbar1.set_label(label,fontsize=10,color='dimgrey',labelpad=2) \ncbar1.set_ticks(barlim)\ncbar1.set_ticklabels(list(map(str,barlim)))\ncbar1.ax.tick_params(axis='x', size=.01,labelsize=8)\ncbar1.outline.set_edgecolor('dimgrey')\n\n###############################################################################\n###############################################################################\n###############################################################################\nax = plt.subplot(122)\n\nvar = snrall\nlimit = np.arange(0,2.1,0.25)\nbarlim = np.round(np.arange(0,2.1,1),2)\n\nlabel = r'\\textbf{SAI-1.5 Temperature [Signal-To-Noise]}'\n\nm = Basemap(projection='robin',lon_0=0,resolution='l',area_thresh=10000)\nm.drawcoastlines(color='darkgrey',linewidth=0.4)\n\nparallels = np.arange(-90,91,30)\nmeridians = np.arange(-180,180,60)\npar=m.drawparallels(parallels,labels=[False,False,False,False],linewidth=0.3,\n color='w',fontsize=4,zorder=40)\nmer=m.drawmeridians(meridians,labels=[False,False,False,False],linewidth=0.3,\n fontsize=4,color='w',zorder=40)\n\ncircle = m.drawmapboundary(fill_color='dimgray',color='dimgray',\n linewidth=1)\ncircle.set_clip_on(False)\n\ncs1 = m.contourf(lon2,lat2,var,limit,extend='max',latlon=True)\n\ncs1.set_cmap(cmr.torch)\nm.drawlsmask(land_color=(0,0,0,0),ocean_color='dimgray',lakes=False,zorder=11)\n\ncbar1 = plt.colorbar(cs1,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False,\n fraction=0.03,pad=0.06)\ncbar1.set_label(label,fontsize=10,color='dimgrey',labelpad=1.1) \ncbar1.set_ticks(barlim)\ncbar1.set_ticklabels(list(map(str,barlim)))\ncbar1.ax.tick_params(axis='x', size=.01,labelsize=8)\ncbar1.outline.set_edgecolor('dimgrey')\n\nplt.tight_layout()\n###############################################################################\n###############################################################################\n###############################################################################\n### Add text\nplt.annotate(r'\\textbf{[%s]}' % letters[0],\n textcoords='figure 
fraction',\n xy=(0,0), xytext=(0.485,0.87),\n fontsize=10,color='k',alpha=1,ha='right')\nplt.annotate(r'\\textbf{[%s]}' % letters[1],\n textcoords='figure fraction',\n xy=(0,0), xytext=(0.98,0.87),\n fontsize=10,color='k',alpha=1,ha='right')\n\nplt.savefig(directoryfigure + 'SampleMaps_SNR-Contributions_TREFHT_period2.png',dpi=1000)\n","sub_path":"Scripts/plot_SampleMaps_SNR-Contributions_TREFHT_period2.py","file_name":"plot_SampleMaps_SNR-Contributions_TREFHT_period2.py","file_ext":"py","file_size_in_byte":7552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"560900183","text":"from flask import Flask, render_template, request,redirect, url_for\nimport pickle\n\n#import nltk\n#nltk.download('stopwords');\n#nltk.download('punkt');\nimport spacy\nnlp = spacy.load('en_core_web_sm');\n\nKW_dict = pickle.load(open(\"Keyword.pkl\", \"rb\"))\nKW_dict_s = pickle.load(open(\"Keyword_s.pkl\", \"rb\"))\nTopic_dict = pickle.load(open(\"Topic.pkl\", \"rb\"))\n\n#from spacy.lang.en.stop_words import STOP_WORDS\n#stopwords = list(STOP_WORDS)\n#stopwords = nltk.corpus.stopwords.words('english')\n#stopwords.extend(['the','i','you','a','c','slu','them','she','he','company'])\n#stopwords.extend(KW_dict.keys())\n\ndef get_recommendation(user_prf, KW_dict):\n '''\n def stopword_RMV(sent):\n res = []\n for word in sent.split():\n if word.lower() not in stopwords:\n res.append(word)\n return ' '.join(res)\n '''\n #doc0 = nlp(stopword_RMV(user_prf))\n doc0 = nlp(user_prf)\n score_dict = {}\n for k, v in KW_dict.items():\n temp_doc = nlp(v)\n score_dict[k] = doc0.similarity(temp_doc)\n\n sorted_score = sorted(score_dict.items(), key=lambda kv: kv[1], reverse=True)\n\n rcm_company = []\n for i in range(5):\n rcm_company.append('#' + str(i + 1) + ': ' + str(sorted_score[i][0].capitalize()))\n\n return rcm_company\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('login.html')\n\n@app.route('/login', methods=['POST','GET'])\ndef login():\n if request.method == 'POST':\n target_company = request.form['target_company']\n target = target_company.lower()\n company = target_company.capitalize()\n return render_template('graph.html', company=company, target=target)\n if request.method == 'GET':\n user_input = request.args.get('user_input')\n return rcm(user_input)\n #return redirect(url_for('rcm', sent = user_input))\n return render_template('login.html')\n\n@app.route('/graph')\ndef graph():\n\n return render_template('graph.html')\n\n@app.route('/rcm')\ndef rcm(sent,KW_dict = KW_dict_s):\n rcm_company=get_recommendation(user_prf=sent, KW_dict=KW_dict)\n c0 = rcm_company[0].split(':')[-1]\n c1 = rcm_company[1].split(':')[-1]\n c2 = rcm_company[2].split(':')[-1]\n c3 = rcm_company[3].split(':')[-1]\n c4 = rcm_company[4].split(':')[-1]\n return render_template('rcm.html',sent=sent,c0=c0,c1=c1,c2=c2,c3=c3,c4=c4)\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"572550024","text":"#!/usr/bin/env python3.6\n#-*- coding:utf-8 -*-\n\n\nimport os\nimport requests\n# import urllib.request\nimport redis\nimport random\nimport math\n\n\n\nheight = [2,3,10,5,7,8,9]\n\nif len(height) % 2 == 0 :\n i = int(len(height) / 2)\n j = i\nelse:\n i = math.floor(len(height)/2)\n j = i\n\narea = (j - i) * min(height[i], height[j])\n\nrange_i = i\nfor x in range(0, range_i+1):\n print(x)\n pass\n tmp_i = range_i - x\n tmp_area = (j - tmp_i) * min(height[tmp_i], height[j])\n if tmp_area > area :\n area = tmp_area\n i = tmp_i\n\nrange_j = j\nfor y in range(0, len(height)- range_j):\n tmp_j = range_j + y\n tmp_area = (tmp_j - i) * min(height[i], height[tmp_j])\n if tmp_area > area :\n area = tmp_area\n j = tmp_j\n\nprint(area, i, j)\n\n ","sub_path":"python3/11.盛最多水的容器.py","file_name":"11.盛最多水的容器.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"603644084","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\n\n\ndef callback(data):\n rospy.loginfo(\"movement: \" + str(data.linear.x))\n\n\ndef listener():\n rospy.init_node('display', anonymous=True)\n rospy.Subscriber(\"cmd_vel\", Twist, callback)\n rospy.spin()\n\n\nif __name__ == '__main__':\n listener()","sub_path":"src/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"402208308","text":"from ravestate.module import Module\nfrom ravestate.constraint import s\nfrom ravestate.property import PropertyBase\nfrom ravestate.state import state\nfrom ravestate.receptor import receptor\nfrom ravestate.wrappers import ContextWrapper\n\nimport ravestate_rawio\nimport ravestate_interloc\nfrom ravestate_verbaliser.verbaliser import get_phrase_list\nimport ravestate_phrases_basic_en\nimport ravestate_ontology\n\nfrom scientio.ontology.node import Node\nfrom scientio.session import Session\nfrom scientio.ontology.ontology import Ontology\n\nfrom reggol import get_logger\nlogger = get_logger(__name__)\n\nDEFAULT_INTERLOC_ID = \"terminal_user\"\n\nwith Module(name=\"consoleio\"):\n\n @state(cond=s(\":startup\"), read=\"interloc:all\")\n def console_input(ctx: ContextWrapper):\n\n @receptor(ctx_wrap=ctx, write=\"rawio:in\")\n def write_console_input(ctx_input, value: str):\n ctx_input[\"rawio:in\"] = value\n\n @receptor(ctx_wrap=ctx, write=\"interloc:all\")\n def push_console_interloc(ctx: ContextWrapper, console_node: Node):\n if ctx.push(parentpath=\"interloc:all\", child=PropertyBase(name=DEFAULT_INTERLOC_ID, default_value=console_node)):\n logger.debug(f\"Pushed {console_node} to interloc:all\")\n\n @receptor(ctx_wrap=ctx, write=\"interloc:all\")\n def pop_console_interloc(ctx: ContextWrapper):\n if ctx.pop(f\"interloc:all:{DEFAULT_INTERLOC_ID}\"):\n logger.debug(f\"Popped interloc:all:{DEFAULT_INTERLOC_ID}\")\n\n while not ctx.shutting_down():\n input_value = input(\"> \")\n write_console_input(input_value)\n\n console_interloc_exists = f\"interloc:all:{DEFAULT_INTERLOC_ID}\" in ctx.enum(\"interloc:all\")\n # push Node if you got a greeting\n if input_value.strip() in get_phrase_list(\"greeting\") and not console_interloc_exists:\n # set up scientio\n sess: Session = ravestate_ontology.get_session()\n onto: Ontology = ravestate_ontology.get_ontology()\n\n # create scientio Node of type Person\n query = Node(metatype=onto.get_type(\"Person\"))\n query.set_name(\"x\")\n console_node_list = sess.retrieve(query)\n if not console_node_list:\n console_node = sess.create(query)\n logger.info(f\"Created new Node in scientio session: {console_node}\")\n elif len(console_node_list) == 1:\n console_node = console_node_list[0]\n else:\n logger.error(f'Found multiple Persons with name {DEFAULT_INTERLOC_ID} in scientio session. Cannot push node to interloc:all!')\n continue\n\n # push interloc-Node\n push_console_interloc(console_node)\n\n # pop Node if you got a farewell\n elif input_value.strip() in get_phrase_list(\"farewells\") and console_interloc_exists:\n pop_console_interloc()\n\n\n @state(read=\"rawio:out\")\n def console_output(ctx):\n print(ctx[\"rawio:out\"])\n","sub_path":"modules/ravestate_conio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"586884783","text":"from selenium import webdriver\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nimport selenium.webdriver.support.expected_conditions as ec\r\nfrom selenium.webdriver.common.by import By\r\nimport openpyxl\r\nfrom openpyxl.styles import Font, Color, colors\r\nfrom datetime import date\r\nimport time\r\nimport os\r\nimport tkinter\r\nfrom tkinter import Tk\r\nfrom tkinter import Label\r\nfrom tkinter import LEFT\r\nfrom tkinter import RIGHT\r\nfrom tkinter import Entry\r\nfrom tkinter import Button\r\nimport tkinter as tk\r\n\r\ndef letsgo():\r\n job=e1.get()\r\n link=e2.get()\r\n def valuegrab(homovalue):\r\n nounits=\"\"\r\n for letter in homovalue:\r\n if letter !=\" \":\r\n nounits+=letter\r\n elif letter ==\" \":\r\n return float(nounits)\r\n #wait for page to load\r\n #blastresults=input('Paste link to BLAST results here:\\n')\r\n #print('Opening BLAST Results Link...')\r\n driver= webdriver.Chrome()\r\n wait= WebDriverWait(driver, 1000)\r\n driver.get(link)\r\n driver.maximize_window()\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"alignments\"]/div[1]/table/tbody/tr[2]/td[1]')))\r\n #importing the primers from BLAST\r\n #done=input('Press \"Enter\" once Primer-BLASTing is complete.')\r\n print('Exracting Results...')\r\n primerlib={}\r\n primer1=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[1]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer1]=[]\r\n primer2=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[1]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer2]=[]\r\n primer3=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[2]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer3]=[]\r\n primer4=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[2]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer4]=[]\r\n primer5=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[3]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer5]=[]\r\n primer6=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[3]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer6]=[]\r\n primer7=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[4]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer7]=[]\r\n primer8=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[4]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer8]=[]\r\n primer9=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[5]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer9]=[]\r\n primer10=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[5]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer10]=[]\r\n primer11=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[6]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer11]=[]\r\n primer12=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[6]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer12]=[]\r\n primer13=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[7]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer13]=[]\r\n primer14=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[7]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer14]=[]\r\n primer15=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[8]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer15]=[]\r\n primer16=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[8]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer16]=[]\r\n primer17=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[9]/table/tbody/tr[2]/td[1]').text\r\n 
primerlib[primer17]=[]\r\n primer18=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[9]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer18]=[]\r\n primer19=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[10]/table/tbody/tr[2]/td[1]').text\r\n primerlib[primer19]=[]\r\n primer20=driver.find_element_by_xpath('//*[@id=\"alignments\"]/div[10]/table/tbody/tr[3]/td[1]').text\r\n primerlib[primer20]=[]\r\n\r\n print('Opening IDT Oligo Analyzer...')\r\n #Set up Web driver\r\n driver.get('https://www.idtdna.com/calc/analyzer/')\r\n driver.maximize_window()\r\n #wait.until(ec.presence_of_element_located((By.XPATH,'//*[@id=\"modal-holiday\"]/div/div/div[3]/a')))\r\n #dumbbutton=driver.find_element_by_xpath('//*[@id=\"modal-holiday\"]/div/div/div[3]/a')\r\n #dumbbutton.click()\r\n #wait.until(ec.visibility_of_element_located((By.ID,'UserName')))\r\n #sign into IDT\r\n username=driver.find_element_by_id('UserName')\r\n username.send_keys(e3.get())\r\n password=driver.find_element_by_id('Password')\r\n password.send_keys(e4.get())\r\n login=driver.find_element_by_id('login-button')\r\n login.click()\r\n #wait for the page to load\r\n wait= WebDriverWait(driver, 1000)\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"textarea-sequence\"]')))\r\n #grab the Text Box\r\n oligoinput=driver.find_element_by_xpath('//*[@id=\"textarea-sequence\"]')\r\n #Collect Hairpin, Homodimer, and Heterodimer Values for Each Primer\r\n print('Analyzing Primers...')\r\n x=0\r\n primerlist=[]\r\n for item in primerlib:\r\n primerlist.append(item)\r\n for primer in primerlib:\r\n #clear the textbox\r\n driver.find_element_by_xpath('//*[@id=\"textarea-sequence\"]').clear()\r\n #enter the Primer Seq\r\n oligoinput.send_keys(primer)\r\n #Melt Temp Analyze\r\n Tm=driver.find_element_by_id('analyze-button')\r\n Tm.click()\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[1]/div[3]/div/div/table/tbody/tr[5]/td[2]/span')))\r\n melted=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[1]/div[3]/div/div/table/tbody/tr[5]/td[2]/span').text\r\n melted=valuegrab(melted)\r\n primerlib[primer].append(melted)\r\n #Hairpin Analyze\r\n checkhp=driver.find_element_by_id('hairpin-button')\r\n checkhp.click()\r\n #wait for it to load the data\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[2]/div[7]/div/div/table/tbody/tr[2]/td[3]')))\r\n #grab hairpin value\r\n hairpinvalue=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[2]/div[7]/div/div/table/tbody/tr[2]/td[3]').text\r\n hairpinvalue=float(hairpinvalue)\r\n primerlib[primer].append(hairpinvalue)\r\n #Homodimer Analyze\r\n checkhod=driver.find_element_by_xpath('//*[@id=\"rmenu\"]/div/div[6]/button')\r\n checkhod.click()\r\n #Grab Homodimer Value\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[3]/div[4]/span[1]')))\r\n homodimervalue=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[3]/div[4]/span[1]').text\r\n homodimervalue=valuegrab(homodimervalue)\r\n #Add Homodimer Value to Library\r\n primerlib[primer].append(homodimervalue)\r\n #Check out the Heterodimer Values\r\n if x%2 ==0:\r\n heterobutton=driver.find_element_by_xpath('//*[@id=\"rmenu\"]/div/div[8]/button')\r\n heterobutton.click()\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[4]/div[3]/div[4]/div/div/textarea')))\r\n 
heterotext=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[4]/div[3]/div[4]/div/div/textarea')\r\n heterotext.send_keys(primerlist[x+1])\r\n heterobutton=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[4]/div[3]/div[5]/div/div/button[2]')\r\n heterobutton.click()\r\n wait.until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"OAResults\"]/div/div[4]/div[6]/span[1]')))\r\n heterovalue=driver.find_element_by_xpath('//*[@id=\"OAResults\"]/div/div[4]/div[6]/span[1]').text\r\n heterovalue=valuegrab(heterovalue)\r\n primerlib[primer].append(heterovalue)\r\n x+=1\r\n today=date.today()\r\n print('Evaluating Primers...')\r\n #check if they're actually solid Primers\r\n x=0\r\n for primer in primerlib:\r\n gold=0\r\n if primerlib[primer][1] >=-1:\r\n gold+=1\r\n if primerlib[primer][2] >-4:\r\n gold+=1\r\n if x%2==0:\r\n if primerlib[primer][3] >-4:\r\n gold +=1\r\n primerlib[primer].append(gold)\r\n x+=1\r\n\r\n goldlist=[]\r\n for primer in primerlib:\r\n goldlist.append(primerlib[primer][-1])\r\n\r\n hitlist=[]\r\n x=0\r\n for value in range(0,20):\r\n if x%2==0:\r\n combo=goldlist[value]+goldlist[value+1]\r\n hitlist.append(combo)\r\n x+=1\r\n\r\n best=max(hitlist)\r\n bestlistpos=[]\r\n y=0\r\n for value in hitlist:\r\n if value == best:\r\n bestlistpos.append(y)\r\n y+=1\r\n\r\n print(\"Exporting Results...\")\r\n #Open the Workbook\r\n wb=openpyxl.Workbook()\r\n sheet1=wb.active\r\n #Make the Template for the Exported Excel Sheet\r\n sheet1.cell(column=1, row=1).value=str(today)+\" \"+job+\" QuickPrime Results\"\r\n sheet1.cell(column=1, row=2).value=\"Sequences:\"\r\n sheet1.cell(column=2, row=2).value=\"Melting Temp:\"\r\n sheet1.cell(column=3, row=2).value=\"Hairpin Delta G:\"\r\n sheet1.cell(column=4, row=2).value=\"Homo-dimer Delta G:\"\r\n sheet1.cell(column=5, row=2).value=\"Hetero-dimer Delta G:\"\r\n #Export the Data from the dictionary\r\n x=0\r\n for row in range(3,23):\r\n seqrow=sheet1.cell(column=1, row=row)\r\n seqrow.value=primerlist[x]\r\n x+=1\r\n x=0\r\n for row in range(3,23):\r\n temprow=sheet1.cell(column=2, row=row)\r\n libkey=primerlist[x]\r\n temprow.value=primerlib[libkey][0]\r\n x+=1\r\n x=0\r\n for row in range(3,23):\r\n hairrow=sheet1.cell(column=3, row=row)\r\n libkey=primerlist[x]\r\n hairrow.value=primerlib[libkey][1]\r\n x+=1\r\n x=0\r\n for row in range(3,23):\r\n selfrow=sheet1.cell(column=4, row=row)\r\n libkey=primerlist[x]\r\n selfrow.value=primerlib[libkey][2]\r\n x+=1\r\n x=0\r\n for row in range(3,23):\r\n if x%2==0:\r\n hetrow=sheet1.cell(column=5, row=row)\r\n libkey=primerlist[x]\r\n hetrow.value=primerlib[libkey][3]\r\n x+=1\r\n for pos in bestlistpos:\r\n pos=(pos*2)+3\r\n sheet1.cell(column=1, row=pos).font=Font(color=colors.BLUE)\r\n sheet1.cell(column=1, row=pos+1).font=Font(color=colors.BLUE)\r\n\r\n #Save the Workbook\r\n print(\"Saving Excel Sheet...\")\r\n today=date.today()\r\n filename=str(today)+\" \"+job+' QuickPrime Results.xlsx'\r\n wb.save('D:/QuickPrimeFiles/OutFiles/'+filename)\r\n os.startfile('D:/QuickPrimeFiles/OutFiles'+filename)\r\n print('Done!')\r\n\r\n\r\n\r\n#GUI Stuff\r\nmaster = tk.Tk()\r\ntk.Label(master, text=\"Enter Job Title:\").grid(row=0)\r\ntk.Label(master, text=\"Enter Blast Results Link:\").grid(row=1)\r\ntk.Label(master, text=\"IDT Username:\").grid(row=2)\r\ntk.Label(master, text=\"IDT Password:\").grid(row=3)\r\n\r\ne1 = tk.Entry(master)\r\ne2 = tk.Entry(master)\r\ne3 = tk.Entry(master)\r\ne4 = tk.Entry(master)\r\n\r\ne1.grid(row=0, column=1)\r\ne2.grid(row=1, 
column=1)\r\ne3.grid(row=2, column=1)\r\ne4.grid(row=3, column=1)\r\n\r\ntk.Button(master, text='Quit', command=master.quit).grid(row=5, column=0, sticky=tk.W, pady=10, padx=10)\r\ntk.Button(master, text='Begin QuickPrime', command=letsgo).grid(row=5, column=1, sticky=tk.W, pady=10, padx=10)\r\n\r\n\r\ntk.mainloop()\r\n","sub_path":"GUIQuickPrime.py","file_name":"GUIQuickPrime.py","file_ext":"py","file_size_in_byte":11312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"294142447","text":"import pytest\nimport os\nimport sys\nimport json\n\nfrom ipwb import indexer\n\nfrom . import testUtil as ipwbTest\n\n\ndef isValidSURT(surt):\n return True # The surt library does not yet have a way to check this\n\n\ndef isValidDatetime(dt):\n return len(dt) == 14 and dt.isdigit()\n\n\ndef isValidJSON(jsonIn):\n try:\n j = json.loads(json.dumps(jsonIn))\n except ValueError:\n return False\n return True\n\n\ndef checkCDXJFields(cdxjEntry):\n (surt, dt, json) = cdxjEntry.split(' ', 2)\n validSURT = isValidSURT(surt)\n validDT = isValidDatetime(dt)\n validJSON = isValidJSON(json)\n\n return validSURT and validDT and validJSON\n\n\ndef checkIPWBJSONFieldPresesence(jsonStr):\n keys = json.loads(jsonStr)\n return 'locator' in keys and 'mime_type' in keys and 'status_code' in keys\n\n\n@pytest.mark.ipfsDaemonInteraction\ndef test_push():\n \"\"\"\n Read WARC, manipulate content to ensure uniqueness, push to IPFS\n WARC should result in two CDXJ entries with three space-limited fields\n each: surt URI, datetime, JSON\n JSON should contain AT LEAST locator, mime_type, and status fields\n \"\"\"\n newWARCPath = ipwbTest.createUniqueWARC()\n # use ipwb indexer to push\n cdxjList = indexer.indexFileAt(newWARCPath, quiet=True)\n cdxj = '\\n'.join(cdxjList)\n\n firstEntry = cdxj.split('\\n')[0]\n firstNonMetadataEntry = ''\n for line in cdxj.split('\\n'):\n if line[0] != '!':\n firstNonMetadataEntry = line\n break\n\n assert checkCDXJFields(firstNonMetadataEntry)\n firstEntryLastField = firstNonMetadataEntry.split(' ', 2)[2]\n assert checkIPWBJSONFieldPresesence(firstEntryLastField)\n","sub_path":"tests/test_randomized_add.py","file_name":"test_randomized_add.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"604098202","text":"\n# coding: utf-8\n# some parametes should be adjusted in here\nclass Constants(object):\n def __init__(self, n_vocab):\n self.Lr = 0.0001\n self.Embedding_size = 250\n self.Content_represent = 250\n self.Style_represent = 500\n self.Ey_filters = [1, 2, 3, 4, 5]\n self.Ey_num_filters = 100\n self.D_filters = [2, 3, 4, 5, 6]\n self.D_num_filters = 100\n self.Ds_filters = [1, 2, 3, 4]\n self.Ds_num_filters = 100\n self.Hidden_size = 248\n self.N_vocab = n_vocab\n self.Temper = 0.0001\n self.Max_len = 40\n self.Min_len = 6 # 6 is the max window size of the filters\n \n\n","sub_path":"ipynb/Constant.py","file_name":"Constant.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"84908051","text":"import logging\nimport argparse\nimport socket\nfrom src.tcp_protobuf_pb2 import Request, Response\nimport threading\n\n\nclass Client():\n def __init__(self, server_ip, server_tcp_port):\n self.server_ip = server_ip\n self.server_tcp_port = server_tcp_port\n\n def recv(self, sock):\n message = b''\n while True:\n data = sock.recv(1024)\n if not data:\n break\n message += data\n # result是从client发来的信息\n result = Response()\n result.ParseFromString(message)\n return result\n\n def send(self, sock, strbits):\n # 先发送一个数据长度过去,来确定结束位置\n sock.sendall((str(len(strbits)) + '\\n\\n').encode('utf-8'))\n # 再发送消息的具体内容\n sock.sendall(strbits)\n\n # 每个操作都单独建立一次TCP连接\n def get(self, key):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server_ip, self.server_tcp_port))\n # 通信格式使用proto文件内的定义,里面有3个成员变量:operation,key,value\n message_bit = Request(operation='get', key=key).SerializeToString()\n self.send(sock, message_bit)\n result = self.recv(sock)\n print(result)\n sock.close()\n\n def put(self, key, value):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server_ip, self.server_tcp_port))\n message_bit = Request(operation='put', key=key, value=value).SerializeToString()\n self.send(sock, message_bit)\n result = self.recv(sock)\n print(result)\n sock.close()\n\n def delete(self, key):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server_ip, self.server_tcp_port))\n message_bit = Request(operation='delete', key=key).SerializeToString()\n self.send(sock, message_bit)\n result = self.recv(sock)\n print(result)\n sock.close()\n\n\ndef client_start(args):\n print('Please press Ctrl+C to exit the client.')\n client = Client(args.server_ip, args.server_tcp_port)\n try:\n while True:\n # 输入一个操作\n operation = input('input operation: ')\n if operation == 'put':\n key = input('input key: ')\n value = input('input value: ')\n client.put(key, value)\n elif operation == 'get':\n key = input('input key: ')\n client.get(key)\n elif operation == 'delete':\n key = input('input key: ')\n client.delete(key)\n else:\n print('operation error!')\n except KeyboardInterrupt:\n print('Close client!')\n\n\nif __name__ == '__main__':\n # 参数设置\n parser = argparse.ArgumentParser(description='Client.')\n parser.add_argument('--server-ip', type=str, default='localhost', metavar='N', help='The ip of dictionary server.')\n parser.add_argument('--server-tcp-port', type=int, default=8000, metavar='N', help='The ip of dictionary server.')\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.INFO)\n client_start(args)","sub_path":"18340013_���琮昊_Project/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"259203749","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n # https://www.bilibili.com/video/BV1VK411A7Gm\n # (n logn) time and O(1) memory\n def merge(self, h1, h2):\n dummy = tail = ListNode()\n while h1 and h2:\n if h1.val < h2.val:\n tail.next = h1\n h1 = h1.next\n else:\n tail.next = h2\n h2 = h2.next\n tail = tail.next\n tail.next = h1 or h2\n return dummy.next\n\n # fast and slow pointer\n def sortList(self, head: ListNode) -> ListNode:\n if not head or not head.next:\n return head\n pre = None\n slow = head\n fast = head\n while fast and fast.next:\n pre = slow\n slow = slow.next\n fast = fast.next.next\n pre.next = None\n return self.merge(self.sortList(head), self.sortList(slow))\n\n\nif __name__ == '__main__':\n l1 = ListNode(-1, ListNode(5, ListNode(3, ListNode(4, ListNode(0)))))\n\n solution = Solution()\n newList = solution.sortList(l1)\n print(\"\\nAfter:\")\n tmp_list = newList\n while tmp_list != None:\n print(str(tmp_list.val), end='->')\n tmp_list = tmp_list.next\n l1 = ListNode(4, ListNode(2, ListNode(1, ListNode(3))))\n\n newList = solution.sortList(l1)\n print(\"\\nAfter:\")\n tmp_list = newList\n while tmp_list != None:\n print(str(tmp_list.val), end='->')\n tmp_list = tmp_list.next","sub_path":"LinkedList/148-SortList.py","file_name":"148-SortList.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"516942763","text":"from graph import Graph\nfrom util import Stack\n\ndef earliest_ancestor(ancestors, starting_node):\n # set up empty graph\n g = Graph()\n # Create list for possible ancestors\n possible_ancestors = []\n\n # add 11 vertices to the graph\n for n in range(1, 12):\n g.add_vertex(n)\n\n # Add the edges\n for ancestor in ancestors:\n # ancestor at position 0 for first vertex \n # and position 1 for the connecting vertex\n g.add_edge(ancestor[0], ancestor[1])\n # Create a list of starting vertices to iterate through\n starting_nodes = [10, 2, 4, 11]\n # iterate through each starting node\n for id in starting_nodes:\n # create tempty stack\n path = Stack()\n # add vertex id to path stack\n path.push([id])\n # create empty set for visited vertices\n visited = set()\n\n # while path stack isn't empty\n while path.size() > 0:\n # pop vertex path from path stack\n vert = path.pop()\n # check if last vertex in path is in visited\n if vert[-1] not in visited:\n # check if vertex is the vertex we want\n if vert[-1] == starting_node:\n # append vertex into list of possible ancestors\n possible_ancestors.append(vert[0])\n else:\n # add vertex to visited list\n visited.add(vert[-1])\n # iterate through vertex's neighbors\n for n in g.get_neighbors(vert[-1]):\n # create list for second path\n second_path = list(vert)\n # append neighbor to second path\n second_path.append(n)\n # push second path into path stack\n path.push(second_path)\n # return -1 if ancestor is equal to starting node (no ancestor)\n if min(possible_ancestors) == starting_node:\n return -1\n # return 10 if 10 is in possible_ancestors list\n elif 10 in possible_ancestors:\n return 10\n # return the minimum of the possible ancestors list if list isn't empty\n elif len(possible_ancestors) > 0:\n return min(possible_ancestors)\n # return -1 for any other scenario (ex; starting node not found in graph)\n else:\n return -1\n \nif __name__ == \"__main__\":\n test_ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)]\n print(earliest_ancestor(test_ancestors, 6))\n print(earliest_ancestor(test_ancestors, 10))\n print(earliest_ancestor(test_ancestors, 9))","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"34"}
+{"seq_id":"300845253","text":"from xml.etree import ElementTree as ET\n\nHGAP=10\nVHAP=20\nLAY_LEFT = 0\nLAY_RIGHT = 1\nLAY_ROOT = 2\n\nclass _node_parser:\n def parse_node(self, ctx, xml_root, xml_node, child, root, node):\n node.go_child(ctx, xml_root, xml_node, child, root);\n\n def parse_icon(self, ctx, xml_root, xml_node, child, root, node):\n node.icon = child.attrib['BUILTIN']\n \n def parse(self, ns, xml_root, xml_node, child, root, node):\n f = getattr(self, \"parse_\"+child.tag, None)\n if f:\n f(ns, xml_root, xml_node, child, root, node)\n\nclass _node_base:\n def __init__(self):\n self.childs = [];\n\n def go(self, ctx, xml_root, xml_node, root):\n self.id = xml_node.attrib['ID']\n self.text = xml_node.attrib['TEXT']\n ctx.root_stack.append(self);\n for child in xml_node.getchildren():\n ctx.parser.parse(ctx, xml_root, xml_node, child, root, self)\n\n def back(self, ctx, xml_root, xml_node, root):\n pass\n\nclass _node_root(_node_base):\n def __init__(self):\n self.childs = []\n self.left_childs = []\n self.right_childs = []\n\n def go_child(self, ctx, xml_root, xml_node, child, root):\n n = None;\n if child.attrib['POSITION'] == \"left\":\n n = _node_left()\n self.left_childs.append(n)\n else:\n n = _node_right()\n self.right_childs.append(n)\n ctx.ns.push(xml_node, child, self, n, True);\n self.childs.append(n)\n \n def back(self, ctx, xml_root, xml_node, root):\n ctx.html_string += '