diff --git "a/5280.jsonl" "b/5280.jsonl" new file mode 100644--- /dev/null +++ "b/5280.jsonl" @@ -0,0 +1,760 @@ +{"seq_id":"6467003","text":"#!/usr/bin/env python\n#coding:utf-8\n\n#: File : yield.py\n#: Version : 1.0\n#: Author : Frank Liu\n#: Created : 2018/3/8\n#: License : \n#: Purpose : 整个流程无锁,由一个线程执行,produce和consumer协作完成任务,故称为“协程”,而非线程的抢占式多任务\n\n\nimport time\n\ndef consumer(): #consumer函数是一个generator(生成器),把一个consumer传入produce,首先调用c.next()\n r = ''\n while True:\n n = yield r #consumer通过yield拿到消息,处理,又通过yield把结果传回\n print('test n:%s' % n)\n if not n:\n return\n print('[CONSUMER] Consuminggggggg %s...' % n)\n time.sleep(1)\n r = '200 ok'\n\ndef produce(c): #生产者\n c.next() #启动生成器\n n = 0 #produce拿到consumer处理的结果,继续生产下一条消息\n while n < 5:\n n = n+1\n print('[PRODUCER]Producing %s...' %n)\n r = c.send(n) # 一旦生产了东西,通过c.send(n)切换到consumer执行\n print('[PRODUCER]Consumerrrrrrrrr return:%s' %r)\n c.close() # produce决定不生产了,通过c.close()关闭consumer,整个过程结束\n\nif __name__ == '__main__':\n c = consumer()\n produce(c)\n\n","sub_path":"func/yield.py","file_name":"yield.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"299387098","text":"# Word Jumble\n#\n# The computer picks a random word and then \"jumbles\" it\n# The player has to guess the original word\n\nimport random\n\n# create a sequence of words to choose from\nWORDS = (\"python\", \"jumble\", \"easy\", \"difficult\", \"answer\", \"xylophone\")\n\n# pick one word randomly from the sequence\nword = random.choice(WORDS)\n\n# hint - first and last letter of a word\nFIRST_LETTER = word[0]\nLAST_LETTER = word[len(word) - 1]\n\n# create a variable to use later to see if the guess is correct\ncorrect = word\n\n# starting points\npoints = 10\n\n# create a jumbled version of the word\njumble = \"\"\nwhile word:\n position = random.randrange(len(word))\n jumble += word[position]\n word = word[:position] + word[position + 1:]\n\n# start the game\nprint(\n \"\"\"\n Welcome to Word Jumble!\n\n Unscramble the letters to make a word.\n (Press the enter key at the prompt to quit.)\n \"\"\"\n)\n\nprint(\"The jumble is:\", jumble)\n\nprint(\"\\nPoints:\", points)\nguess = input(\"\\nYour guess: \")\n\nwhile guess != correct and guess != \"\":\n print(\"\\nSorry that's not it. You can use a hint.\")\n print(\"If you do, you will loose a point.\")\n print(\"You start with 10 points.\")\n\n hint = input(\"\\nDo you need a hint (Yes/No): \")\n hint.lower()\n\n # give the player a hint if wanted; reduce points\n if hint == \"yes\" or hint == \"y\":\n print(\"\\nFirst letter:\", FIRST_LETTER)\n print(\"\\nLast letter:\", LAST_LETTER)\n points -= 1\n\n # end the game if the points reach 0\n if points == 1:\n print(\"\\nUsing one more hint will end your game.\")\n elif points == 0:\n print(\"\\nSorry you are out of points.\")\n break\n\n print(\"\\nPoints:\", points)\n guess = input(\"\\nYour guess: \")\n\nif guess == correct:\n print(\"\\n\\aThat's it! You guessed it!\")\n\nprint(\"\\nThanks for playing. 
Your score:\", points)\n\ninput(\"\\n\\nPress the enter key to exit.\")\n","sub_path":"chapter_4 - for, strings, tuples/word_jumble.py","file_name":"word_jumble.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"1657381","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'base.views.index', name='biweb_index'),\n url(r'^base/', include('base.urls')),\n url(r'^accounting/', include('accounting.urls')),\n\n]\n\n\n\n","sub_path":"biweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"423513625","text":"import os\n\n'''\n------------------------------------------------------------------------\nFirm Size by Productivity\n------------------------------------------------------------------------\n'''\n\nfrom firm7_parameters import *\nfrom firm7_functions import *\nfrom firm7_zspace import *\nfrom firm7_kspace import *\nfrom firm7_geneq import *\nfrom firm7_eqwage import *\n\n# Plot investment rule as a function of productivity\n# plt.figure()\nfig, ax = plt.subplots()\nind = np.argmin(np.absolute(kvec - kstar)) # find where kstar is in grid\nax.plot(z, optI[:, ind - dens * 5] / kvec[ind - dens * 5], 'k', label='k = ' +\n str(kvec[ind - dens * 5]))\nax.plot(z, optI[:, ind] / kvec[ind], 'k:', label='k = ' + str(kvec[ind]))\nax.plot(z, optI[:, ind + dens * 5] / kvec[ind + dens * 5], 'k--', label='k = '\n + str(kvec[ind + dens * 5]))\n# The frame is matplotlib.patches.Rectangle instance surrounding the legend.\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\n# Set the fontsize\nfor label in legend.get_texts():\n label.set_fontsize('large')\nfor label in legend.get_lines():\n label.set_linewidth(1.5) # the legend line width\nplt.xlabel('Productivity')\nplt.ylabel('Optimal Investment Rate')\nplt.title('Policy Function, Investment - stochastic firm w/ adjustment ' +\n 'costs')\noutput_path = os.path.join(output_dir, 'invest_z_firm7')\nplt.savefig(output_path, dpi=200, bbox_inches=\"tight\")\n# plt.show()\nplt.close()\n\n\n# Plot the stationary distribution\nfig, ax = plt.subplots()\nax.plot(np.log(z), Gamma.sum(axis=1))\nplt.xlabel('Productivity')\nplt.ylabel('Density')\nplt.title('Stationary Distribution over Productivity')\noutput_path = os.path.join(output_dir, 'SD_z_firm7')\nplt.savefig(output_path, dpi=200, bbox_inches=\"tight\")\n# plt.show()\nplt.close()\n","sub_path":"Code/firm7_sep/firm7_invprodplot.py","file_name":"firm7_invprodplot.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"627660001","text":"import scrapy\n\n\nclass SinanewSpider(scrapy.Spider):\n name = 'sinanew'\n allowed_domains = [\"baidu.com\"]\n start_urls = ['https://www.bbc.com/news/blogs/the_papers']\n\n def parse(self, response):\n print(response)\n\n hxs = Selector(response=response).xpath('//li')\n print(len(hxs))\n\n \n","sub_path":"scrapt/project/project/spiders/sinanew.py","file_name":"sinanew.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"307021367","text":"import os\n\nfrom flask import Flask, render_template, request, jsonify\nfrom . 
import db\n\n\n# create and configure the app\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_mapping(\n SECRET_KEY=\"dev\", DATABASE=os.path.join(app.instance_path, \"mensajeria.sqlite\")\n)\ntest_config = None\nif test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile(\"config.py\", silent=True)\nelse:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n# ensure the instance folder exists\ntry:\n os.makedirs(app.instance_path)\nexcept OSError:\n pass\n\n# a simple page that says hello\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef inicio():\n return render_template(\"index.html\")\n\n\n@app.route(\"/enviar\", methods=[\"GET\", \"POST\"])\ndef envio():\n if request.method == \"POST\":\n data = request.get_json(force=True, silent=True)\n sender = data[\"uEnviador\"]\n receiver = data[\"uRecibidor\"]\n message = data[\"uContenido\"]\n response = {\"status\": \"success\"}\n try:\n database = db.get_db()\n database.execute(\n \"INSERT INTO messages (sender, receiver, content) VALUES (?, ?, ?)\",\n (sender, receiver, message),\n )\n database.commit()\n except:\n raise\n\n response[\"status\"] = \"error\"\n return jsonify(response)\n\n else:\n return render_template(\"new_message.html\")\n\n\n@app.route(\"/leer\", methods=[\"GET\", \"POST\"])\ndef leer():\n if request.method == \"POST\":\n user = request.form[\"user_name\"]\n print(user)\n database = db.get_db()\n messages = database.execute(\n \"SELECT sender, content FROM messages WHERE receiver = ?\", (user,)\n ).fetchall()\n if messages:\n return render_template(\"inbox.html\", user=user, messages=messages)\n\n else:\n return render_template(\"inbox_select.html\", vacio=True)\n\n else:\n return render_template(\"inbox_select.html\", vacio=False)\n\n\ndb.init_app(app)\n","sub_path":"mensajeria/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"99813794","text":"# Definition for singly-linked list with a random pointer.\n# class RandomListNode:\n# def __init__(self, x):\n# self.label = x\n# self.next = None\n# self.random = None\n\nclass Solution:\n # @param head, a RandomListNode\n # @return a RandomListNode\n def copyRandomList(self, head):\n if head == None:\n return None\n cur = head\n while cur != None:\n node = RandomListNode(cur.label)\n node.next = cur.next\n cur.next = node\n cur = node.next\n\n cur = head\n while cur != None:\n if cur.random != None:\n cur.next.random = cur.random.next\n cur = cur.next.next\n\n dummy = RandomListNode(-1)\n cur = head\n p = dummy\n while cur != None:\n p.next = cur.next\n p = p.next\n cur.next = cur.next.next\n cur = cur.next\n\n return dummy.next\n","sub_path":"leetans/cpList.py","file_name":"cpList.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"374484705","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pylinkirc/plugins/automode.py\n# Compiled at: 2020-04-11 03:31:40\n# Size of source mod 2**32: 14840 bytes\n__doc__ = '\\nautomode.py - Provide simple channel ACL management by giving prefix modes to users matching\\nhostmasks or exttargets.\\n'\nimport collections, string\nfrom pylinkirc import 
conf, structures, utils, world\nfrom pylinkirc.coremods import permissions\nfrom pylinkirc.log import log\nmydesc = 'The \\x02Automode\\x02 plugin provides simple channel ACL management by giving prefix modes to users matching hostmasks or exttargets.'\nmodebot = utils.register_service('automode', default_nick='Automode', desc=mydesc)\nreply = modebot.reply\nerror = modebot.error\ndbname = conf.get_database_name('automode')\ndatastore = structures.JSONDataStore('automode', dbname, default_db=(collections.defaultdict(dict)))\ndb = datastore.store\ndefault_permissions = {'$ircop': ['automode.manage.relay_owned', 'automode.sync.relay_owned',\n 'automode.list']}\n\ndef _join_db_channels(irc):\n \"\"\"\n Joins the Automode service client to channels on the current network in its DB.\n \"\"\"\n if not irc.connected.is_set():\n log.debug('(%s) _join_db_channels: aborting, network not ready yet', irc.name)\n return\n for entry in db:\n netname, channel = entry.split('#', 1)\n channel = '#' + channel\n if netname == irc.name:\n modebot.add_persistent_channel(irc, 'automode', channel)\n\n\ndef main(irc=None):\n \"\"\"Main function, called during plugin loading.\"\"\"\n datastore.load()\n permissions.add_default_permissions(default_permissions)\n if irc:\n for ircobj in world.networkobjects.values():\n _join_db_channels(ircobj)\n\n\ndef die(irc=None):\n \"\"\"Saves the Automode database and quit.\"\"\"\n datastore.die()\n permissions.remove_default_permissions(default_permissions)\n utils.unregister_service('automode')\n\n\ndef _check_automode_access(irc, uid, channel, command):\n \"\"\"Checks the caller's access to Automode.\"\"\"\n log.debug('(%s) Automode: checking access for %s/%s for %s capability on %s', irc.name, uid, irc.get_hostmask(uid), command, channel)\n baseperm = 'automode.%s' % command\n try:\n perms = [baseperm, baseperm + '.*', '%s.%s' % (baseperm, channel)]\n return permissions.check_permissions(irc, uid, perms)\n except utils.NotAuthorizedError:\n if not command.startswith('remote'):\n log.debug('(%s) Automode: falling back to automode.%s.relay_owned', irc.name, command)\n permissions.check_permissions(irc, uid, [baseperm + '.relay_owned'], also_show=perms)\n relay = world.plugins.get('relay')\n if relay is None:\n raise utils.NotAuthorizedError('You are not authorized to use Automode when Relay is disabled. You are missing one of the following permissions: %s or %s.%s' % (\n baseperm, baseperm, channel))\n elif (\n irc.name, channel) not in relay.db:\n raise utils.NotAuthorizedError('The network you are on does not own the relay channel %s.' % channel)\n return True\n raise\n\n\ndef match(irc, channel, uids=None):\n \"\"\"\n Set modes on matching users. 
If uids is not given, check all users in the channel and give\n them modes as needed.\n \"\"\"\n if isinstance(channel, int) or str(channel).startswith(tuple(string.digits)):\n channel = '#' + str(channel)\n else:\n dbentry = db.get(irc.name + channel)\n if not irc.has_cap('has-irc-modes'):\n log.debug('(%s) automode: skipping match() because IRC modes are not supported on this protocol', irc.name)\n return\n if dbentry is None:\n return\n modebot_uid = modebot.uids.get(irc.name)\n outgoing_modes = []\n uids = uids or irc.channels[channel].users\n for mask, modes in dbentry.items():\n for uid in uids:\n if irc.match_host(mask, uid):\n outgoing_modes += [('+' + mode, uid) for mode in modes if mode in irc.prefixmodes]\n log.debug('(%s) automode: Filtered mode list of %s to %s (protocol:%s)', irc.name, modes, outgoing_modes, irc.protoname)\n\n if outgoing_modes:\n if modebot_uid not in irc.users:\n modebot_uid = irc.sid\n log.debug('(%s) automode: sending modes from modebot_uid %s', irc.name, modebot_uid)\n irc.mode(modebot_uid, channel, outgoing_modes)\n irc.call_hooks([modebot_uid, 'AUTOMODE_MODE',\n {'target':channel, \n 'modes':outgoing_modes, 'parse_as':'MODE'}])\n\n\ndef handle_endburst(irc, source, command, args):\n \"\"\"ENDBURST hook handler - used to join the Automode service to channels where it has entries.\"\"\"\n if source == irc.uplink:\n _join_db_channels(irc)\n\n\nutils.add_hook(handle_endburst, 'ENDBURST')\n\ndef handle_join(irc, source, command, args):\n \"\"\"\n Automode JOIN listener. This sets modes accordingly if the person joining matches a mask in the\n ACL.\n \"\"\"\n channel = irc.to_lower(args['channel'])\n match(irc, channel, args['users'])\n\n\nutils.add_hook(handle_join, 'JOIN')\nutils.add_hook(handle_join, 'PYLINK_RELAY_JOIN')\nutils.add_hook(handle_join, 'PYLINK_SERVICE_JOIN')\n\ndef handle_services_login(irc, source, command, args):\n \"\"\"\n Handles services login change, to trigger Automode matching.\n \"\"\"\n for channel in irc.users[source].channels:\n match(irc, channel, [source])\n\n\nutils.add_hook(handle_services_login, 'CLIENT_SERVICES_LOGIN')\nutils.add_hook(handle_services_login, 'PYLINK_RELAY_SERVICES_LOGIN')\n\ndef _get_channel_pair(irc, source, chanpair, perm=None):\n \"\"\"\n Fetches the network and channel given a channel pair, also optionally checking the caller's permissions.\n \"\"\"\n log.debug('(%s) Looking up chanpair %s', irc.name, chanpair)\n if '#' not in chanpair:\n if chanpair.startswith(tuple(string.digits)):\n chanpair = '#' + chanpair\n else:\n try:\n network, channel = chanpair.split('#', 1)\n except ValueError:\n raise ValueError('Invalid channel pair %r' % chanpair)\n\n channel = '#' + channel\n channel = irc.to_lower(channel)\n if network:\n ircobj = world.networkobjects.get(network)\n else:\n ircobj = irc\n if not ircobj:\n raise ValueError('Unknown network %s' % network)\n if perm is not None:\n if ircobj.name != irc.name:\n perm = 'remote' + perm\n _check_automode_access(irc, source, channel, perm)\n return (\n ircobj, channel)\n\n\ndef setacc(irc, source, args):\n \"\"\" \n\n Assigns the given prefix mode characters to the given mask for the channel given. 
Extended targets are supported for masks - use this to your advantage!\n\n Channel pairs are also supported (for operations on remote channels), using the form \"network#channel\".\n\n Examples:\n\n \\x02SETACC #channel *!*@localhost ohv\n\n \\x02SETACC #channel $account v\n\n \\x02SETACC othernet#channel $ircop:Network?Administrator qo\n\n \\x02SETACC #staffchan $channel:#mainchan:op o\n \"\"\"\n if not irc.has_cap('has-irc-modes'):\n error(irc, 'IRC style modes are not supported on this protocol.')\n return\n try:\n chanpair, mask, modes = args\n except ValueError:\n error(irc, 'Invalid arguments given. Needs 3: channel, mask, mode list.')\n return\n else:\n ircobj, channel = _get_channel_pair(irc, source, chanpair, perm='manage')\n dbentry = db[(ircobj.name + channel)]\n modes = modes.lstrip('+')\n dbentry[mask] = modes\n log.info('(%s) %s set modes +%s for %s on %s', ircobj.name, irc.get_hostmask(source), modes, mask, channel)\n reply(irc, 'Done. \\x02%s\\x02 now has modes \\x02+%s\\x02 in \\x02%s\\x02.' % (mask, modes, channel))\n modebot.add_persistent_channel(ircobj, 'automode', channel)\n\n\nmodebot.add_cmd(setacc, aliases=('setaccess', 'set'), featured=True)\n\ndef delacc(irc, source, args):\n \"\"\" \n\n Removes the Automode entry for the given mask or range string, if they exist.\n\n Range strings are indices (entry numbers) or ranges of them joined together with commas: e.g.\n \"1\", \"2-10\", \"1,3,5-8\". Entry numbers are shown by LISTACC.\n \"\"\"\n try:\n chanpair, mask = args\n except ValueError:\n error(irc, 'Invalid arguments given. Needs 2: channel, mask')\n return\n else:\n ircobj, channel = _get_channel_pair(irc, source, chanpair, perm='manage')\n dbentry = db.get(ircobj.name + channel)\n if dbentry is None:\n error(irc, 'No Automode access entries exist for \\x02%s\\x02.' % channel)\n return\n if mask in dbentry:\n del dbentry[mask]\n log.info('(%s) %s removed modes for %s on %s', ircobj.name, irc.get_hostmask(source), mask, channel)\n reply(irc, 'Done. Removed the Automode access entry for \\x02%s\\x02 in \\x02%s\\x02.' % (mask, channel))\n else:\n try:\n new_keys = utils.remove_range(mask, sorted(dbentry.keys()))\n except ValueError:\n error(irc, 'No Automode access entry for \\x02%s\\x02 exists in \\x02%s\\x02.' % (mask, channel))\n return\n else:\n removed = []\n source_host = irc.get_hostmask(source)\n for mask_entry in dbentry.copy():\n if mask_entry not in new_keys:\n del dbentry[mask_entry]\n log.info('(%s) %s removed modes for %s on %s', ircobj.name, source_host, mask_entry, channel)\n removed.append(mask_entry)\n\n reply(irc, 'Done. Removed \\x02%d\\x02 entries on \\x02%s\\x02: %s' % (len(removed), channel, ', '.join(removed)))\n if not dbentry:\n log.debug('Automode: purging empty channel pair %s/%s', ircobj.name, channel)\n del db[ircobj.name + channel]\n modebot.remove_persistent_channel(ircobj, 'automode', channel)\n\n\nmodebot.add_cmd(delacc, aliases=('delaccess', 'del'), featured=True)\n\ndef listacc(irc, source, args):\n \"\"\"\n\n Lists all Automode entries for the given channel.\"\"\"\n try:\n chanpair = args[0]\n except IndexError:\n error(irc, 'Invalid arguments given. Needs 1: channel.')\n return\n else:\n ircobj, channel = _get_channel_pair(irc, source, chanpair, perm='list')\n dbentry = db.get(ircobj.name + channel)\n if not dbentry:\n error(irc, 'No Automode access entries exist for \\x02%s\\x02.' 
% channel)\n return\n reply(irc, ('Showing Automode entries for \\x02%s\\x02:' % channel), private=True)\n for entrynum, entry in enumerate((sorted(dbentry.items())), start=1):\n mask, modes = entry\n reply(irc, ('[%s] \\x02%s\\x02 has modes +\\x02%s\\x02' % (entrynum, mask, modes)), private=True)\n\n reply(irc, 'End of Automode entries list.', private=True)\n\n\nmodebot.add_cmd(listacc, featured=True, aliases=('listaccess', ))\n\ndef save(irc, source, args):\n \"\"\"takes no arguments.\n\n Saves the Automode database to disk.\"\"\"\n permissions.check_permissions(irc, source, ['automode.savedb'])\n datastore.save()\n reply(irc, 'Done.')\n\n\nmodebot.add_cmd(save)\n\ndef syncacc(irc, source, args):\n \"\"\"\n\n Syncs Automode access lists to the channel.\n \"\"\"\n try:\n chanpair = args[0]\n except IndexError:\n error(irc, 'Invalid arguments given. Needs 1: channel.')\n return\n else:\n ircobj, channel = _get_channel_pair(irc, source, chanpair, perm='sync')\n log.info('(%s) %s synced modes on %s', ircobj.name, irc.get_hostmask(source), channel)\n match(ircobj, channel)\n reply(irc, 'Done.')\n\n\nmodebot.add_cmd(syncacc, featured=True, aliases=('sync', 'syncaccess'))\n\ndef clearacc(irc, source, args):\n \"\"\"\n\n Removes all Automode entries for the given channel.\n \"\"\"\n try:\n chanpair = args[0]\n except IndexError:\n error(irc, 'Invalid arguments given. Needs 1: channel.')\n return\n else:\n ircobj, channel = _get_channel_pair(irc, source, chanpair, perm='clear')\n if db.get(ircobj.name + channel):\n del db[ircobj.name + channel]\n log.info('(%s) %s cleared modes on %s', ircobj.name, irc.get_hostmask(source), channel)\n reply(irc, 'Done. Removed all Automode access entries for \\x02%s\\x02.' % channel)\n modebot.remove_persistent_channel(ircobj, 'automode', channel)\n else:\n error(irc, 'No Automode access entries exist for \\x02%s\\x02.' 
% channel)\n\n\nmodebot.add_cmd(clearacc, aliases=('clearaccess', 'clear'), featured=True)","sub_path":"pycfiles/pylinkmobile-0.3.0.tar/automode.cpython-36.py","file_name":"automode.cpython-36.py","file_ext":"py","file_size_in_byte":13042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"345300166","text":"#!/usr/bin/python\n# Filename: pr48.py\nimport time;\n\nti = time.time();\nnum = 0;\n\nfor i in range(1, 1001):\n num += i**i;\n\nprint(str(num)[-10:]);\nprint(\"run time: \", time.time() - ti);\n\n# one liner!\n# str(sum([i**i for i in range(1,1001)]))[-10:]\n","sub_path":"pr48.py","file_name":"pr48.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"80917675","text":"# coding=utf-8\n# Coded by Aleksandr Rodionov\n# rexarrior@yandex.ru\n\n\n# other imports---------------------------------------------------------\nimport os.path\nfrom datetime import date\n\nfrom dateutil import parser\n# License: Apache Software License, BSD License (Dual License)\n\n# imports Core modules--------------------------------------------------\nimport final_analysis\nimport models\nimport rough_analysis\nimport visualizer\nimport converters\nfrom web_crawler import ksrf\n# methods---------------------------------------------------------------\n\n\n# internal methods------------------------------------------------------\nDECISIONS_FOLDER_NAME = 'Decision files'\nJSON_HEADERS_FILENAME = 'DecisionHeaders.json'\nPICKLE_HEADERS_FILENAME = 'DecisionHeaders.pickle'\nPATH_TO_JSON_HEADERS = os.path.join(DECISIONS_FOLDER_NAME,\n JSON_HEADERS_FILENAME)\nPATH_TO_PICKLE_HEADERS = os.path.join(DECISIONS_FOLDER_NAME,\n PICKLE_HEADERS_FILENAME)\nPATH_TO_JSON_GRAPH = 'graph.json'\n\nMY_DEBUG = False\n\ndef collect_headers(pathToFileForSave, pagesNum=None):\n headersOld = ksrf.get_decision_headers(pagesNum)\n headersNew = converters.convert_to_class_format(headersOld, models.DocumentHeader)\n converters.save_pickle(headersNew, pathToFileForSave)\n return headersNew\n\n\ndef check_text_location_for_headers(headers, folder):\n '''\n Find files of the documents of the given headers\n and add path to file in Header.text_location if file was found\n '''\n for key in headers:\n # generate a possible path according to previously established rules\n pathToTextLocation = ksrf.get_possible_text_location(\n key, folder, ext='txt')\n # if path is exist put it to header\n if (os.path.exists(pathToTextLocation)):\n headers[key].text_location = pathToTextLocation\n\n\ndef download_texts_for_headers(headers, folder=DECISIONS_FOLDER_NAME):\n for key in headers:\n if (isinstance(headers[key], models.Header) and\n (headers[key].text_location is None or\n not os.path.exists(headers[key].text_location))):\n oldFormatHeader = headers[key].convert_to_dict()\n ksrf.download_decision_texts({key: oldFormatHeader}, folder)\n\n\ndef load_graph(pathToGraph=PATH_TO_JSON_GRAPH):\n '''\n Load the stored earlier graph from the given filename,\n unpack it with JSON and return as\n [[nodes], [edges: [from, to, weight]]\n '''\n return converters.load_json(pathToGraph)\n\n\n# TO DO: Rewrite function after rewriting final_analysis module\ndef load_and_visualize(pathTograph=PATH_TO_JSON_GRAPH):\n '''\n Load the stored earlier graph from the given filename and\n Visualize it with Visualizer module.\n '''\n graph = load_graph(pathTograph)\n visualizer.visualize_link_graph(graph, 20, 1, (20, 20))\n\n\n# api 
methods-----------------------------------------------------------\n\n\ndef process_period(\n firstDateOfDocsForProcessing=None, lastDateOfDocsForProcessing=None,\n docTypesForProcessing=None,\n firstDateForNodes=None, lastDateForNodes=None,\n nodesIndegreeRange=None, nodesOutdegreeRange=None, nodesTypes=None,\n includeIsolatedNodes=True,\n firstDateFrom=None, lastDateFrom=None, docTypesFrom=None,\n firstDateTo=None, lastDateTo=None, docTypesTo=None,\n weightsRange=None,\n graphOutputFilePath=PATH_TO_JSON_GRAPH,\n showPicture=True, isNeedReloadHeaders=False):\n '''\n Process decisions from the date specified as firstDate to\n the date specified as lastDate.\n Write a graph of result of the processing and, if it was specified,\n draw graph and show it to user.\n '''\n if isinstance(firstDateOfDocsForProcessing, str):\n firstDateOfDocsForProcessing = parser.parse(\n firstDateOfDocsForProcessing, dayfirst=True).date()\n if isinstance(lastDateOfDocsForProcessing, str):\n lastDateOfDocsForProcessing = parser.parse(\n lastDateOfDocsForProcessing, dayfirst=True).date()\n if (firstDateOfDocsForProcessing is not None and\n lastDateOfDocsForProcessing is not None and\n firstDateOfDocsForProcessing > lastDateOfDocsForProcessing):\n raise ValueError(\"date error: The first date is later\"\n \"than the last date.\")\n\n if isinstance(firstDateForNodes, str):\n firstDateForNodes = parser.parse(\n firstDateForNodes, dayfirst=True).date()\n if isinstance(lastDateForNodes, str):\n lastDateForNodes = parser.parse(\n lastDateForNodes, dayfirst=True).date()\n if (firstDateForNodes is not None and\n lastDateForNodes is not None and\n firstDateForNodes > lastDateForNodes):\n raise ValueError(\"date error: The first date is later\"\n \"than the last date.\")\n\n if isinstance(firstDateFrom, str):\n firstDateFrom = parser.parse(\n firstDateFrom, dayfirst=True).date()\n if isinstance(lastDateFrom, str):\n lastDateFrom = parser.parse(\n lastDateFrom, dayfirst=True).date()\n if (firstDateFrom is not None and\n lastDateFrom is not None and\n firstDateFrom > lastDateFrom):\n raise ValueError(\"date error: The first date is later than the last date.\")\n\n if isinstance(firstDateTo, str):\n firstDateTo = parser.parse(\n firstDateTo, dayfirst=True).date()\n if isinstance(lastDateTo, str):\n lastDateTo = parser.parse(\n lastDateTo, dayfirst=True).date()\n if (firstDateTo is not None and\n lastDateTo is not None and\n firstDateTo > lastDateTo):\n raise ValueError(\"date error: The first date is later than the last date.\")\n\n decisionsHeaders = {}\n if (isNeedReloadHeaders or not os.path.exists(PATH_TO_PICKLE_HEADERS)):\n num = 3 # stub, del after web_crawler updating\n decisionsHeaders = collect_headers(PATH_TO_PICKLE_HEADERS, num)\n else:\n decisionsHeaders = converters.load_pickle(PATH_TO_PICKLE_HEADERS)\n\n hFilter = models.HeadersFilter(\n docTypesForProcessing,\n firstDateOfDocsForProcessing, lastDateOfDocsForProcessing)\n usingHeaders = hFilter.get_filtered_headers(decisionsHeaders)\n\n check_text_location_for_headers(usingHeaders, DECISIONS_FOLDER_NAME)\n\n\n download_texts_for_headers(usingHeaders, DECISIONS_FOLDER_NAME)\n\n decisionsHeaders.update(usingHeaders)\n\n converters.save_pickle(decisionsHeaders, PATH_TO_PICKLE_HEADERS)\n\n roughLinksDict = \\\n rough_analysis.get_rough_links_for_multiple_docs(usingHeaders)\n if (rough_analysis.PATH_NONE_VALUE_KEY in roughLinksDict or\n rough_analysis.PATH_NOT_EXIST_KEY in roughLinksDict):\n raise ValueError('Some headers have no text')\n links = 
final_analysis.get_clean_links(roughLinksDict,\n decisionsHeaders)[0]\n \n if MY_DEBUG:\n converters.save_pickle(links, 'allCleanLinks.pickle')\n linkGraph = final_analysis.get_link_graph(links)\n if MY_DEBUG:\n converters.save_pickle(linkGraph, 'linkGraph.pickle')\n nFilter = models.GraphNodesFilter(\n nodesTypes, firstDateForNodes, lastDateForNodes, nodesIndegreeRange,\n nodesOutdegreeRange)\n hFromFilter = models.HeadersFilter(\n docTypesFrom,\n firstDateFrom, lastDateFrom)\n hToFilter = models.HeadersFilter(\n docTypesTo,\n firstDateTo, lastDateTo)\n eFilter = models.GraphEdgesFilter(hFromFilter, hToFilter, weightsRange)\n subgraph = linkGraph.get_subgraph(nFilter, eFilter, includeIsolatedNodes)\n if MY_DEBUG:\n converters.save_pickle(subgraph, 'subgraph.pickle')\n linkGraphLists = (subgraph.get_nodes_as_IDs_list(),\n subgraph.get_edges_as_list_of_tuples())\n\n converters.save_json(linkGraphLists, graphOutputFilePath)\n if showPicture:\n visualizer.visualize_link_graph(linkGraphLists, 20, 1, (40, 40))\n# end of ProcessPeriod--------------------------------------------------\n\n\ndef start_process_with(\n decisionID, depth,\n firstDateForNodes=None, lastDateForNodes=None, nodesIndegreeRange=None,\n nodesOutdegreeRange=None, nodesTypes=None, includeIsolatedNodes=True,\n firstDateFrom=None, lastDateFrom=None, docTypesFrom=None,\n firstDateTo=None, lastDateTo=None, docTypesTo=None,\n weightsRange=None,\n graphOutputFilePath=PATH_TO_JSON_GRAPH,\n showPicture=True, isNeedReloadHeaders=False,\n visualizerParameters=(20, 1, (40, 40))):\n '''\n Start processing decisions from the decision which uid was given and repeat\n this behavior recursively for given depth.\n '''\n if (depth < 0):\n raise \"argument error: depth of the recursion must be large than 0.\"\n\n if isNeedReloadHeaders or not os.path.exists(PATH_TO_PICKLE_HEADERS):\n num = 3 # stub, del after web_crawler updating\n headers = collect_headers(PATH_TO_PICKLE_HEADERS, num)\n else:\n headers = converters.load_pickle(PATH_TO_PICKLE_HEADERS)\n if (decisionID not in headers):\n raise ValueError(\"Unknown uid\")\n\n if isinstance(firstDateForNodes, str):\n firstDateForNodes = parser.parse(\n firstDateForNodes, dayfirst=True).date()\n if isinstance(lastDateForNodes, str):\n lastDateForNodes = parser.parse(\n lastDateForNodes, dayfirst=True).date()\n if (firstDateForNodes is not None and\n lastDateForNodes is not None and\n firstDateForNodes > lastDateForNodes):\n raise ValueError(\"date error: The first date is later than the last date.\")\n\n if isinstance(firstDateFrom, str):\n firstDateFrom = parser.parse(\n firstDateFrom, dayfirst=True).date()\n if isinstance(lastDateFrom, str):\n lastDateFrom = parser.parse(\n lastDateFrom, dayfirst=True).date()\n if (firstDateFrom is not None and\n lastDateFrom is not None and\n firstDateFrom > lastDateFrom):\n raise ValueError(\"date error: The first date is later than the last date.\")\n\n if isinstance(firstDateTo, str):\n firstDateTo = parser.parse(\n firstDateTo, dayfirst=True).date()\n if isinstance(lastDateTo, str):\n lastDateTo = parser.parse(\n lastDateTo, dayfirst=True).date()\n if (firstDateTo is not None and\n lastDateTo is not None and\n firstDateTo > lastDateTo):\n raise ValueError(\"date error: The first date is later than the last date.\")\n\n check_text_location_for_headers(headers, DECISIONS_FOLDER_NAME)\n download_texts_for_headers(headers, DECISIONS_FOLDER_NAME)\n\n toProcess = {decisionID: headers[decisionID]}\n processed = {}\n allLinks = {headers[decisionID]: []}\n while depth 
> 0 and len(toProcess) > 0:\n depth -= 1\n roughLinksDict = rough_analysis.get_rough_links_for_multiple_docs(\n toProcess)\n if (rough_analysis.PATH_NONE_VALUE_KEY in roughLinksDict or\n rough_analysis.PATH_NOT_EXIST_KEY) in roughLinksDict:\n raise ValueError('Some headers have not text')\n cleanLinks = final_analysis.get_clean_links(roughLinksDict, headers)[0]\n allLinks.update(cleanLinks)\n processed.update(toProcess)\n toProcess = {}\n for decID in cleanLinks:\n for cl in cleanLinks[decID]:\n docID = cl.header_to.doc_id\n if (docID not in processed):\n toProcess[docID] = headers[docID]\n\n linkGraph = final_analysis.get_link_graph(allLinks)\n if MY_DEBUG:\n converters.save_pickle(processed, 'processWithHeaders.pickle')\n converters.save_pickle(processed, 'processWithGraph.pickle')\n nFilter = models.GraphNodesFilter(\n nodesTypes, firstDateForNodes, lastDateForNodes, nodesIndegreeRange,\n nodesOutdegreeRange)\n hFromFilter = models.HeadersFilter(\n docTypesFrom,\n firstDateFrom, lastDateFrom)\n hToFilter = models.HeadersFilter(\n docTypesTo,\n firstDateTo, lastDateTo)\n eFilter = models.GraphEdgesFilter(hFromFilter, hToFilter, weightsRange)\n subgraph = linkGraph.get_subgraph(nFilter, eFilter, includeIsolatedNodes)\n if MY_DEBUG:\n converters.save_pickle(subgraph, 'processWithSubgraph.pickle')\n linkGraphLists = (subgraph.get_nodes_as_IDs_list(),\n subgraph.get_edges_as_list_of_tuples())\n\n converters.save_json(linkGraphLists, graphOutputFilePath)\n if (showPicture):\n visualizer.visualize_link_graph(linkGraphLists,\n visualizerParameters[0],\n visualizerParameters[1],\n visualizerParameters[2])\n# end of start_process_with---------------------------------------------\n\nif __name__ == \"__main__\":\n import time\n start_time = time.time()\n # process_period(\"18.06.1980\", \"18.07.2020\", showPicture=False,\n # isNeedReloadHeaders=False, includeIsolatedNodes=True)\n\n # process_period(\n # firstDateOfDocsForProcessing='18.03.2013',\n # lastDateOfDocsForProcessing='14.08.2018',\n # docTypesForProcessing={'КСРФ/О', 'КСРФ/П'},\n # firstDateForNodes='18.03.2014', lastDateForNodes='14.08.2017',\n # nodesIndegreeRange=(0, 25), nodesOutdegreeRange=(0, 25),\n # nodesTypes={'КСРФ/О', 'КСРФ/П'},\n # includeIsolatedNodes=False,\n # firstDateFrom='18.03.2016', lastDateFrom='14.08.2016',\n # docTypesFrom={'КСРФ/О', 'КСРФ/П'},\n # firstDateTo='18.03.2015', lastDateTo='14.08.2015',\n # docTypesTo={'КСРФ/О', 'КСРФ/П'},\n # weightsRange=(1, 5),\n # graphOutputFilePath=PATH_TO_JSON_GRAPH,\n # showPicture=True, isNeedReloadHeaders=False)\n \n # start_process_with(decisionID='КСРФ/1-П/2015', depth=3)\n # load_and_visualize()\n start_process_with(\n decisionID='КСРФ/1-П/2015', depth=10,\n firstDateForNodes='18.03.2014', lastDateForNodes='14.08.2018',\n nodesIndegreeRange=(0, 25), nodesOutdegreeRange=(0, 25),\n nodesTypes={'КСРФ/О', 'КСРФ/П'},\n includeIsolatedNodes=False,\n firstDateFrom='18.03.2011', lastDateFrom='14.08.2019',\n docTypesFrom={'КСРФ/О', 'КСРФ/П'},\n firstDateTo='18.03.2011', lastDateTo='14.08.2018',\n docTypesTo={'КСРФ/О', 'КСРФ/П'},\n weightsRange=(1, 5),\n graphOutputFilePath=PATH_TO_JSON_GRAPH,\n showPicture=True, isNeedReloadHeaders=False)\n\n print(f\"Headers collection spent {time.time()-start_time} seconds.\")\n # get_only_unique_headers()\n input('press any key...')","sub_path":"link_analysis/api_module.py","file_name":"api_module.py","file_ext":"py","file_size_in_byte":14733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"127336531","text":"'''문제링크 https://programmers.co.kr/learn/courses/30/lessons/42840'''\n\ndef solution(answers):\n L = len(answers)\n pattern1 = [1, 2, 3, 4, 5]*(L//5 + 1)\n pattern2 = [2, 1, 2, 3, 2, 4, 2, 5]*(L//8 + 1)\n pattern3 = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]*(L//10 + 1)\n score = [0]*4\n for i in range(L):\n if answers[i] == pattern1[i]: score[1] += 1\n if answers[i] == pattern2[i]: score[2] += 1\n if answers[i] == pattern3[i]: score[3] += 1\n \n MAX = max(score)\n return sorted([ i for i in range(1, 4) if score[i] == MAX ])\n","sub_path":"trivial/수포자.py","file_name":"수포자.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"377686520","text":"# -*- coding: iso8859-15 -*-\n#\n# test_FileMonitor.py\n# LogNotifier\n#\n# Created by Riko on 23/10/06.\n# Copyright (c) 2006 __MyCompanyName__. All rights reserved.\n#\n\nimport sys, os\nfrom os.path import join, dirname \nlocation = join(dirname(sys.argv[0]), '..')\n\nsys.path.insert(0, location)\n\nimport unittest\nimport FileMonitor\n\nimport time\n\ntry:\n import cStringIO as StringIO\nexcept ImportError:\n import StringIO \n \nfrom StringIO import StringIO\n\nfrom FileTestHelper import FileTestHelper\n\nclass FileMonitorTest(unittest.TestCase, FileTestHelper):\n \n def setUp(self):\n self.fname = self.get_name()\n self.counter = 1\n for i in xrange(3): self.log(self.fname)\n \n def tearDown(self):\n # os.system('/usr/local/bin/mate -w %s' % self.fname)\n os.unlink(self.fname)\n\n def testIsFileLike(self):\n self.assert_(FileMonitor.is_filelike(file(sys.argv[0])))\n self.assert_(FileMonitor.is_filelike(StringIO()))\n\n def testEmpty(self):\n fm = FileMonitor.FileMonitor(self.fname)\n self.failIf(fm.is_modified())\n self.assertEqual(fm.readline(), '')\n \n def testOneLine(self):\n fm = FileMonitor.FileMonitor(self.fname)\n self.log(self.fname)\n self.assert_(fm.is_modified())\n self.assertEqual(fm.readline(), \"Message no 4 file %s\\n\" % self.fname)\n self.failIf(fm.is_modified())\n \n def testTwoLines(self):\n fm = FileMonitor.FileMonitor(self.fname)\n self.failIf(fm.is_modified())\n self.log(self.fname)\n self.log(self.fname)\n self.assert_(fm.is_modified())\n self.assertEqual(fm.readline(), \"Message no 4 file %s\\n\" % self.fname)\n self.assertEqual(fm.readline(), \"Message no 5 file %s\\n\" % self.fname)\n self.failIf(fm.is_modified())\n \n def testInterleavedLines(self):\n fm = FileMonitor.FileMonitor(self.fname)\n self.failIf(fm.is_modified())\n self.log(self.fname)\n self.assert_(fm.is_modified())\n self.assertEqual(fm.readline(), \"Message no 4 file %s\\n\" % self.fname)\n self.failIf(fm.is_modified())\n self.log(self.fname)\n self.assert_(fm.is_modified())\n self.assertEqual(fm.readline(), \"Message no 5 file %s\\n\" % self.fname)\n self.failIf(fm.is_modified())\n \n def testEncodings(self):\n fm = FileMonitor.FileMonitor(self.fname)\n self.log(self.fname, \"Hello, è char\\n\")\n self.assert_(fm.is_modified())\n self.assertEqual(fm.readline(), \"Hello, è char\\n\")\n self.log(self.fname, \"Hello ©\\n\")\n self.assert_(fm.is_modified())\n line = fm.readline()\n self.assertEqual(line, \"Hello ©\\n\")\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"trunk/LogNotifier/tests/FileMonitorTest.py","file_name":"FileMonitorTest.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"236740803","text":"import numpy as 
np\n\nfrom stylo.color import RGB8\nfrom stylo.domain import get_real_domain\nfrom stylo.image import Image\nfrom stylo.image.image import Drawable, render_drawable\n\n\nclass LayeredImage(Image):\n def __init__(self, background=None, scale=2, colorspace=None):\n self.scale = scale\n\n if background is None:\n background = \"ffffff\"\n\n if colorspace is None:\n colorspace = RGB8\n\n self.background = colorspace.parse(background)\n self.colorspace = colorspace\n self.layers = []\n\n def add_layer(self, shape, color, domain=None):\n\n # Make sure everyone uses the same colorspace.\n color.colorspace = self.colorspace\n\n self.layers.append(Drawable(domain, shape, color))\n\n def _render(self, width, height):\n\n domain = get_real_domain(width, height, self.scale)\n dimensions = (height, width, len(self.background))\n image_data = np.full(dimensions, self.background, dtype=np.uint8)\n\n for drawable in self.layers:\n\n if drawable.domain is None:\n drawable.domain = domain\n\n image_data = render_drawable(drawable, image_data)\n\n return image_data\n","sub_path":"stylo/image/layered.py","file_name":"layered.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"309357151","text":"import pytest\nimport peewee\nfrom datetime import datetime\nimport model\nfrom model import User, Location, UserDialogState, Trip\nfrom playhouse.test_utils import test_database\nfrom functools import wraps\nfrom unittest.mock import MagicMock, patch\n\nfrom bot import text_handler, add_fav_location, start_trip, cancel_cmd, \\\n trip_started_cmd, finish_trip_cmd, list_locations, remove_location\nfrom conftest import MockLocation\nfrom utils import prettify_seconds\n\n\ndef test_add_fav_location_success(db, bot, update):\n\n add_fav_location(bot, update)\n assert User.get().state.dialog_name == 'AddFavLocation_start'\n\n update.set_location(MockLocation(100, 200))\n text_handler(bot, update)\n assert User.get().state.dialog_name == 'AddFavLocation_finish'\n\n update.set_text('Home')\n text_handler(bot, update)\n\n user = User.get()\n assert user.idle\n assert len(user.favorite_locations) == 1\n assert user.favorite_locations[0].name == 'Home'\n assert user.favorite_locations[0].lon == 100\n assert user.favorite_locations[0].lat == 200\n\n\ndef test_add_fav_location_errors(db, bot, update):\n\n add_fav_location(bot, update)\n update.set_text('Not a location -> error')\n text_handler(bot, update)\n assert User.get().state.dialog_name == 'AddFavLocation_start'\n\n update.set_text('Home')\n text_handler(bot, update)\n\n update.set_location(MockLocation())\n text_handler(bot, update)\n assert User.get().state.dialog_name == 'AddFavLocation_finish'\n\n\ndef test_add_fav_location_canceled(db, bot, update):\n\n add_fav_location(bot, update)\n update.set_location(MockLocation())\n text_handler(bot, update)\n\n cancel_cmd(bot, update)\n assert User.get().favorite_locations == []\n\n\ndef test_start_trip_success(db, bot, update, monkeypatch):\n monkeypatch.setattr('dialogs.take_map_snapshot', MagicMock())\n\n home_loc = Location.create(lon=1, lat=2, name='Home', user=User.get())\n work_loc = Location.create(lon=5, lat=6, name='Work', user=User.get())\n\n start_trip(bot, update)\n assert User.get().state.dialog_name == 'StartTrip_start'\n\n update.set_text('Home')\n text_handler(bot, update)\n assert User.get().state.dialog_name == 'StartTrip_select_dest'\n\n update.set_text('Work')\n text_handler(bot, update)\n assert 
User.get().state.dialog_name == 'StartTrip_wait_readiness'\n\n update.set_text('/rock')\n trip_started_cmd(bot, update)\n assert User.get().state.dialog_name == 'StartTrip_wait_finish'\n\n update.set_text('/finish')\n finish_trip_cmd(bot, update)\n\n user = User.get()\n trip = Trip.get()\n\n assert user.idle\n assert len(user.trips) == 1\n assert trip.start_location == home_loc\n assert trip.finish_location == work_loc\n assert trip.duration > 0\n assert trip.is_done\n\n\n@pytest.mark.parametrize('msg', ['Japan', '3', '0', '-1'])\ndef test_start_trip_location_inv_choices(db, bot, update, msg):\n home_loc = Location.create(lon=1, lat=2, name='Home', user=User.get())\n work_loc = Location.create(lon=5, lat=6, name='Work', user=User.get())\n\n start_trip(bot, update)\n update.set_text(msg)\n text_handler(bot, update)\n assert User.get().state.dialog_name == 'StartTrip_start'\n\n\ndef test_start_trip_location_number_choices(db, bot, update):\n home_loc = Location.create(lon=1, lat=2, name='Home', user=User.get())\n work_loc = Location.create(lon=5, lat=6, name='Work', user=User.get())\n\n start_trip(bot, update)\n update.set_text('1')\n text_handler(bot, update)\n assert Trip.get().start_location == home_loc\n\n cancel_cmd(bot, update)\n start_trip(bot, update)\n\n update.set_text('2')\n text_handler(bot, update)\n assert Trip.get().start_location == work_loc\n\n\ndef test_list_locations(db, bot, update):\n list_locations(bot, update)\n expected_err_msg = update.message.reply_text.call_args[0][0]\n assert expected_err_msg\n\n Location.create(lon=1, lat=2, name='Chill', user=User.get())\n Location.create(lon=1, lat=2, name='Out', user=User.get())\n list_locations(bot, update)\n expected_response = update.message.reply_text.call_args[0][0]\n assert 'Chill' in expected_response\n assert 'Out' in expected_response\n\n\ndef test_remove_location(db, bot, update):\n remove_location(bot, update)\n expected_err_msg = update.message.reply_text.call_args[0][0]\n assert expected_err_msg\n\n Location.create(lon=1, lat=1, name='Home', user=User.get())\n Location.create(lon=1, lat=1, name='Work', user=User.get())\n remove_location(bot, update)\n update.set_text('1')\n text_handler(bot, update)\n assert len(User.get().favorite_locations) == 1\n Location.get().name == 'Work'\n\n\ndef test_prettify_seconds():\n assert prettify_seconds(1) == '1s'\n assert prettify_seconds(0.5) == '0s'\n assert prettify_seconds(60) == '1m'\n assert prettify_seconds(3600) == '1h'\n assert prettify_seconds(70) == '1m 10s'\n assert prettify_seconds(3670) == '1h 1m 10s'\n assert prettify_seconds(3660) == '1h 1m'\n","sub_path":"test_bot.py","file_name":"test_bot.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"124923589","text":"from detect_blinks import *\nfrom angle import *\nimport cv2\nimport numpy as np\nfrom kuangshi_api import *\n\ndata_path = \"./\"\n\nimg_path = os.path.join(data_path, \"Test\", \"new_crop\")\nlandmark_path = os.path.join(data_path, \"Test\", \"new_landmark\")\n\nimg_paths = os.listdir(img_path)\n\nleft_eye_status_list = []\nright_eye_status_list = []\nyaw_angle_list = []\nrow_angle_list = []\nroll_angle_list = []\n# initialize the frame counters and the total number of blinks\nCOUNTER = [0 for i in range(3)]\nTOTAL = 0\nLEFT_EYE = 0\nRIGHT_EYE = 0\nID_KEY = [\"yaw_angle\", \"pitch_angle\", \"roll_angle\"]\nfor idx in range(len(img_paths)):\n i = img_paths[idx]\n current_img_path = os.path.join(img_path, i)\n 
current_landmark_path = os.path.join(landmark_path, i) + \".txt\"\n if not os.path.exists(current_img_path):\n continue\n\n if not os.path.exists(current_landmark_path):\n continue\n\n img = cv2.imread(current_img_path)\n\n landmark_idx = 0\n\n with open(current_landmark_path, 'r') as f:\n lines = f.readlines()\n shape = np.zeros((len(lines), 2))\n for poit in lines:\n x, y = poit.split(' ')\n shape[landmark_idx] = (float(x), float(y))\n landmark_idx += 1\n left_eye_status, right_eye_status = get_eye_status(shape)\n\n angle = get_angle(shape)\n yaw_angle, row_angle, roll_angle = angle[0], angle[1], angle[2]\n\n left_eye_status_list.append(left_eye_status)\n right_eye_status_list.append(right_eye_status)\n row_angle_list.append(row_angle)\n yaw_angle_list.append(yaw_angle)\n roll_angle_list.append(roll_angle)\n # api_angles,eye_open = get_api_angle(current_img_path)\n # if len(api_angles) > 0:\n # TOTAL += 1\n # for i in range(3):\n # if abs(api_angles[0][ID_KEY[i]] - angle[i]) <= 5:\n # COUNTER[i] += 1\n # if left_eye_status == eye_open[0][0]:\n # LEFT_EYE += 1\n # if right_eye_status == eye_open[0][1]:\n # RIGHT_EYE += 1\n# print(\"yaw: {0}\".format(COUNTER[0] / TOTAL))\n# print(\"pitch: {0}\".format(COUNTER[1] / TOTAL))\n# print(\"roll: {0}\".format(COUNTER[2] / TOTAL))\n# print(\"left eye: {0}\".format(LEFT_EYE / TOTAL))\n# print(\"right eye: {0}\".format(RIGHT_EYE / TOTAL))\nprint(left_eye_status_list)\nprint(right_eye_status_list)\nprint(row_angle_list)\nprint(roll_angle_list)\nprint(yaw_angle_list)\n","sub_path":"output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"139604480","text":"import datetime\nimport aniso8601\nimport logging\nimport requests\nfrom werkzeug.exceptions import abort\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger()\nlogger.addHandler(logging.StreamHandler())\n\n\ndef get_title(search_url, title_number):\n title_url = \"%s/%s/%s\" % (\n search_url,\n 'auth/titles',\n title_number)\n logger.debug(\"Requesting title url : %s\" % title_url)\n try:\n response = requests.get(title_url)\n response.raise_for_status()\n return response.json()\n except requests.exceptions.HTTPError as e:\n logger.error(\"HTTP Error %s\", e)\n abort(response.status_code)\n except requests.exceptions.ConnectionError as e:\n logger.error(\"Error %s\", e)\n abort(500)\n\n\ndef apply_change(current_title, change):\n logger.info('Dealing with: %s' % type(change))\n old_name = change['proprietor_full_name']\n proprietors = current_title['proprietorship']['fields']['proprietors']\n for x in proprietors:\n if x['name']['full_name'] == old_name:\n x['name']['full_name'] = change['proprietor_new_full_name']\n return current_title\n return None\n\ndef apply_edition_date(title, submitted_at):\n dt = aniso8601.parse_datetime(submitted_at)\n\n title['edition_date'] = dt.strftime('%Y-%m-%d')\n title['last_application'] = submitted_at\n\n return title\n","sub_path":"application/modify_titles.py","file_name":"modify_titles.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"382865073","text":"#!/usr/bin/python3.6\n\n## import packages\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"1\"\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\nimport sys\nsys.path.append(\"../Orca/io\")\nfrom hdf5datasetwriter import HDF5DatasetWriter\n\nimport tensorflow as 
tf\nconfig = tf.compat.v1.ConfigProto(allow_soft_placement=True)\nconfig.gpu_options.allow_growth = True\nsess = tf.compat.v1.Session(config=config)\n\nfrom keras.applications import ResNet50\nfrom keras.applications import imagenet_utils\nfrom keras.preprocessing.image import img_to_array, load_img\nfrom keras.utils import plot_model\nfrom keras.models import Model\nfrom keras.layers import Input, GlobalAveragePooling2D\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom imutils import paths\nimport progressbar\nimport argparse\nimport random\nimport numpy as np\n\n\n\"\"\"\n- given dogs_vs_cats dataset is small & imagenet includes dogs & cats, use NN as\n feature extractor to extract features;\n- use ResNet50, which has output dim = 2048 at the last average pooling layer\n\"\"\"\n## construct arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-d\", \"--dataset\", required=True, \\\n help=\"path to input dataset folder\")\nparser.add_argument(\"-o\", \"--output\", required=True, \\\n help=\"path to output HDF5 file\")\n\nparser.add_argument(\"-p\", \"--add_gloAvgPool\", type=bool, default=True, \\\n help=\"define if need to add the final GlobalAveragePooling2D layer for FE\")\nparser.add_argument(\"-m\", \"--model\", type=str, default=\"resnet50\", \\\n help=\"which model to extract features\")\nparser.add_argument(\"-b\", \"--batch_size\", type=int, default=128, \\\n help=\"batch size of images\")\nparser.add_argument(\"-bf\", \"--buffer_size\", type=int, default=1000, \\\n help=\"size of feature extraction buffer\")\nargs = vars(parser.parse_args())\n\n# cache the var\nbs = args[\"batch_size\"]\ngloAvgPool = args[\"add_gloAvgPool\"] # MUST be str!!!\n\n\naccDim = 1 # dimension of extracted features w.r.t each image\n\ninputShapeBanks = {\n \"resnet50\" : (224, 224),\n }\noutputShapeBanks = {\n \"resnet50\" : (7, 7, 2048),\n }\n\n\n\"\"\"\n- grab & shuffle the list of iamges paths; extract labels from paths\n\"\"\"\nprint(\"[INFO] loading images...\") \nimagePaths = list(paths.list_images(args[\"dataset\"]))\nrandom.shuffle(imagePaths)\n\n# extract class labels & encode into numbers\nlabels = [p.split(os.path.sep)[-1].split(\".\")[0] for p in imagePaths]\nle = LabelEncoder()\nlabels = le.fit_transform(labels)\nprint(\"[INFO] encode %d categories in total\" % len(set(labels)))\n\n\n\"\"\"\n- load ResNet50 without top\n- noting that the \"top\" dropped from resnet includes the GlobalAveragePooling2D layer\n- perform model surgery to add the GlobalAveragePooling2D\n\"\"\"\nif gloAvgPool == True:\n print(\"[INFO] loading ResNet50 & add GlobalAveragePooling2D...\")\n x = Input(shape=(224, 224, 3))\n baseModel = ResNet50(weights=\"imagenet\", include_top=False, input_tensor=x) \n # add layer\n headModel = baseModel.output\n headModel = GlobalAveragePooling2D()(headModel) \n # merge model\n model = Model(inputs=baseModel.input, outputs=headModel)\n \n # model architecture path\n arch_path = \"./output/resnet50_architecture_with_GloAvgPool.png\" \n\n # output dim\n accDim = 2048\nelse:\n print(\"[INFO] loading ResNet50 without GlobalAveragePooling2D...\")\n model = ResNet50(weights=\"imagenet\", include_top=False)\n arch_path = \"./output/resnet50_architecture_without_GloAvgPool.png\" \n\n # output dim\n accDim = 7 * 7 * 2048\nprint(\"[INFO] noting that, accDim = \", accDim) \n\nprint(\"[INFO] plot model architecture...\")\nplot_model(model, to_file=arch_path, show_shapes=True)\n\n\n\"\"\"\n- initiate a HDF5 dataset writer\n\"\"\"\n# store the class label names in the 
dataset\n# (batch, 2048) = output volume shape of last GlobalAveragePooling2D layer\ndataset = HDF5DatasetWriter(args[\"output\"], \n (len(imagePaths), accDim),\n dataKey=\"features\", \n bufSize=args[\"buffer_size\"],\n )\ndataset.storeClassLabels(le.classes_)\n\n\n\"\"\"\n- loop over imagePaths to load in images in batch;\n\"\"\"\n# create a progress bar\nwidgets = [\"Extracting Features :\", progressbar.Percentage(), \" \", \\\n progressbar.Bar(), \" \", progressbar.ETA()]\npbar = progressbar.ProgressBar(maxval=len(imagePaths), widgets=widgets).start()\n\n# loop over imagePaths\nfor i in np.arange(0, len(imagePaths), bs):\n # get imagePaths & labels of current batch\n batchPaths = imagePaths[i : i + bs]\n batchLabels = labels[i : i + bs]\n batchImages = []\n\n # load images of current batch\n for j, imgPath in enumerate(batchPaths):\n # ResNet is trained on 224 * 224 images\n image = load_img(imgPath, target_size=inputShapeBanks[args[\"model\"]])\n image = img_to_array(image)\n\n # add batch dim\n image = np.expand_dims(image, axis=0)\n # Special for ImageNet dataset => substracting mean RGB pixel intensity\n image = imagenet_utils.preprocess_input(image)\n \n batchImages.append(image)\n pass\n\n # stack vertically the images in the batch\n batchImages = np.vstack(batchImages)\n features = model.predict(batchImages, batch_size=bs) # scores before softmax\n\n # flatten features of each image w.r.t to output volume shape\n features = features.reshape((features.shape[0], accDim))\n \n # add the feature & labels into HDF5 dataset\n dataset.add(features, batchLabels)\n\n # update pbar\n pbar.update(i)\n pass\n\n# close the dataset when finished\ndataset.close()\npbar.finish()\n\n\n","sub_path":"extract_features_resnet50.py","file_name":"extract_features_resnet50.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"202335893","text":"import pandas as pd;\nimport sklearn.linear_model as lm;\nimport matplotlib.pyplot as plt;\nfrom sklearn.model_selection import train_test_split\n\n\n# Reading data from file\ndf = pd.read_csv(\"carprices.csv\");\n# print(df);\n\n# data is already munged and is ready to be used\n\n# Data visualisation \nplt.scatter(df[\"Mileage\"],df[\"Sell Price($)\"],marker = \"*\", color = \"g\");\nplt.show(block = False);\nplt.pause(2);\nplt.close();\n\nplt.scatter(df[\"Age(yrs)\"],df[\"Sell Price($)\"],marker = \"o\", color = \"b\");\nplt.show(block = False);\nplt.pause(2);\nplt.close();\n\n# Model training\nmodel = lm.LinearRegression();\nX = df[[\"Mileage\",\"Age(yrs)\"]]\ny = df[\"Sell Price($)\"];\n\n# creating test and train datasets\nX_train, X_test , y_train, y_test = train_test_split(X,y,test_size=0.2);\nmodel.fit(X_train,y_train);\n\n# making predictions on the test data\nprediction = model.predict(X_test);\n# print(y_test);\n# print(prediction);\n\n# checking accuracy of my model\nacc = model.score(X_test,y_test);\nprint(acc);","sub_path":"ML/6_train_test_split/my_solution.py","file_name":"my_solution.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"228404696","text":"import os\nimport time\nimport argparse\nimport numpy as np\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torch.backends.cudnn as cudnn\n# from tensorboardX import SummaryWriter\n\nimport model\nimport evaluate\nimport 
data_utils\nfrom loss import loss_function\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', \n\ttype = str,\n\thelp = 'dataset used for training, options: amazon_book, yelp, adressa',\n\tdefault = 'amazon_book')\nparser.add_argument('--model', \n\ttype = str,\n\thelp = 'model used for training. options: GMF, NeuMF-end',\n\tdefault = 'GMF')\nparser.add_argument('--drop_rate', \n\ttype = float,\n\thelp = 'drop rate',\n\tdefault = 0.2)\nparser.add_argument('--num_gradual', \n\ttype = int, \n\tdefault = 30000,\n\thelp='how many epochs to linearly increase drop_rate')\nparser.add_argument('--exponent', \n\ttype = float, \n\tdefault = 1, \n\thelp='exponent of the drop rate {0.5, 1, 2}')\nparser.add_argument(\"--lr\", \n\ttype=float, \n\tdefault=0.001, \n\thelp=\"learning rate\")\nparser.add_argument(\"--dropout\", \n\ttype=float,\n\tdefault=0.0, \n\thelp=\"dropout rate\")\nparser.add_argument(\"--batch_size\", \n\ttype=int, \n\tdefault=1024, \n\thelp=\"batch size for training\")\nparser.add_argument(\"--epochs\", \n\ttype=int,\n\tdefault=10,\n\thelp=\"training epoches\")\nparser.add_argument(\"--eval_freq\", \n\ttype=int,\n\tdefault=2000,\n\thelp=\"the freq of eval\")\nparser.add_argument(\"--top_k\", \n\ttype=list, \n\tdefault=[50, 100],\n\thelp=\"compute metrics@top_k\")\nparser.add_argument(\"--factor_num\", \n\ttype=int,\n\tdefault=32, \n\thelp=\"predictive factors numbers in the model\")\nparser.add_argument(\"--num_layers\", \n\ttype=int,\n\tdefault=3, \n\thelp=\"number of layers in MLP model\")\nparser.add_argument(\"--num_ng\", \n\ttype=int,\n\tdefault=1, \n\thelp=\"sample negative items for training\")\nparser.add_argument(\"--out\", \n\tdefault=True,\n\thelp=\"save model or not\")\nparser.add_argument(\"--gpu\", \n\ttype=str,\n\tdefault=\"1\",\n\thelp=\"gpu card ID\")\nargs = parser.parse_args()\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\ncudnn.benchmark = True\n\ntorch.manual_seed(2019) # cpu\ntorch.cuda.manual_seed(2019) #gpu\nnp.random.seed(2019) #numpy\nrandom.seed(2019) #random and transforms\ntorch.backends.cudnn.deterministic=True # cudnn\n\ndef worker_init_fn(worker_id):\n np.random.seed(2019 + worker_id)\n\ndata_path = '../data/{}/'.format(args.dataset)\nmodel_path = './models/{}/'.format(args.dataset)\nprint(\"arguments: %s \" %(args))\nprint(\"config model\", args.model)\nprint(\"config data path\", data_path)\nprint(\"config model path\", model_path)\n\n############################## PREPARE DATASET ##########################\n\ntrain_data, valid_data, test_data_pos, user_pos, user_num ,item_num, train_mat, train_data_noisy = data_utils.load_all(args.dataset, data_path)\n\n# construct the train and test datasets\ntrain_dataset = data_utils.NCFData(\n\t\ttrain_data, item_num, train_mat, args.num_ng, 0, train_data_noisy)\nvalid_dataset = data_utils.NCFData(\n\t\tvalid_data, item_num, train_mat, args.num_ng, 1)\n\ntrain_loader = data.DataLoader(train_dataset,\n\t\tbatch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)\nvalid_loader = data.DataLoader(valid_dataset,\n\t\tbatch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)\n\nprint(\"data loaded! user_num:{}, item_num:{} train_data_len:{} test_user_num:{}\".format(user_num, item_num, len(train_data), len(test_data_pos)))\n\n########################### CREATE MODEL #################################\nif args.model == 'NeuMF-pre': # pre-training. 
Not used in our work.\n\tGMF_model_path = model_path + 'GMF.pth'\n\tMLP_model_path = model_path + 'MLP.pth'\n\tNeuMF_model_path = model_path + 'NeuMF.pth'\n\tassert os.path.exists(GMF_model_path), 'lack of GMF model'\n\tassert os.path.exists(MLP_model_path), 'lack of MLP model'\n\tGMF_model = torch.load(GMF_model_path)\n\tMLP_model = torch.load(MLP_model_path)\nelse:\n\tGMF_model = None\n\tMLP_model = None\n\nmodel = model.NCF(user_num, item_num, args.factor_num, args.num_layers, \n\t\t\t\t\t\targs.dropout, args.model, GMF_model, MLP_model)\n\nmodel.cuda()\nBCE_loss = nn.BCEWithLogitsLoss()\n\nif args.model == 'NeuMF-pre':\n\toptimizer = optim.SGD(model.parameters(), lr=args.lr)\nelse:\n\toptimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n# writer = SummaryWriter() # for visualization\n\n# define drop rate schedule\ndef drop_rate_schedule(iteration):\n\n\tdrop_rate = np.linspace(0, args.drop_rate**args.exponent, args.num_gradual)\n\tif iteration < args.num_gradual:\n\t\treturn drop_rate[iteration]\n\telse:\n\t\treturn args.drop_rate\n\n\n########################### Eval #####################################\ndef eval(model, valid_loader, best_loss, count):\n\t\n\tmodel.eval()\n\tepoch_loss = 0\n\tvalid_loader.dataset.ng_sample() # negative sampling\n\tfor user, item, label, noisy_or_not in valid_loader:\n\t\tuser = user.cuda()\n\t\titem = item.cuda()\n\t\tlabel = label.float().cuda()\n\n\t\tprediction = model(user, item)\n\t\tloss = loss_function(prediction, label, drop_rate_schedule(count))\n\t\tepoch_loss += loss.detach()\n\tprint(\"################### EVAL ######################\")\n\tprint(\"Eval loss:{}\".format(epoch_loss))\n\tif epoch_loss < best_loss:\n\t\tbest_loss = epoch_loss\n\t\tif args.out:\n\t\t\tif not os.path.exists(model_path):\n\t\t\t\tos.mkdir(model_path)\n\t\t\ttorch.save(model, '{}{}_{}-{}.pth'.format(model_path, args.model, args.drop_rate, args.num_gradual))\n\treturn best_loss\n\n########################### Test #####################################\ndef test(model, test_data_pos, user_pos):\n\ttop_k = args.top_k\n\tmodel.eval()\n\t_, recall, NDCG, _ = evaluate.test_all_users(model, 4096, item_num, test_data_pos, user_pos, top_k)\n\n\tprint(\"################### TEST ######################\")\n\tprint(\"Recall {:.4f}-{:.4f}\".format(recall[0], recall[1]))\n\tprint(\"NDCG {:.4f}-{:.4f}\".format(NDCG[0], NDCG[1]))\n\n########################### TRAINING #####################################\ncount, best_hr = 0, 0\nbest_loss = 1e9\n\nfor epoch in range(args.epochs):\n\tmodel.train() # Enable dropout (if have).\n\n\tstart_time = time.time()\n\ttrain_loader.dataset.ng_sample()\n\n\tfor user, item, label, noisy_or_not in train_loader:\n\t\tuser = user.cuda()\n\t\titem = item.cuda()\n\t\tlabel = label.float().cuda()\n\n\t\tmodel.zero_grad()\n\t\tprediction = model(user, item)\n\t\tloss = loss_function(prediction, label, drop_rate_schedule(count))\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\tif count % args.eval_freq == 0 and count != 0:\n\t\t\tprint(\"epoch: {}, iter: {}, loss:{}\".format(epoch, count, loss))\n\t\t\tbest_loss = eval(model, valid_loader, best_loss, count)\n\t\t\tmodel.train()\n\n\t\tcount += 1\n\nprint(\"############################## Training End. 
##############################\")\ntest_model = torch.load('{}{}_{}-{}.pth'.format(model_path, args.model, args.drop_rate, args.num_gradual))\ntest_model.cuda()\ntest(test_model, test_data_pos, user_pos)\n","sub_path":"T_CE/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"118582015","text":"class Solution:\r\n def diffWaysToCompute(self, input: str) -> List[int]:\r\n def difc(input):\r\n if \"*\" not in input and \"-\" not in input and \"+\" not in input:return([int(input)])\r\n else:\r\n return[left+right if mid==\"+\" else left-right if mid==\"-\" else left*right\r\n for index,mid in enumerate(input) if mid in \"+-*\"\r\n for left in difc(input[:index])\r\n for right in difc(input[index+1:])] #or [int(input)]\r\n return difc(input) \r\n# since the left side would never be null but right might.","sub_path":"241_different_ways_to_add_parentheses.py","file_name":"241_different_ways_to_add_parentheses.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"512941293","text":"#! /usr/bin/env python\n\nimport rospkg\nimport rospy\n# Import the service message used by the service /gazebo/delete_model\nfrom iri_wam_reproduce_trajectory.srv import ExecTraj, ExecTrajRequest\n\n\n# Initialise a ROS node with the name service_client\nrospy.init_node('excercise_3_1_node')\n# Wait for the service client /gazebo/delete_model to be running\nrospy.wait_for_service('/execute_trajectory')\n# Create the connection to the service\nrobot_execute_trajectory_service_client = rospy.ServiceProxy('/execute_trajectory', ExecTraj)\n# Create an object of type DeleteModelRequest\nrobot_execute_trajectory_request_object = ExecTrajRequest()\n# Fill the variable model_name of this object with the desired value\n\nname =rospkg.RosPack()\ntraj = name.get_path('iri_wam_reproduce_trajectory') + \"/config/init_pos.txt\"\ntraj_1 = name.get_path('iri_wam_reproduce_trajectory') + \"/config/get_food.txt\"\ntraj_2 = name.get_path('iri_wam_reproduce_trajectory') + \"/config/release_food.txt\"\n# Send through the connection the name of the object to be deleted by the service\nrobot_execute_trajectory_request_object.file = traj_1\n# Print the result given by the service called\nresult = robot_execute_trajectory_service_client(robot_execute_trajectory_request_object)\nprint(result)","sub_path":"ROS_Basics/unit_3_service/src/exercise_3_1.py","file_name":"exercise_3_1.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"86919383","text":"#!/Users/akifyusein/.virtualenvs/demo/bin/python3 -W ignore\nimport whois, re, requests\nfrom ipwhois import IPWhois\nfrom pprint import pprint\nimport sys\n# Install 'pip install whois'\n\n# Check if argument is given from command line if not exit\nif len(sys.argv) > 1:\n filename = sys.argv[1]\n print (filename)\nelse:\n print (f'Usage: {sys.argv[0]} filename')\n sys.exit()\n\ndomains_ip = []\ndomains_name = []\nwith open(filename) as f:\n content = f.readlines()\n # you may also want to remove whitespace characters like `\\n` at the end of each line\n content = [x.strip() for x in content]\n\n # Extract the site name from the line which is first\n for line in content:\n x = line.split(\" : \")\n domains_name.append(x[0])\n if len(x) == 2:\n domains_ip.append(x[1])\n else:\n 
domains_ip.append(\"NONE\")\n\n# Use i to track the position in the domains_name list and access entries by index\ni = 0\nfor ip_addr in domains_ip:\n    if ip_addr == \"NONE\":\n        # no IP address was recorded for this domain, so report it by name and move on\n        print ('\"%s\" -- \"%s\"' % (domains_name[i], \"NO-IP-ADDRESS\"))\n        i += 1\n        continue\n\n    ip_addr = ip_addr.strip('\\\"')\n    obj = IPWhois(ip_addr)\n    res = obj.lookup_whois()\n    name = res[\"nets\"][0]['name']\n    description = res[\"nets\"][0]['description']\n    if not name:\n        name = description\n    print ('\"%s\", \"%s\", \"%s\"' % (ip_addr, domains_name[i], name))\n\n    # print ('\"%s\" -- \"%s\" : \"%s\" -- \"%s\"' % (name, description, ip_addr, domains_name[i]))\n    i += 1\n#\n#ip addr add 188.65.117.120/28 dev em1\n# ip route add default via 188.65.117.113 dev em1\n","sub_path":"Projects/Krishna/krishna_new.py","file_name":"krishna_new.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"270928419","text":"#!/bin/python\nfrom __future__ import print_function\n\ndef p_format(ar): # the output format is special, so use a dedicated print helper\n    for i in ar:\n        print (i, end=' ')\n    print()\n    return \"\"\n\ndef insertionSort(ar): \n    for i in xrange(1, len(ar)): # start sorting from the second element\n        for j in xrange(0, i):\n            if ar[i] < ar[j]: # the prefix is always kept \"sorted\", so we just \"insert\" into the right position\n                ar[i], ar[j] = ar[j], ar[i]\n        p_format(ar) \n    return \"\"\n\nm = input()\nar = [int(i) for i in raw_input().strip().split()]\ninsertionSort(ar)\n","sub_path":"Algorithms/Sorting/Insertion Sort - Part 2.py","file_name":"Insertion Sort - Part 2.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"233671892","text":"import heapq\n\nclass graph: # _new_=regularconst vs _init_ =+ add-on manip. before creat.\n    def __init__(self, width, height): \n        self.width = width\n        self.height = height\n        self.walls = [] # [] > list \n        self.weights = {} # {} > dic['key' : 'value']- no duplicates \n\n    #checks if any pair of x,y is within our map/limits\n    def in_bounds(self, id):\n        (x, y) = id\n        return 0 <= x < self.width and 0 <= y < self.height\n\n    # checks and returns true if param location has no obstacle on the way\n    def passable(self, id):\n        return id not in self.walls\n\n    # returns all available adjacent (x,y) --in map and no walls\n    def neighbors(self, id):\n        (x, y) = id\n        results = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]\n        if (x + y) % 2 == 0: results.reverse()\n        results = filter(self.in_bounds, results)\n        results = filter(self.passable, results)\n        return results\n\n    # scales the effort to go from point A to B\n    def cost(self, from_node, to_node):\n        return self.weights.get(to_node, 1)\n\n\nclass PriorityQueue:\n    def __init__(self):\n        self.elements = []\n\n    def empty(self):\n        return len(self.elements) == 0\n\n    def put(self, item, priority):\n        heapq.heappush(self.elements, (priority, item))\n\n    def get(self):\n        return heapq.heappop(self.elements)[1]\n\ndef reconstructPath(came_from, start, goal):\n    current = goal\n    path = [current]\n    while current != start:\n        current = came_from[current]\n        path.append(current)\n    path.reverse()\n    return path\n\ndef heuristic(a, b):\n    (x1, y1) = a\n    (x2, y2) = b\n    return abs(x1 - x2) + abs(y1 - y2)\n\ndef aStarSearch(graph, start, goal):\n    frontier = PriorityQueue()\n    frontier.put(start, 0)\n    cameFrom = {}\n    costSoFar = {}\n    cameFrom[start] = None\n    costSoFar[start] = 0\n\n    while not frontier.empty():\n        current = frontier.get()\n\n        if current == goal:\n            break\n\n        for next in graph.neighbors(current):\n            newCost = costSoFar[current] 
+ graph.cost(current, next)\n if next not in costSoFar or newCost < costSoFar[next]:\n costSoFar[next] = newCost\n priority = newCost + heuristic(goal, next)\n frontier.put(next, priority)\n cameFrom[next] = current\n\n return cameFrom, costSoFar\n\nexample = graph(10, 10)\ntestStart = (0, 0)\ntestGoal = (5, 4)\n\n(pathway, _ ) = aStarSearch(example, testStart, testGoal)\nprint(reconstructPath(pathway, testStart, testGoal))\n","sub_path":"astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"216356971","text":"import numpy as np\nimport time\nimport cifarload as cifar\n\nstrt = time.time()\nprint(\"Classification of CIFAR-10 dataset using regularized SVM with Minibatch gradient descent\")\n#trainData,trainLabels,testData,testLabels = cifar.load_CIFAR_10()\n\nlearning_rate = 0.01\nN = 10 # Number of classes\nM = 64 # Size of minibatch\nD = 3073 # Size of image\nLAMBDA = 0.0001\n#trainingSize = trainData.shape[0]\n#testSize = testData.shape[0]\n#w = np.zeros([N,D])\nw = 0.01*np.random.randn(N,D)\nr = []\nt = []\n# x (examples,3072)\n# y (examples, 1)\n\ndef preprocess(x):\n x = x - np.mean(x)\n oldX = x\n sizeX = list(x.shape)\n sizeX[1] += 1\n x = np.ones(sizeX)\n x[:,:-1] = oldX\n return x\n##def Loss(xi,yi,W):\n## delta = 1\n## f = np.dot(W,xi)\n## diffs = np.maximum(0,(f - f[yi,range(M)] + delta))\n## diffs[yi] = 0\n## Lossi = diffs\n## return Lossi\nsca = 0\ndef eval_grad(xi,yi,W):\n global sca, xd, fl, yh\n delta = 1\n # linear combination\n f = np.dot(W,xi)\n # grad for incorrect classes\n scaled = ((f-f[yi,range(M)]+delta)>0)*1\n # grad for correct class\n scaled[yi,range(M)] = -1*np.sum(scaled,0)\n scaled[yi,range(M)] += 1 # remove the contribution from the correct class\n \n sca = W\n grad = np.dot(scaled,np.transpose(xi))\n grad[:,:-1] += LAMBDA*W[:,:-1]\n xd = xi\n return grad\ndef train():\n global w, sca\n [x,y,xt,yt] = cifar.load_CIFAR_10()\n x = preprocess(x)\n xt = preprocess(xt)\n for u in range(1):\n for i in range(1,int(x.shape[0]/M)*M,M):\n delw = eval_grad(np.transpose(x[i:i+M]),y[i:i+M],w)\n w -= learning_rate*delw\n \n return [xt,yt]\n\ndef test():\n global w, sca\n [xt,yt] = train()\n sum1 = 0\n # Testing in Minibatches (increased performance vs Full batch)\n for i in range(1,int(xt.shape[0]/M)*M,M):\n res = np.dot(w,np.transpose(xt[i:i+M]))\n sum1 += (list(np.argmax(res,0)==yt[i:i+M]).count(True))\n print(\"Test accuracy is: %1.3f\" % (sum1*100/(int(xt.shape[0]/M)*M))+\" %\")\n print(\"Total execution time: %1.3f s\" % (time.time()-strt))\ntest()\n","sub_path":"MLalgos/svmMinigdReg.py","file_name":"svmMinigdReg.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"101357956","text":"import requests\nimport json\nfrom flask import Flask\n\napp = Flask(__name__)\n\n@app.route('/')\ndef get_all_open_issues():\n url =\"https://api.github.com/orgs/att/repos?type=public\"\n response = []\n get_public_repos = requests.get(url)\n public_repos_json = get_public_repos.json()\n\n for item in public_repos_json:\n repo_issues = requests.get(\"https://api.github.com/repos/att/\"+item['name']+\"/issues?state=open\").json();\n repo_info = {}\n repo_info['repository name:'] = item['name']\n repo_info['repository_id'] = item['id']\n iss_list = []\n for issue in repo_issues:\n issue_info = {}\n issue_info['issue_id'] = issue['id']\n issue_info['issue_number'] = 
issue['number']\n issue_info['issue_title'] = issue['title']\n issue_info['issue_description'] = issue['body']\n issue_info['issue_created_at'] = issue['created_at']\n issue_info['issue_created_by_user'] = issue['user']['id']\n issue_info['issue_status'] = issue['state']\n issue_comments = requests.get(issue['comments_url']).json()\n iss_comments = []\n for issue_comment in issue_comments:\n comment = {}\n comment['comment_id'] = issue_comment['id']\n comment['comment_created_at'] = issue_comment['created_at']\n comment['comment_body'] = issue_comment['body']\n comment['comment_user_id'] = issue_comment['user']['id']\n iss_comments.append(comment)\n issue_info['comments'] = iss_comments\n iss_list.append(issue_info)\n repo_info['issues'] = iss_list\n response.append(repo_info)\n\n print(json.dumps(response, indent=2))\n return response\n\nget_all_open_issues()\nif __name__ == '__main__':\n app.run()","sub_path":"movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"220072494","text":"import os\nimport numpy as np \nfrom osgeo import gdal\nfrom osgeo import osr\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\ndef gdal_mix_max(path_raster):\n r = gdal.Open(path_raster)\n banda1=r.GetRasterBand(1)\n stats = banda1.GetStatistics(True, True)\n min,max = stats[0],stats[1]\n print (min,max)\n return min,max\n\ndef equidistantes (path_r,categorias=5):\n min,max = gdal_mix_max(path_r)\n lista_val = [min,]\n incremento = (max - min) / categorias\n for i in range(1,categorias+1):\n valor = min + (incremento * i)\n lista_val.append(valor)\n print (lista_val)\n raster = gdal.Open(path_r)\n band1 =raster.GetRasterBand(1).ReadAsArray()\n nodata_r=raster.GetRasterBand(1).GetNoDataValue()\n band2= band1[band1 >=min]\n band2 = band2.flatten()\n lista_pix = []\n for j in range(1,len(lista_val)):\n pixeles = np.sum((np.logical_and(band2>=lista_val[j-1], band2<=lista_val[j])))\n print(pixeles)\n lista_pix.append(pixeles)\n return lista_val,lista_pix\n\ndef cuantiles(path_r,quantil):\n min,max = gdal_mix_max(path_r)\n raster = gdal.Open(path_r)\n band1 =raster.GetRasterBand(1).ReadAsArray()\n nodata_r=raster.GetRasterBand(1).GetNoDataValue()\n band2= band1[band1 >=min]\n band2 = band2.flatten()\n print (band2)\n lista_val = [min,]\n lista_pix = []\n \n for i in range(1,quantil+1):\n print (i,i/quantil)\n valor= i/quantil\n cuantil_c = np.quantile(band2,valor)\n lista_val.append(cuantil_c)\n lista_cuantiles =['Q'+str(x)+'\\n'+str(round(lista_val[x-1],3))+' - '+str(round(lista_val[x],3)) for x in range(1,quantil+1)]\n for j in range(1,len(lista_val)):\n pixeles = np.sum((np.logical_and(band2>=lista_val[j-1], band2<=lista_val[j])))\n print(pixeles)\n lista_pix.append(pixeles)\n \n\n return lista_val,lista_cuantiles,lista_pix\ndef grafica_cats(lista_val,lista_pix,categorias):\n ## PARA ESTILO DE LAS GRAFICAS\n # palette={\"primary\":\"#FEF702\",\n # \"background\": \"#252525\",\n # \"primary_chart\":\"#F1F1F1\",\n # \"text_color\": \"#7F7F7F\"}\n\n # mpl.rcParams[\"figure.facecolor\"] = palette[\"background\"]\n # mpl.rcParams[\"axes.facecolor\"] = palette[\"background\"]\n # mpl.rcParams[\"savefig.facecolor\"] = palette[\"background\"]\n # mpl.rcParams['axes.labelcolor']= palette[\"text_color\"]\n fig = plt.figure(u'Expansión turística Máxima') # Figure\n ax = fig.add_subplot(111) # Axes\n lista_cats =['Cat_'+str(x)+'\\n'+str(round(lista_val[x-1],3))+' - 
'+str(round(lista_val[x],3)) for x in range(1,categorias+1)]\n nombres = lista_cats\n datos = lista_pix\n xx = range(len(datos))\n\n\n plt.title(\"Expansión turística Máxima\",\n horizontalalignment = 'left',\n x=0.05,\n y=0.99,\n color='#691e91',\n pad=25)\n\n ax.bar(xx, datos, width=0.5, align='center',color='#691e91')\n ax.set_xticks(xx)\n ax.set_xticklabels(nombres)\n ax.set_ylabel('Pixeles')\n ax.set_xlabel('Cuantiles')\n plt.show()\npath_r = 'C:/Dropbox (LANCIS)/FOMIX/fmx_estudio_tecnico/diagnostico/talleres/sectores/agricultura/aptitud/agrie_agricultura_riego_sigindex.tif'\n\n#lista_val,lista_cuantiles,lista_pix = cuantiles(path_r,10)\nlista_val,lista_pix= equidistantes(path_r,3)\ngrafica_cats(lista_val,lista_pix,3)\n\n\n\n\n\n\n","sub_path":"codigos/secundarios/cuantiles.py","file_name":"cuantiles.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"5047384","text":"from django.shortcuts import render, redirect\nfrom .models import Profile\nfrom .form import ProfileForm, UserForm\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required\ndef profile(request):\n obj = Profile.objects.get(user_id=request.user.id)\n if request.method == 'POST':\n p_form = ProfileForm(request.POST, request.FILES, instance=request.user.profile)\n u_form = UserForm(request.POST, instance=request.user)\n if p_form.is_valid() and u_form.is_valid():\n p_form.save()\n u_form.save()\n return redirect('profile')\n\n p_form = ProfileForm()\n u_form = UserForm(instance=request.user)\n\n return render(request, 'user/user.html', {'obj': obj, 'p_form': p_form, 'u_form':u_form})\n\n\n\ndef userProfile(request, id):\n obj = Profile.objects.get(id=id)\n if request.user == obj.user:\n return redirect('profile')\n\n return render(request, 'user/user Profile.html', {'x': obj})\n","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"429302758","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Yuki Furuta \n\nimport click\nimport os.path as osp\nimport sys\nimport chainer.functions as F\nimport chainervr\nimport chainervr.utils.train as T\n\n\ndef mse_gd_loss(x, t, eta=0.5):\n mse = F.mean_squared_error(x, t)\n gd = chainervr.functions.gradient_difference_error(x, t)\n return mse * (1.0 - eta) + gd * eta\n\n\n@click.command()\n@click.option(\"--batch-size\", type=int, default=16)\n@click.option(\"--max-iter\", type=int, default=100000)\n@click.option(\"--gpu\", type=int, default=-1)\n@click.option(\"--multi-gpu\", is_flag=True)\n@click.option(\"--out\", type=str, default=\"results\")\n@click.option(\"--loss-func\", type=click.Choice([\"mse\", \"gd\", \"mse_gd\"]), default=\"mse_gd\")\n@click.option(\"--eta\", type=float, default=0.4)\n@click.option(\"--hidden-channels\", type=int, default=1000)\n@click.option(\"--dropout\", type=float, default=0.1)\n@click.option(\"--noise-sigma\", type=float, default=0.1)\n@click.option(\"--num-episodes\", type=int, default=5)\n@click.option(\"--log-interval\", type=int, default=10)\n@click.option(\"--snapshot-interval\", type=int, default=1000)\n@click.option(\"--resume\", type=str, default=\"\")\ndef train(batch_size, max_iter,\n gpu, multi_gpu, out,\n loss_func, eta,\n hidden_channels, num_episodes,\n dropout, noise_sigma,\n log_interval, snapshot_interval, resume):\n\n T.info(\"Loading model\")\n\n model = 
chainervr.models.DeepEpisodicMemory(\n hidden_channels=hidden_channels, num_episodes=num_episodes,\n dropout=dropout, noise_sigma=noise_sigma)\n\n T.info(\"Using %s as loss function\" % loss_func)\n if loss_func == \"mse\":\n loss_func = F.mean_squared_error\n elif loss_func == \"gd\":\n loss_func = chainervr.functions.gradient_difference\n else:\n loss_func = lambda x, t: mse_gd_loss(x,t, eta)\n\n train_chain = chainervr.models.EpisodicTrainChain(\n model, ratio=0.5, loss_func=loss_func)\n\n model.reset_state()\n\n T.info(\"Loading dataset\")\n\n T.train(\n model=model,\n train_chain=train_chain,\n train_dataset_cls=chainervr.datasets.UCF101Dataset,\n test_dataset_cls=chainervr.datasets.UCF101Dataset,\n dataset_args={\"num_episodes\": num_episodes*2},\n in_episodes=num_episodes,\n out_episodes=num_episodes,\n gpu=gpu, multi_gpu=multi_gpu, out=out,\n batch_size=batch_size, max_iter=max_iter,\n evaluate=False,\n resume=resume,\n log_interval=log_interval,\n snapshot_interval=snapshot_interval,\n )\n\n\nif __name__ == '__main__':\n train()\n","sub_path":"examples/ucf101/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"5065182","text":"\"\"\"\nThis module is designed to validate the existence and structure of content pack essential files in content.\n\"\"\"\nimport os\nimport io\nimport re\nimport json\n\nfrom Tests.test_utils import pack_name_to_path\nfrom Tests.scripts.constants import PACKS_WHITELIST_FILE_NAME, PACKS_PACK_IGNORE_FILE_NAME, PACKS_PACK_META_FILE_NAME, \\\n PACKS_README_FILE_NAME\n\n\nclass PackUniqueFilesValidator():\n \"\"\"PackUniqueFilesValidator is designed to validate the correctness of pack's files structure.\n Existence and validity of this files is essential.\"\"\"\n\n def __init__(self, pack):\n self.pack = pack\n self.pack_path = pack_name_to_path(self.pack)\n self.secrets_file = PACKS_WHITELIST_FILE_NAME\n self.pack_ignore_file = PACKS_PACK_IGNORE_FILE_NAME\n self.pack_meta_file = PACKS_PACK_META_FILE_NAME\n self.readme_file = PACKS_README_FILE_NAME\n self._errors = []\n\n # error handling\n def _add_error(self, error):\n \"\"\"Adds error entry to a list under pack's name\"\"\"\n if error:\n self._errors.append(error)\n\n def get_errors(self, raw=False):\n \"\"\"Get the dict version or string version for print\"\"\"\n errors = ''\n if raw:\n errors = self._errors\n elif self._errors:\n errors = '@@@Issues with unique files in pack: {}\\n {}'.format(self.pack, '\\n '.join(self._errors))\n\n return errors\n\n # file utils\n def _get_pack_file_path(self, file_name=''):\n \"\"\"Returns the full file path to pack's file\"\"\"\n return os.path.join(self.pack_path, file_name)\n\n def _is_pack_file_exists(self, file_name):\n \"\"\"Check if .secrets-ignore exists\"\"\"\n if not os.path.isfile(self._get_pack_file_path(file_name)):\n self._add_error('\"{}\" file does not exist, create one in the root of the pack'.format(file_name))\n return False\n\n return True\n\n def _read_file_content(self, file_name):\n \"\"\"Open & Read a file object's content throw exception if can't\"\"\"\n try:\n with io.open(self._get_pack_file_path(file_name), mode=\"r\", encoding=\"utf-8\") as file:\n return file.read()\n except IOError:\n self._add_error('Could not open \"{}\" file'.format(file_name))\n except ValueError:\n self._add_error('Could not read the contents of \"{}\" file'.format(file_name))\n\n return False\n\n def _parse_file_into_list(self, file_name, 
delimiter='\\n'):\n \"\"\"Parse file's content to list, throw exception if can't\"\"\"\n file_content = self._read_file_content(file_name)\n try:\n if file_content:\n return file_content.split(delimiter)\n except ValueError:\n self._add_error('Could not parse the contents of \"{}\" file into a list'.format(file_name))\n\n return False\n\n # secrets validation\n def validate_secrets_file(self):\n \"\"\"Validate everything related to .secrets-ignore file\"\"\"\n if self._is_pack_file_exists(self.secrets_file) and all([self._is_secrets_file_structure_valid()]):\n return True\n\n return False\n\n def _is_secrets_file_structure_valid(self):\n \"\"\"Check if .secrets-ignore structure is parse-able\"\"\"\n if self._parse_file_into_list(self.secrets_file):\n return True\n\n return False\n\n # pack ignore validation\n def validate_pack_ignore_file(self):\n \"\"\"Validate everything related to .pack-ignore file\"\"\"\n if self._is_pack_file_exists(self.pack_ignore_file) and all([self._is_pack_ignore_file_structure_valid()]):\n return True\n\n return False\n\n def _is_pack_ignore_file_structure_valid(self):\n \"\"\"Check if .pack-ignore structure is parse-able & has valid regex\"\"\"\n try:\n pack_ignore_regex_list = self._parse_file_into_list(self.pack_ignore_file)\n if pack_ignore_regex_list and all(re.compile(regex) for regex in pack_ignore_regex_list):\n return True\n except re.error:\n self._add_error('Detected none valid regex in {} file'.format(self.pack_ignore_file))\n\n return False\n\n # pack metadata validation\n def validate_pack_meta_file(self):\n \"\"\"Validate everything related to pack-metadata.json file\"\"\"\n if self._is_pack_file_exists(self.pack_meta_file) and all([self._is_pack_meta_file_structure_valid()]):\n return True\n\n return False\n\n def _is_pack_meta_file_structure_valid(self):\n \"\"\"Check if pack-metadata.json structure is json parse-able\"\"\"\n try:\n pack_meta_file_content = self._read_file_content(self.pack_meta_file)\n if pack_meta_file_content and json.loads(pack_meta_file_content):\n return True\n except (ValueError, TypeError):\n self._add_error('Could not parse {} file contents to json format'.format(self.pack_meta_file))\n\n return False\n\n # pack README.md validation\n def validate_readme_file(self):\n \"\"\"Validate everything related to README.md file\"\"\"\n if self._is_pack_file_exists(self.readme_file):\n return True\n\n return False\n\n def validate_pack_unique_files(self):\n \"\"\"Main Execution Method\"\"\"\n self.validate_secrets_file()\n self.validate_pack_ignore_file()\n self.validate_pack_meta_file()\n self.validate_readme_file()\n return self.get_errors()\n","sub_path":"Tests/scripts/hook_validations/pack_unique_files.py","file_name":"pack_unique_files.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"27512775","text":"from typing import List, Type, Any\n\nfrom tornado.routing import _RuleList\nfrom tornado.web import RequestHandler, Application\nfrom prometheus_client import Histogram, Counter, REGISTRY\nfrom prometheus_client.exposition import choose_encoder\n\n\nclass PrometheusMixInApplication(Application):\n default_bucket = (0.01, 0.05, 0.1, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0)\n\n def __init__(self, handlers: _RuleList = None, buckets: List[float] = default_bucket, default_host: str = None,\n transforms: List[Type[\"OutputTransform\"]] = None, **settings: Any) -> None:\n handlers.append((r\"/metrics\", 
PrometheusMetricsHandler))\n        super().__init__(handlers, default_host, transforms, **settings)\n\n        self.request_time_seconds = Histogram(\n            namespace=\"tornado\",\n            subsystem=\"http\",\n            name=\"request_duration_seconds\",\n            documentation=\"HTTP request duration in seconds\",\n            buckets=buckets,\n            labelnames=(\"handler\", \"method\"),\n        )\n\n        self.requests_total = Counter(\n            namespace=\"tornado\",\n            subsystem=\"http\",\n            name=\"requests_total\",\n            documentation=\"Total of HTTP requests processed\",\n            labelnames=(\"handler\", \"method\", \"status\"),\n        )\n\n    def observe_request(self, handler):\n        handler_name = type(handler).__name__\n        method = handler.request.method\n        request_time = handler.request.request_time()\n        status = handler.get_status()\n\n        self.request_time_seconds.labels(handler_name, method).observe(request_time)\n        self.requests_total.labels(\n            handler_name, method, self.classify_status_code(status)\n        ).inc()\n\n    def log_request(self, handler):\n        super().log_request(handler)\n        self.observe_request(handler)\n\n    def classify_status_code(self, status_code):\n        \"\"\"\n        Prometheus recommends keeping label cardinality low:\n        each label combination creates a new metric in the datastore,\n        so to reduce this risk we store only the class of the status code\n        \"\"\"\n        if 200 <= status_code < 300:\n            return \"2xx\"\n\n        if 300 <= status_code < 400:\n            return \"3xx\"\n\n        if 400 <= status_code < 500:\n            return \"4xx\"\n\n        return \"5xx\"\n\n\nclass PrometheusMetricsHandler(RequestHandler):\n    def get(self):\n        encoder, content_type = choose_encoder(self.request.headers.get('accept'))\n        self.set_header(\"Content-Type\", content_type)\n        self.write(encoder(REGISTRY))\n","sub_path":"tornado_prometheus/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"44257833","text":"import pkgutil\nimport importlib\nfrom functools import reduce  # reduce is not a builtin on Python 3\n\nmodules = [name for _, name, _ in pkgutil.iter_modules(__path__)]\nfor module in modules:\n    importlib.import_module(__package__+'.'+module)\n\nforces = [eval(module).ipi_force for module in modules if hasattr(eval(module), 'ipi_force')]\n\nforce_cache = None\ncall_calc_forces = False\n\n\ndef calc(integrator):\n    global force_cache, call_calc_forces\n\n    #calc is called twice per velocity verlet step, so cache the forces for the next call\n    call_calc_forces = not call_calc_forces\n    if call_calc_forces:\n        force_cache = calc_forces(integrator)\n    return force_cache\n\n\ndef calc_forces(integrator):\n    return reduce(lambda f, force: f + force(integrator), forces, 0)\n","sub_path":"ipi/engine/extraforces/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"582690766","text":"from datetime import date\nfrom flask import request, abort\nfrom flask_restful import Resource\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\n\nfrom app import db, models\nfrom app.api.v1_0 import check_fields\n\naccess_fields = {'id', 'name',\n                 'email', 'bdate',\n                 'created_events',\n                 'about',\n                 'events', 'comments'}\n\n\ndef parse(o, fields):\n    dct = {}\n    for f in fields:\n        if f == 'events':\n            dct[f] = [e.id for e in getattr(o, f)]\n        elif f == 'created_events':\n            dct[f] = [e.id for e in getattr(o, f)]\n        elif f == 'comments':\n            dct[f] = [c.id for c in getattr(o, f)]\n        elif f == 'bdate':\n            bdate = o.bdate\n            dct[f] = str(bdate.day) + \\\n                     '.' 
+ str(bdate.month) + \\\n                     '.' + str(bdate.year)\n        else:\n            dct[f] = getattr(o, f)\n    return dct\n\n\nclass Users(Resource):\n    decorators = [jwt_required]\n\n    @staticmethod\n    def parse(o, fields):\n        if fields == {'id'}:\n            return o.id\n        return parse(o, fields)\n\n    def get(self):\n        fields = set(request.args.get('fields', 'id').split(','))\n        fields = check_fields(fields, access_fields)\n        users = models.User.query\n        res = [Users.parse(u, fields) for u in users]\n        return res\n\n    def post(self):\n        data = request.get_json()\n        username = data.get('username', None)\n        password = data.get('password', None)\n        name = data.get('name', None)\n        if not (username and password and name):\n            return abort(400, 'Bad Request')\n        if models.User.query.filter_by(email=username).first():\n            return abort(409, 'This username already exists.')\n        if models.User.query.filter_by(name=name).first():\n            return abort(409, 'This name already exists.')\n        user = models.User(name, username, password)\n        db.session.add(user)\n        db.session.commit()\n        uid = models.User.query.filter_by(email=username).first().id\n        return {'id': uid}, 201\n\n\nclass User(Resource):\n    decorators = [jwt_required]\n\n    @staticmethod\n    def parse(o, fields):\n        if fields == {'id'}:\n            return {'id': o.id, 'name': o.name}\n        return parse(o, fields)\n\n    def get(self, user_id):\n        fields = set(request.args.get('fields', 'id').split(','))\n        fields = check_fields(fields, access_fields)\n        u = models.User.query.get_or_404(user_id)\n        res = User.parse(u, fields)\n        return res\n\n    def put(self, user_id):\n        if get_jwt_identity().get('id') != user_id:\n            return abort(403)\n        u = models.User.query.get_or_404(user_id)\n        if not u:\n            return abort(404)\n        params = request.get_json().get('params')\n        if not params:\n            return abort(400)\n        for field in params:\n            if field == 'username':\n                if not models.User.query.get(user_id).email == params['username'] and \\\n                        models.User.query.filter_by(email=params['username']).first():\n                    return abort(406)\n                u.email = params['username']\n            elif field == 'bdate':\n                # bdate arrives as 'dd.mm.yyyy'; date() expects (year, month, day)\n                day, month, year = (int(part) for part in params['bdate'].split('.'))\n                u.bdate = date(year, month, day)\n            else:\n                setattr(u, field, params[field])\n        db.session.add(u)\n        db.session.commit()\n        return {'message': 'User has been successfully updated.'}\n\n    def delete(self, user_id):\n        if get_jwt_identity().get('id') != user_id:\n            return abort(403)\n        db.session.delete(models.User.query.get(user_id))\n        db.session.commit()\n        return {'message': 'User has been successfully deleted.'}\n","sub_path":"app/api/v1_0/users/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"466179865","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul  9 21:42:40 2016\n\n@author: owen\n\"\"\"\n\n # Notice: the range of elements must be in the inclusive range [lower, upper]\n # Sorted array may include duplicates\n \nclass Solution(object):\n    def getRange(self, lower, upper):\n        if lower==upper:\n            return '{}'.format(lower)\n        elif lower<upper:\n            return '{}->{}'.format(lower,upper)\n        else:\n            return []\n    \n    def findMissingRanges(self,num,lower,upper):\n        ranges = []\n        pre = lower - 1\n        for i in range(len(num)):\n            curr = num[i]\n            if curr-pre >= 2: # at least one missing element\n                ranges.append(self.getRange(pre+1, curr-1))\n            pre = curr \n        \n        if upper > pre: # check if upper didn't overlap in the nums array\n            ranges.append(self.getRange(pre+1, upper))\n        return ranges\n\n    \n#class Solution(object): \n#    def 
findMissingRanges(self,num,lower,upper):\n#        # nested functions\n#        def getRange(lower,upper):\n#            if lower==upper:\n#                return '{}'.format(lower)\n#            elif lower<upper:\n#                return '{}->{}'.format(lower,upper)\n#            else:\n#                return []\n#\n#        ranges=[]\n#        pre=lower-1\n#        for i in range(len(num)+1):\n#            if i==len(num):\n#                curr=upper+1\n#            else:\n#                curr=num[i]\n#            if curr-pre>1:\n#                ranges.append(getRange(pre+1,curr-1))\n#            pre=curr \n#        return ranges\n\n\n#class Solution(object):\n#    def findMissingRanges(self, nums, lower, upper):\n#        \"\"\"\n#        :type nums: List[int]\n#        :type lower: int\n#        :type upper: int\n#        :rtype: List[str]\n#        \"\"\"\n#        nums = [lower - 1] + nums + [upper + 1] # nums must be in the range of [lower-1: upper+1]\n#        ranges = []\n#        for i in range(1, len(nums)):\n#            if nums[i] - nums[i - 1] == 2: # missing one element\n#                ranges.append(str(nums[i] - 1))\n#            elif nums[i] - nums[i - 1] > 2: # missing two or more elements\n#                ranges.append(str(nums[i - 1] + 1) + \"->\" + str(nums[i] - 1))\n##            else: # pass continuous elements, or duplicates\n##                pass\n#\n#        return ranges\n \n \nif __name__==\"__main__\":\n    print(Solution().findMissingRanges([],1,2))\n#    print(Solution().findMissingRanges([0,1,3,50,75],-1,69)) # lower & upper out of range\n    print(Solution().findMissingRanges([0,1,3,50,75],0,79))\n    print(Solution().findMissingRanges([-2147483648,-2147483648,0,2147483647,2147483647],-2147483648,2147483647))\n","sub_path":"163. Missing Ranges.py","file_name":"163. Missing Ranges.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"504526521","text":"from django.shortcuts import HttpResponse\nfrom tyc.tianyancha import TianYanCha\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\n\n# Create your views here.\ndef get_func(request):\n    target = request.GET.get('target')\n    tyc = TianYanCha(target)\n    shuihao = tyc.run()\n    dic = {\n        'target': target,\n        'shui_hao': shuihao,\n        'success': '1' if shuihao is not None else '0'\n    }\n    return HttpResponse(str(dic))\n\n\ndef post_func(request):\n    if request.method == 'POST':\n        target = dict(request.POST)\n        name = target['target'][0]\n        tyc = TianYanCha(name)\n        shuihao = tyc.run()\n        dic = {\n            'target': name,\n            'shui_hao': shuihao,\n            'success': '1' if shuihao is not None else '0'\n        }\n\n        return HttpResponse(str(dic))\n\n\ndef sub_function(request):\n    pass\n\n\ndef home(request):\n    return render_to_response(\"index.html\")\n\n    # name = \"hello world!\"\n    # dic = {\n    #     \"name\": name\n    # }\n    # return HttpResponse(str(dic))\n","sub_path":"dj_shuihao/dj_shuihao/my_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"78429116","text":"import collections\n\nfrom kameleon_rks.densities.gaussian import sample_gaussian, log_gaussian_pdf\nfrom old.gaussian_rks import sample_basis, feature_map, \\\n    feature_map_grad_single, feature_map_single, gamma_median_heuristic\nimport numpy as np\n\n\nclass KameleonRKSGaussian():\n    \"\"\"\n    Implements a random kitchen sink version of Kameleon MCMC.\n    \"\"\"\n    \n    def __init__(self, D, kernel_gamma, m, step_size, gamma2=0.1, schedule=None, acc_star=0.234,\n                 update_kernel_gamma=None, update_kernel_gamma_schedule=None, update_kernel_gamma_tol=0.1):\n        \"\"\"\n        D - Input space dimension\n        kernel_gamma - Gaussian kernel parameter\n        m - Feature space dimension\n        gamma2 - Exploration parameter. 
Kameleon falls back\n to random walk with that scaling when in unexplored regions.\n Increasing increases exploration but decreases mixing in explored regions.\n step_size - Gradient step size. Effectively a scaling parameter.\n schedule - Optional. Function that generates adaptation weights\n given the MCMC iteration number.\n The weights are used in the stochastic updating of the\n feature covariance.\n If not set, feature covariance is never updated. In that case, call\n batch_covariance() before using. \n acc_star Optional: If set, the step_size parameter is tuned so that\n average acceptance equals eta_star, using the same schedule\n as for the covariance update (If schedule is set, otherwise\n ignored)\n update_kernel_gamma - Optional. If set to an integer, collects a sliding\n window of past samples in the update() method.\n Uses 1./t as probability to re-set the\n kernel bandwidth via computing the media distance\n in the collected samples. Then updates the random\n feature basis.\n The window size depends on the support of the distribution,\n and in particular the number of samples to estimate the median\n distance reliably. Set to a few thousand if in doubt.\n Suggested to only use in pilot runs and then fix the found\n kernel_gamma.\n Note that a separate updating schedule can be provided.\n update_kernel_gamma_schedule - Optional. A schedule different to 1./t can be provided to\n update the kernel bandwidth.\n update_kernel_gamma_tol - Optional. Tolerance for kernel parameter update.\n Bandwidth is only updated if at least that far from previous\n \"\"\"\n self.kernel_gamma = kernel_gamma\n self.m = m\n self.D = D\n self.gamma2 = gamma2\n self.step_size = step_size\n self.schedule = schedule\n self.acc_star = acc_star\n self.update_kernel_gamma = update_kernel_gamma\n self.update_kernel_gamma_schedule = update_kernel_gamma_schedule\n self.update_kernel_gamma_tol = update_kernel_gamma_tol\n \n # scaling parameter evolution might be collected to assess convergence\n self.nu2s = [step_size]\n \n # some sanity checks\n if acc_star is not None:\n assert acc_star > 0 and acc_star < 1\n \n if schedule is not None:\n lmbdas = np.array([schedule(t) for t in np.arange(100)])\n assert np.all(lmbdas > 0)\n assert np.allclose(np.sort(lmbdas)[::-1], lmbdas)\n \n if self.update_kernel_gamma:\n self.past_samples = collections.deque()\n \n if self.update_kernel_gamma_schedule is not None:\n lmbdas = np.array([update_kernel_gamma_schedule(t) for t in np.arange(100)])\n assert np.all(lmbdas > 0)\n assert np.allclose(np.sort(lmbdas)[::-1], lmbdas)\n \n self._initialise()\n \n def _initialise(self):\n \"\"\"\n Initialises internal state. 
To be called before MCMC chain starts.\n \"\"\"\n # fix feature space random basis\n self.omega, self.u = sample_basis(self.D, self.m, self.kernel_gamma)\n \n # _initialise running averages for feature covariance\n self.t = 0\n\n if self.schedule is not None:\n # start from scratch\n self.mu = np.zeros(self.m)\n \n # _initialise as isotropic\n self.C = np.eye(self.m)\n else:\n # make user call the set_batch_covariance() function\n self.mu = None\n self.C = None\n\n def set_batch_covariance(self, Z):\n Phi = feature_map(Z, self.omega, self.u)\n self.mu = np.mean(Phi, axis=0)\n self.C = np.cov(Phi.T)\n \n def update_step_size(self, accept_prob):\n # generate learning rate\n lmbda = self.schedule(self.t)\n \n # difference desired and actuall acceptance rate\n diff = accept_prob - self.acc_star\n \n self.step_size = np.exp(np.log(self.step_size) + lmbda * diff)\n\n def next_iteration(self):\n self.t += 1\n \n\n def update(self, z_new, previous_accpept_prob):\n \"\"\"\n Updates the proposal covariance and potentially scaling parameter, according to schedule.\n Note that every call increases a counter that is used for the schedule (if set)\n \n If not schedule is set, this method does not have any effect unless counting.\n \n Parameters:\n z_new - A 1-dimensional array of size (D) of.\n previous_accpept_prob - Acceptance probability of previous iteration\n \"\"\"\n self.next_iteration()\n \n if self.schedule is not None:\n # generate updating weight\n lmbda = self.schedule(self.t)\n \n # project current point\n phi = feature_map_single(z_new, self.omega, self.u)\n \n # update\n centred = self.mu - phi\n self.mu = self.mu * (1 - lmbda) + lmbda * phi\n self.C = self.C * (1 - lmbda) + lmbda * np.outer(centred, centred)\n \n # update scalling parameter if wanted\n if self.acc_star is not None:\n self.update_step_size(previous_accpept_prob)\n self.nu2s.append(self.step_size)\n \n if self.update_kernel_gamma is not None:\n # update sliding window\n self.past_samples.append(z_new)\n if len(self.past_samples) > self.update_kernel_gamma:\n self.past_samples.popleft()\n \n num_samples_window = len(self.past_samples)\n \n # probability of updating\n if self.update_kernel_gamma_schedule is not None:\n update_prob = self.update_kernel_gamma_schedule(self.t)\n else:\n update_prob = 1. 
/ (self.t + 1)\n \n # update kernel bandwidth (if window full yet)\n if np.random.rand() < update_prob and num_samples_window >= self.update_kernel_gamma:\n \n # transform past samples into array\n Z = np.array(self.past_samples)\n \n # compute new kernel gamma\n print(\"Checking whether to update kernel_gamma\")\n new_kernel_gamma = gamma_median_heuristic(Z, num_samples_window)\n diff = np.abs(new_kernel_gamma - self.kernel_gamma)\n \n # only update if change above tolerance\n if np.abs(diff > self.update_kernel_gamma_tol):\n self.kernel_gamma = new_kernel_gamma\n \n # re-sample basis\n self.omega, self.u = sample_basis(self.D, self.m, self.kernel_gamma)\n \n # populate feature covariance from past samples\n self.set_batch_covariance(Z)\n \n print(\"Updated kernel gamma to %.3f (from %d samples)\" % (self.kernel_gamma, num_samples_window))\n \n def proposal(self, current):\n \"\"\"\n Returns a sample from the proposal centred at current, and its log-probability\n \"\"\"\n \n if self.schedule is None and (self.mu is None or self.C is None):\n raise ValueError(\"Kameleon has not seen data yet.\" \\\n \"Either call set_batch_covariance() or set update schedule\")\n \n L_R = self._construct_proposal_covariance(current)\n proposal = sample_gaussian(N=1, mu=current, Sigma=L_R, is_cholesky=True)[0]\n proposal_log_prob = log_gaussian_pdf(proposal, current, L_R, is_cholesky=True)\n \n # probability of proposing current when would be sitting at proposal\n L_R_inv = self._construct_proposal_covariance(proposal)\n proopsal_log_prob_inv = log_gaussian_pdf(current, proposal, L_R_inv, is_cholesky=True)\n \n return proposal, proposal_log_prob, proopsal_log_prob_inv\n \n def _construct_proposal_covariance(self, y):\n \"\"\"\n Helper method to compute Cholesky factor of the Gaussian Kameleon-lite proposal centred at y.\n \"\"\"\n # compute gradient projection\n grad_phi_y = feature_map_grad_single(y, self.omega, self.u)\n \n # construct covariance, adding exploration noise\n R = self.gamma2 * np.eye(self.D) + self.step_size * np.dot(grad_phi_y, (self.m ** 2) * np.dot(self.C, grad_phi_y.T))\n L_R = np.linalg.cholesky(R)\n \n return L_R\n \n","sub_path":"old/KameleonRKSGaussian.py","file_name":"KameleonRKSGaussian.py","file_ext":"py","file_size_in_byte":10478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"76969331","text":"from django.test import TestCase\nfrom stack_it.models import Page\nfrom django.conf import settings\n\nclass TextPageContentModelTest(TestCase):\n def test_page_existence(self):\n page = Page.objects.create(title=\"Hello\", template_path=\"base.html\")\n response = self.client.get(page.ref_full_path)\n self.assertEqual(\n response.status_code,\n 200,\n {\"response\": response.content, \"url\": page.ref_full_path},\n )\n\n def test_page_redirect(self):\n page = Page.objects.create(title=\"Hello\", template_path=\"base.html\")\n url = page.ref_full_path\n page.title = \"New Title\"\n page.save()\n response = self.client.get(url, follow=True)\n self.assertEqual(\n response.status_code,\n 200,\n {\"response\": response.content, \"url\": page.ref_full_path},\n )\n self.assertIn(\n (page.ref_full_path, 301), response.redirect_chain, response.redirect_chain\n )\n\n def test_404(self):\n page = Page.objects.create(title=\"Hello\", template_path=\"base.html\")\n response = self.client.get(\"page.ref_full_path\")\n self.assertEqual(response.status_code, 
404)\n\n","sub_path":"tests/test_urls/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456042700","text":"from turtle import *\n\ndef drawShape(numSides, sideLength, colour = 'black', \\\n doFill = False, startX = -1, startY = -1):\n if (doFill):\n begin_fill()\n\n if startX >= 0 and startY >= 0:\n goto(startX, startY)\n\n color(colour, \"\")\n pendown()\n \n turningAngle = 360.0 / numSides\n for n in range(numSides):\n forward(sideLength)\n right(turningAngle)\n\n penup()\n\n if (doFill):\n end_fill()\n\n##numSidesAnswer = input(\"How many sides on the shape? \")\n##numSides = 6 if numSidesAnswer == \"\" else int(numSidesAnswer)\n\nsideLengthAnswer = input(\"How long are the sides? \")\nsideLength = 50 if sideLengthAnswer == \"\" else int(sideLengthAnswer)\n\nnumShapesAnswer = input(\"How many shapes? \")\nnumShapes = 30 if numShapesAnswer == \"\" else int(numShapesAnswer)\n\nangleStep = 360 // numShapes\ncolours = ['', '', '', 'red', 'orange', 'yellow', 'green', 'blue']\n\nhideturtle()\nspeed(0)\nfor n in range(7, 2, -1):\n for i in range(0, 360, angleStep):\n ##drawShape(numSides, sideLength)\n drawShape(n, sideLength * n / 3, colours[n])\n right(angleStep)\n","sub_path":"Programs/Turtle/patterns1.py","file_name":"patterns1.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"345849736","text":"# -*- coding: utf-8 -*-\nimport werkzeug\nimport base64\nimport requests\nimport logging\nimport odoo.http as http\nfrom odoo.http import request\n\n_logger = logging.getLogger(__name__)\n\n\nclass SupportTicketController(http.Controller):\n\n @http.route('/support/ticket/submit', type=\"http\",\n auth=\"user\", website=True)\n def support_submit_ticket(self, **kw):\n \"\"\"Let's public and registered user submit a support ticket\"\"\"\n person_name = http.request.env.user.name\n\n category_access = []\n for category_permission in http.request.env.user.groups_id:\n category_access.append(category_permission.id)\n\n ticket_categories = http.request.env['project.tags'].sudo().search([])\n\n return http.request.render(\n 'ecs_website_support.support_submit_ticket',\n {'categories': ticket_categories,\n 'person_name': person_name,\n 'email': http.request.env.user.email})\n\n @http.route('/support/ticket/process', type=\"http\", auth=\"public\",\n website=True, csrf=True)\n def support_process_ticket(self, **kwargs):\n \"\"\"Adds the support ticket to the database and sends out emails to\n everyone following the support ticket category\"\"\"\n values = {}\n for field_name, field_value in kwargs.items():\n values[field_name] = field_value\n\n if values['my_gold'] != \"256\":\n return \"Bot Detected\"\n\n create_dict = {}\n customer_id = request.env['res.partner'].sudo().search(\n [('name', '=', values['person_name'])])\n\n try:\n project_name = \\\n values['subject'].split(':')[0].split('-')[1].strip()\n project_id = request.env['project.project'].sudo().search(\n [('name', '=', project_name.upper())])\n if not project_id:\n project_id = request.env['project.project'].sudo().search(\n [('project_index', '!=', False)])\n except Exception:\n project_id = request.env['project.project'].sudo().search(\n [('project_index', '!=', False)])\n\n create_dict = {'name': values['subject'],\n 'partner_id': customer_id.id,\n 'email_from': values['email'],\n 'description': 
values['description'],\n 'tag_ids': [(6, 0, [values['category']])],\n 'project_id': project_id.id}\n new_task_id = request.env['project.task'].sudo().create(create_dict)\n\n if 'file' in values:\n for c_file in request.httprequest.files.getlist('file'):\n data = c_file.read()\n if c_file.filename:\n request.env['ir.attachment'].sudo().create({\n 'name': c_file.filename,\n 'datas': base64.b64encode(data),\n 'datas_fname': c_file.filename,\n 'res_model': 'project.task',\n 'res_id': new_task_id.id\n })\n return werkzeug.utils.redirect(\"/support/ticket/thanks\")\n\n @http.route('/support/ticket/thanks', type=\"http\",\n auth=\"public\", website=True)\n def support_ticket_thanks(self, **kw):\n \"\"\"Displays a thank you page after the user submits a ticket\"\"\"\n return http.request.render('website_crm.contactus_thanks', {})\n","sub_path":"ecs_website_support/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348280359","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport pdb \nfrom torch.autograd import Variable\n\n\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.nn.utils.weight_norm as wtnrm\n\nimport numpy as np\nfrom keras.preprocessing.sequence import pad_sequences\n\nimport sys\nsys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/MT_Transformer/')\nfrom MT_TransV1.Trans_Decoder import Decoder\nfrom MT_TransV1.Trans_Encoder import Encoder\n\n\n#import sys\n#sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/MT_Transformer/MT_Transformer')\n\n\n#--------------------------------------------------------------------------\nclass Transformer(nn.Module):\n \"\"\"An encoder-decoder framework only includes attention. 
\"\"\"\n def __init__(self,args):\n super(Transformer, self).__init__() \n self.label_smoothing = args.label_smoothing\n self.encoder = Encoder(args=args,MT_flag=True)\n self.decoder = Decoder(args=args)\n \n #----------------------------------\n def forward(self, padded_Src_seq,padded_Tgt_seq):\n ###conv layers\n\n #General Transformer MT model\n encoder_padded_outputs, *_ = self.encoder(padded_Src_seq)\n \n\n #output_dict = self.decoder(padded_Tgt_seq, encoder_padded_outputs)\n pred, gold = self.decoder(padded_Tgt_seq, encoder_padded_outputs)\n \n\n #cost, CER = cal_performance(pred, gold,self.decoder.IGNORE_ID,normalize_length=False,smoothing=self.decoder.label_smoothing)\n #breakpoint()\n # output_dict={'cost':cost, 'CER':CER, 'smp_pred':pred,'smp_gold':gold} \n #output_dict = {'cost':cost, 'dec_slf_attn_list':dec_slf_attn_list, 'dec_enc_attn_list':dec_enc_attn_list, 'Char_cer':CER, 'Word_cer':CER}\n\n return pred, gold\n #=============================================================================================================\n #=============================================================================================================\n #==============================================================================\n def predict(self, Src_tokens,args):\n #print(\"went to the decoder loop\")\n \n\n with torch.no_grad():\n #### read feature matrices \n \n smp_Src_labels = torch.LongTensor(Src_tokens)\n smp_Src_labels = smp_Src_labels.cuda() if args.gpu else smp_Src_labels\n smp_Src_labels = smp_Src_labels.unsqueeze(0)\n \n\n #General Transformer ASR model\n encoder_padded_outputs, *_ = self.encoder(smp_Src_labels)\n nbest_hyps,scoring_list = self.decoder.recognize_batch_beam_autoreg_LM_multi_hyp(encoder_padded_outputs,args.beam,args.Am_weight,args.gamma,args.LM_model,args.len_pen,args)\n #===================================================================================\n beam_len = nbest_hyps.size(0)\n hyp = {'score': 0.0, 'yseq': None,'state': None, 'alpha_i_list':None, 'Text_seq':None}\n\n #===============================================\n Output_dict=[]\n for I in range(beam_len): \n\n new_hyp={}\n new_hyp['yseq'] = nbest_hyps[I]\n new_hyp['score'] = scoring_list[I].sum()\n #new_hyp['Text_seq'] = self.decoder.get_charecters_for_sequences(nbest_hyps[I].unsqueeze(0))\n new_hyp['Text_seq'] = self.decoder.get_charecters_for_sequences(nbest_hyps[I].unsqueeze(0),self.decoder.Tgt_model,self.decoder.pad_index,self.decoder.eos_id,self.decoder.word_unk)\n\n new_hyp['state'] = hyp['state']\n new_hyp['alpha_i_list'] = hyp['alpha_i_list']\n\n Output_dict.append(new_hyp)\n return Output_dict\n #----------------------------------------------------------------\n\n\n\n#=============================================================================================================\n#=============================================================================================================\n#-------------------------------------------------------------------------------------------------------------\n\n#=============================================================================================================\n#-------------------------------------------------------------------------------------------------------------\nclass TransformerOptimizer(object):\n \"\"\"A simple wrapper class for learning rate scheduling\"\"\"\n\n def __init__(self, optimizer, k, d_model, step_num=0, warmup_steps=4000, warm_restart=200000):\n self.optimizer = optimizer\n \n self.optimizer_org = optimizer\n self.k = 
k\n \n #present_lr=[param_group['lr'] for param_group in self.optimizer.param_groups]\n self.init_lr = d_model ** (-0.5)\n self.warmup_steps = warmup_steps\n self.step_num = step_num\n\n self.reduction_factor=1\n self.warm_restart = warm_restart\n\n\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def step(self):\n self._update_lr()\n self.optimizer.step()\n self.warm_restartfn()\n\n\n def _update_lr(self):\n\n\n self.step_num += 1\n lr = self.k * self.init_lr * min(self.step_num ** (-0.5), self.step_num * (self.warmup_steps ** (-1.5)))\n #print(lr,self.step_num ** (-0.5),self.step_num * self.warmup_steps ** (-1.5),self.reduction_factor)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n\n\n\n\n def load_state_dict(self, state_dict):\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def set_k(self, k):\n self.k = k\n\n def set_step_num(self, step_num):\n self.step_num=step_num\n\n def reduce_learning_rate(self, k):\n self.reduction_factor = self.reduction_factor*k\n #print(self.reduction_factor)\n \n def print_lr(self):\n present_lr=[param_group['lr'] for param_group in self.optimizer.param_groups]\n return present_lr[0]\n\n def warm_restartfn(self):\n if (self.step_num%self.warm_restart==0):\n self.optimizer = self.optimizer_org\n self.step_num = self.warm_restart\n\n#=============================================================================================================\n\n#---------------------------------------------------------------------------------------------------------------\n#===============================================================================================================\n#===============================================================================================================\n#===============================================================================================================\n#===============================================================================================================\n#===============================================================================================================\n\n","sub_path":"MT_TransV1/TRANSFORMER_MT_V1_parllel.py","file_name":"TRANSFORMER_MT_V1_parllel.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"646843599","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\ndata = [] #create empty list\nwrite_file = []\nwith open('data/driving_log.csv', 'rU') as f:\n #reader = csv.reader(f, ' ', quoting=csv.QUOTE_NONNUMERIC)\n reader = csv.reader(f)\n first_line=True\n count=0\n for line in reader:\n if(first_line):\n first_line=False\n else:\n if float(line[3])==0 and count <4000:\n count+=1\n else:\n data.append(float(line[3]))\n write_file.append(line)\n\nwith open(\"data/driving_log_truncated.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(write_file)\n\nprint(\"Ignored these zeros:\",count)\n# generate the histogram\nhist, bin_edges=np.histogram(data, bins=50, range=[-1, 1])\n\n\n# generate histogram figure\nplt.hist(data, bin_edges)\n#plt.savefig('chart_file', format=\"pdf\")\n#plt.show()\n\nfrom keras.utils import plot_model\n\nfrom keras.models import load_model\nimport h5py\n\n#model = load_model(\"model.h5\")\n#plot_model(model, to_file='model.png')\n\n\nimport cv2\nname = 'straight.jpg'\ncenter_image = cv2.imread(name)\ncenter_image = 
cv2.flip(center_image,1)\ncv2.imwrite('flip.jpg', center_image)\n\n","sub_path":"analyize_csv.py","file_name":"analyize_csv.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"206979142","text":"class CspProblem(object):\n def __init__(self, variables, domains, constraints):\n self.variables = variables\n self.domains = domains\n self.constraints = constraints\n\n self.var_contraints = dict([(v, [constraint for constraint in constraints if v in constraint[0]])\n for v in variables])\n\n self.var_degrees = dict([(v, len(self.var_contraints[v]))\n for v in variables])\n\n\n\n\n\n","sub_path":"timetabling/timetabling/csp_solver.py","file_name":"csp_solver.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"564499908","text":"import speech_recognition as sr\nimport LSMachineLearning1 as ls\nfrom gtts import gTTS\nimport time\nimport os\n\ndef let_mini_speak(answer1):\n tts = gTTS(text=answer1, lang='en')\n print(\"Machine file generated\")\n tts.save(\"pcVoice.mp3\")\n # to start the file from python\n os.system(\"start pcVoice.mp3\")\n print(\"Miny Said: \" + answer1)\n return answer1\n\ndef decision_on_good_to_play_football(answer1):\n return answer1\n\n\nquery = \"can I play Football\"\n\nfor x in range(1,50):\n #get audio from the microphone\n time.sleep(5)\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Please Speak:\")\n audio = r.listen(source)\n\n try:\n # Using of Dictionary this time\n questionRepository = {'who are you': \"I am Machine\", 'who made you': \"You made me sir. By the way ,thanks for\"\n + \"making me.\", 'can you hack': \"Sorry sir, I am not designed to hack\",'what is my wife name':\"Your wife name is manashi\",\n 'thanks': \"Thanks i am exiting\",'can I play Football':\"Good to know that\",'can I play football':\"Good to know that\",\n 'Sunny':\"Sunny\",'Sani':\"Sunny\",'OverCast':\"OverCast\",'overcast':\"OverCast\",'Rain':\"Rain\",'One':\"Sunny\",'1':\"Sunny\",\n 'Hot':\"Hot\",'hot':\"Hot\",'Cool':\"Cool\",'Mild':\"Mild\",'One':\"Hot\",'1':\"Hot\",\n 'High':\"High\",'high':\"High\",'Hi':\"High\",'hi':\"High\",'Normal':\"Normal\",\n 'Weak':\"Weak\",'pic':\"Weak\",'week':\"Weak\",'weak':\"Weak\",'Strong':\"Strong\",'strong':\"Strong\",'One':\"Weak\",'1':\"Weak\",\n 'What are different types of document process in LS':\"LS processes Income statements, Bank statements, Dealer forms\"}\n\n print(\"You said : \" + r.recognize_google(audio))\n if query == r.recognize_google(audio):\n answer = questionRepository.get(r.recognize_google(audio))\n let_mini_speak(answer)\n audioParam1 = ''\n \n let_mini_speak(\"What is the Outlook outside, is it Sunny, Rain or OverCast\")\n r1 = sr.Recognizer()\n with sr.Microphone() as source1:\n print(\"Please Speak (Sunny/Rain/OverCast):\")\n audioParam1 = r1.listen(source1)\n answer1 = questionRepository.get(r1.recognize_google(audioParam1))\n answer1 = let_mini_speak(answer1)\n time.sleep(5)\n \n let_mini_speak(\"What is the Temperature outside, is it Hot, Cool or Mild\")\n r2 = sr.Recognizer()\n with sr.Microphone() as source2: \n print(\"Please Speak (Hot/ Cool/ Mild):\")\n audioParam2 = r2.listen(source2)\n answer2 = questionRepository.get(r2.recognize_google(audioParam2))\n let_mini_speak(answer2)\n time.sleep(5)\n \n let_mini_speak(\"How is the Wind outside, is it Weak or Strong\")\n r3 = sr.Recognizer()\n with 
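# A side note on the cv2.flip call above: if the flipped frame is used as
# extra training data for a steering model (the script reads steering angles
# from driving_log.csv), the label has to be negated along with the image.
# A small sketch; the angle argument is hypothetical, not part of the script:
def flip_sample(image, angle):
    # flipCode=1 mirrors around the vertical axis, so a left turn
    # becomes an equally sharp right turn
    return cv2.flip(image, 1), -angle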
sr.Microphone() as source3: \n print(\"Please Speak (Weak / Strong):\")\n audioParam3 = r3.listen(source3)\n answer3 = questionRepository.get(r3.recognize_google(audioParam3))\n print(\"Miny Said: \" + r3.recognize_google(audioParam3))\n let_mini_speak(answer3)\n time.sleep(5)\n \n result = ls.test_cartDecisionTree2(answer1,answer3)\n #result = ls.test_cartDecisionTree2('Sunny','Weak')\n print(result)\n for lbl in result:\n print(result[lbl])\n if 'No' == result[lbl]:\n print('It will not be a suitable weather for you to play')\n let_mini_speak('It will not be a suitable weather for you to play')\n elif 'Yes' == result[lbl]:\n print('Great enjoy and play football')\n let_mini_speak('Great enjoy and play football')\n else:\n print('Not able to predict, sorry')\n \n elif r.recognize_google(audio) in questionRepository:\n answer = questionRepository.get(r.recognize_google(audio))\n let_mini_speak(answer)\n else:\n answer = \"Sorry I don't have answer for this dictionary. You can ask something else.\"\n let_mini_speak(answer)\n \n # IF YOU WILL SAY THANKS MACHINE WILL STOP\n\n if r.recognize_google(audio) == 'thanks':\n print(\"You said : \" + r.recognize_google(audio))\n answer = 'Nice to meet you Bye'\n tts = gTTS(text=answer, lang='en')\n print(\"Machine file generated\")\n tts.save(\"pcVoice.mp3\")\n # to start the file from python\n os.system(\"start pcVoice.mp3\")\n print(\"Miny Said: \" + answer)\n break\n except sr.UnknownValueError:\n #print('Could not understand audio')\n print('')\n except sr.UnknownValueError:\n print('error')\n except sr.RequestError as e:\n print('Could not request results; {0}'.format(e))\n\n \n\n \n\n\n\n\n","sub_path":"ChatBot_LS/LiveSpreadChatBot.py","file_name":"LiveSpreadChatBot.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"339060685","text":"#!/usr/bin/env python3\nimport sys\nimport socket\n\ndef get_accessible_ports(address, min_port, max_port):\n\tfound_ports = []\n\t\n\t# write code here\n\t\n\tfor p in range(min_port, max_port+1):\n\t\ttry:\n\t\t\ts = socket.socket()\n\t\t\ts.connect((address, p))\n\t\t\tmes = s.recv(1024)\n\t\t\tprint(mes)\n\t\t\tfound_ports.append(p)\n\t\texcept Exception as e:\n\t\t\tpass\n\t\t\n\treturn found_ports\n\n\ndef main(argv):\n\taddress = sys.argv[1]\n\tmin_port = int(sys.argv[2])\n\tmax_port = int(sys.argv[3])\n\tports = get_accessible_ports(address, min_port, max_port)\n\tfor p in ports:\n\t\tprint(p)\n\n# This makes sure the main function is not called immediatedly\n# when TMC imports this module\nif __name__ == \"__main__\": \n\tif len(sys.argv) != 4:\n\t\tprint('usage: python %s address min_port max_port' % sys.argv[0])\n\telse:\n\t\tmain(sys.argv)\n","sub_path":"part1-01.portscanner/src/portscanner.py","file_name":"portscanner.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"624185255","text":"\"\"\"\nklifs.py\nDefines the Klifs class\n\n\"\"\"\n\nclass Klifs(object):\n\n def __init__(self, pdb, chain, kinase_id, name, struct_id, ligand, pocket_seq, numbering):\n \"\"\"This script defines a Klifs class of which any kinase can be represented as an object with the\n following parameters:\n\n Parameters\n ----------\n pdb: str\n The PDB code of the structure.\n chain: str\n The chain index of the structure.\n kinase_id: int\n The standard ID of a kinase enforced by the KLIFS database.\n name: str\n The standard name 
of the kinase used by the KLIFS database.\n struct_id: int\n The ID associated with a specific chain in the pdb structure of a kinase.\n ligand: str\n The ligand name as it appears in the pdb file.\n pocket_seq: str\n The 85 discontinuous residues (from multi-sequence alignment) that define the binding pocket of a kinase.\n numbering: list of int\n The residue indices of the 85 pocket residues specific to the structure.\n\n \"\"\"\n\n self.pdb = pdb\n self.chain = chain\n self.kinase_id = kinase_id\n self.name = name\n self.struct_id = struct_id\n self.ligand = ligand\n self.pocket_seq = pocket_seq\n self.numbering = numbering\n","sub_path":"kinomodel/features/klifs.py","file_name":"klifs.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"54918969","text":"\nfrom collections import namedtuple\nimport yaml\nimport json\nimport itertools\nimport datetime\n\n\ndef sequence():\n '''\n Sequence numbers are associated with a stream. They are used to determine\n the order of the messages sent in a stream.\n '''\n return itertools.count(1)\n\n\ndef now():\n '''\n Returns a string formatted timestamp for now in UTC time.\n '''\n return datetime.datetime.now(datetime.timezone.utc).isoformat()\n\n\ndef serialize(message):\n '''\n Serializes a message to YAML format.\n '''\n return [message.__class__.__name__.encode(), yaml.dump(dict(message._asdict())).encode()]\n\n\ndef json_serialize(message):\n '''\n Serializes a message to JSON format.\n '''\n return json.dumps([message.__class__.__name__, dict(message._asdict())]).encode()\n\n\ndef json_deserialize(message):\n '''\n Deserializes a message from JSON format.\n '''\n data = json.loads(message)\n if isinstance(data, list):\n msg_type = data[0]\n msg_data = data[1]\n if msg_type in msg_types:\n try:\n return msg_types[msg_type](**msg_data)\n except BaseException as e:\n print(e)\n raise\n return None\n\n\nHello = namedtuple('Hello', ['seq_num', 'timestamp'])\nFSMState = namedtuple('FSMState', ['seq_num', 'timestamp', 'state'])\nDiff = namedtuple('Diff', ['seq_num', 'timestamp', 'diff'])\nValidationResult = namedtuple('ValidationResult', ['seq_num', 'timestamp', 'host', 'result'])\nValidationTask = namedtuple(\n 'ValidationTask', ['seq_num', 'timestamp', 'host', 'task_action', 'result'])\nStdout = namedtuple('Stdout', ['seq_num', 'timestamp', 'stdout'])\n\n\nDesiredState = namedtuple('DesiredState', ['seq_num', 'timestamp', 'id', 'client_id', 'desired_state'])\nActualState = namedtuple('ActualState', ['id', 'client_id', 'actual_state'])\nPoll = namedtuple('Poll', [])\nComplete = namedtuple('Complete', [])\nDifference = namedtuple('Difference', [])\nNoDifference = namedtuple('NoDifference', [])\nSuccess = namedtuple('Success', [])\nFailure = namedtuple('Failure', [])\n\nInventory = namedtuple('Inventory', ['seq_num', 'timestamp', 'inventory'])\nRules = namedtuple('Rules', ['seq_num', 'timestamp', 'rules'])\n\nControl = namedtuple('Control', ['seq_num', 'timestamp', 'id'])\nSystem = namedtuple('System', ['id', 'control_id'])\nMonitor = namedtuple('Monitor', ['id', 'system_id', 'control_id'])\n\nDesiredSystemState = namedtuple(\n 'DesiredSystemState', ['id', 'client_id', 'desired_state'])\n\nShutdown = namedtuple('Shutdown', [])\n\nServiceInstance = namedtuple('ServiceInstance', ['id',\n 'service_id',\n 'created_at',\n 'deleted_at',\n 'name',\n 'config',\n 'inventory',\n 'inventory_id',\n 'collection',\n 'service_name',\n 'schema_name',\n 'rules_name',\n 
'status'])\n\nmsg_types = {x.__name__: x for x in [\n DesiredState, ActualState, Hello, Control, ServiceInstance]}\n","sub_path":"desired_state/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"39853243","text":"import logging\nimport asyncio\nimport pathlib\nimport argparse\nimport copy\nimport sys\nimport os\nimport re\nimport shutil\nimport base64\nfrom minio import Minio\nfrom pathlib import Path\n\nimport aiodocker\nimport aiohttp\nimport yaml\nimport yaml.scanner\nfrom tenacity import retry, stop_after_attempt, wait_exponential\n\nfrom common.config import config, static\nfrom common.docker_helpers import (create_secret, get_secret, delete_secret, get_network, connect_to_aiodocker,\n docker_context, stream_docker_log, logger as docker_logger, disconnect_from_network)\n\nlogging.basicConfig(level=logging.DEBUG, format=\"{asctime} - {name} - {levelname}:{message}\", style='{')\n\nlogger = logging.getLogger(\"BOOTLOADER\")\nstatic.set_local_hostname(\"local_bootloader\")\n\nCOMPOSE_BASE = {\"version\": \"3.5\",\n \"services\": {},\n \"networks\": {\"walkoff_default\": {\"driver\": \"overlay\", \"name\": \"walkoff_default\", \"attachable\": True}},\n \"secrets\": {\"encryption_key\": {\"external\": True}}}\n\nAPP_NAME_PREFIX = \"walkoff_\"\n\nDOCKER_HOST_IP = os.getenv(\"DOCKER_HOST_IP\")\np = Path('./apps').glob('**/*')\n\n\ndef bannerize(text, fill='='):\n columns = shutil.get_terminal_size().columns\n border = \"\".center(columns, fill)\n banner = f\" {text} \".center(columns, fill)\n print(f\"\\n\\n{border}\\n{banner}\\n{border}\\n\")\n\n\ndef parse_yaml(path):\n with open(path) as fp:\n try:\n return yaml.safe_load(fp)\n except yaml.YAMLError as e:\n logger.info(f\"Invalid yaml: {path}. {e}\")\n except yaml.scanner.ScannerError as e:\n logger.info(f\"Invalid yaml: {path}. {e}\")\n\n\ndef dump_yaml(path, obj):\n with open(path, 'w') as fp:\n try:\n return yaml.dump(obj, fp)\n except yaml.YAMLError as e:\n logger.info(f\"Invalid yaml: {path}. 
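# A small usage sketch for the json_serialize/json_deserialize pair above;
# the field values are made up for illustration. Only types registered in
# msg_types round-trip; anything else comes back as None.
msg = Hello(seq_num=1, timestamp=now())
wire = json_serialize(msg)            # b'["Hello", {"seq_num": 1, ...}]'
assert json_deserialize(wire) == msg  # reconstructed via msg_types["Hello"]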
{e}\")\n\n\ndef parse_env_file(path):\n with open(path) as fp:\n return [line.strip() for line in fp]\n\n\ndef compose_from_app(path: pathlib.Path, name):\n env_txt = path / \"env.txt\"\n env_file = {}\n if env_txt.exists():\n env_file = {\"environment\": parse_env_file(env_txt)}\n compose = copy.deepcopy(COMPOSE_BASE)\n build = {\"build\": {\"context\": str(path), \"dockerfile\": \"Dockerfile\"}}\n image = {\"image\": f\"{config.DOCKER_REGISTRY}/{APP_NAME_PREFIX}{name}:{path.name}\"}\n networks = {\"networks\": [\"walkoff_default\"]}\n deploy = {\"deploy\": {\"mode\": \"replicated\", \"replicas\": 0, \"restart_policy\": {\"condition\": \"none\"}}}\n config_mount = {\"configs\": [\"common_env.yml\"]}\n secret_mount = {\"secrets\": [\"walkoff_encryption_key\"]}\n shared_path = os.getcwd() + \"/data/shared\"\n final_mount = shared_path + \":/app/shared\"\n volumes_mount = {\"volumes\": [final_mount]}\n compose[\"services\"] = {name: {**build, **image, ** networks, **deploy, **config_mount,\n **secret_mount, **volumes_mount, **env_file}}\n return compose\n\n\nasync def log_proc_output(proc, silent=False):\n stdout, stderr = await proc.communicate()\n if not silent:\n if proc.returncode:\n for line in stderr.decode().split('\\n'):\n if line != '':\n logger.error(line)\n else:\n for line in stdout.decode().split('\\n'):\n if line != '':\n logger.info(line)\n\n\ndef merge_composes(base, others):\n if not isinstance(base, dict):\n base = parse_yaml(base)\n if base.get(\"services\") is None:\n base[\"services\"] = {}\n if not isinstance(others[0], dict):\n others = [parse_yaml(o) for o in others]\n for o in others:\n base[\"services\"].update(o.get(\"services\", {}))\n return base\n\n\ndef generate_app_composes():\n # TODO: Probably find a way to incorporate the app repo in here as well to eliminate mounting files to umpire\n composes = []\n for app in pathlib.Path(config.APPS_PATH).iterdir():\n # grabs only directories and ignores all __* directories i.e. 
__pycache__\n if app.is_dir() and not re.fullmatch(r\"(__.*)\", app.name):\n for version in app.iterdir():\n # grabs all valid version directories of form \"v0.12.3.45...\"\n if re.fullmatch(r\"((\\d\\.?)+)\", version.name):\n composes.append(compose_from_app(version, f\"app_{app.name}\"))\n logger.info(f\"Generated compose for {app.name} version: {version.name}\")\n return composes\n\n\nasync def create_encryption_key(docker_client, key_name):\n try:\n await get_secret(docker_client, key_name)\n except aiodocker.exceptions.DockerError:\n logger.info(f\"Creating secret {key_name}...\")\n await create_secret(docker_client, key_name, base64.urlsafe_b64encode(os.urandom(32)))\n else:\n logger.info(f\"Skipping secret {key_name} creation, it already exists.\")\n\n\nasync def delete_encryption_key(docker_client, key_name):\n try:\n await delete_secret(docker_client, key_name)\n except aiodocker.exceptions.DockerError:\n logger.info(f\"Skipping secret {key_name} deletion, it doesn't exist.\")\n\n\nasync def check_for_network(docker_client):\n try:\n await get_network(docker_client, \"walkoff_default\")\n return True\n except aiodocker.exceptions.DockerError:\n return False\n\n\nasync def delete_dir_contents(path):\n for root, dirs, files in os.walk(path):\n for f in files:\n os.unlink(os.path.join(root, f))\n for d in dirs:\n shutil.rmtree(os.path.join(root, d))\n\n\n@retry(stop=stop_after_attempt(10), wait=wait_exponential(min=1, max=10))\nasync def deploy_compose(compose):\n\n try:\n if not isinstance(compose, dict):\n compose = parse_yaml(compose)\n\n # Dump the compose to a temporary compose file and launch that. This is so we can amend the compose and update the\n # the stack without launching a new one\n dump_yaml(config.TMP_COMPOSE, compose)\n compose = config.TMP_COMPOSE\n\n proc = await asyncio.create_subprocess_exec(\"docker\", \"stack\", \"deploy\", \"--compose-file\", compose, \"walkoff\",\n stderr=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)\n await log_proc_output(proc)\n\n if proc.returncode:\n raise OSError\n else:\n return True\n\n except Exception as e:\n logger.info(\"Failed deploying, waiting to try again...\")\n raise e\n\n\nasync def build_image(docker_client, repo, dockerfile, context_dir, dockerignore):\n\n logger.info(f\"Building {repo} with {dockerfile} in {context_dir}\")\n\n with docker_context(Path(context_dir), dockerignore=dockerignore) as context:\n log_stream = await docker_client.images.build(fileobj=context, tag=repo, rm=True,\n forcerm=True, pull=True, stream=True,\n path_dockerfile=dockerfile,\n encoding=\"application/x-tar\")\n\n await stream_docker_log(log_stream)\n\n\nasync def push_image(docker_client, repo):\n\n logger.info(f\"Pushing image {repo}.\")\n\n try:\n await docker_client.images.push(repo)\n # await stream_docker_log(log_stream)\n logger.info(f\"Pushed image {repo}.\")\n return True\n except aiodocker.exceptions.DockerError as e:\n logger.exception(f\"Failed to push image: {e}\")\n return False\n\n\nclass Bootloader:\n \"\"\" A class to hold the logic for each of the possible commands. This follows the dispatch pattern we us in app_base\n for calling actions in apps. 
The pattern as applied to the CLI follows close to this example:\n https://chase-seibert.github.io/blog/2014/03/21/python-multilevel-argparse.html#\n \"\"\"\n\n def __init__(self, session=None, docker_client=None):\n self.session: aiohttp.ClientSession = session\n self.docker_client: aiodocker.Docker = docker_client\n with open(\".dockerignore\") as f:\n self.dockerignore = [line.strip() for line in f.readlines()]\n\n @staticmethod\n async def run():\n \"\"\" Landing pad to launch primary command and do whatever async init the bootloader needs. \"\"\"\n # TODO: fill in the helps, and further develop cli with the end user in mind\n commands = {\"up\", \"build\", \"down\"}\n parser = argparse.ArgumentParser()\n parser.add_argument(\"command\", choices=commands)\n parser.add_argument(\"args\", nargs=argparse.REMAINDER)\n\n logger.setLevel(\"DEBUG\")\n docker_logger.setLevel(\"DEBUG\")\n\n # Parse out the command\n args = parser.parse_args(sys.argv[1:2])\n\n async with aiohttp.ClientSession() as session, connect_to_aiodocker() as docker_client:\n bootloader = Bootloader(session, docker_client)\n\n if hasattr(bootloader, args.command):\n await getattr(bootloader, args.command)()\n else:\n logger.error(\"Invalid command.\")\n # TODO: Pipe this through the logger. print_help() accepts a file kwarg that we can use to do this\n parser.print_help()\n\n @retry(stop=stop_after_attempt(10), wait=wait_exponential(min=1, max=10))\n async def wait_for_registry(self):\n try:\n async with self.session.get(\"http://\" + DOCKER_HOST_IP) as resp:\n if resp.status == 200:\n return True\n else:\n raise ConnectionError\n except Exception as e:\n logger.info(\"Registry not available yet, waiting to try again...\")\n raise e\n\n @retry(stop=stop_after_attempt(10), wait=wait_exponential(min=1, max=10))\n async def wait_for_minio(self):\n try:\n async with self.session.get(f\"http://{config.MINIO}/minio/health/ready\") as resp:\n if resp.status == 200:\n return True\n else:\n raise ConnectionError\n except Exception as e:\n logger.info(\"Minio not available yet, waiting to try again...\")\n raise e\n\n async def push_to_minio(self):\n minio_client = Minio(config.MINIO, access_key='walkoff', secret_key='walkoff123', secure=False)\n flag = False\n try:\n buckets = minio_client.list_buckets()\n for bucket in buckets:\n if bucket.name == \"apps-bucket\":\n flag = True\n except:\n logger.info(\"Bucket doesn't exist.\")\n\n if flag is False:\n minio_client.make_bucket(\"apps-bucket\", location=\"us-east-1\")\n\n files = [x for x in p if x.is_file()]\n for file in files:\n path_to_file = str(file)\n with open(path_to_file, \"rb\") as file_data:\n file_stat = os.stat(path_to_file)\n minio_client.put_object(\"apps-bucket\", path_to_file, file_data, file_stat.st_size)\n\n async def up(self):\n\n # Create Walkoff encryption key\n return_code = await create_encryption_key(self.docker_client, \"walkoff_encryption_key\")\n if return_code:\n logger.exception(\"Could not create secret walkoff_encryption_key. Exiting.\")\n os._exit(return_code)\n\n # Create internal user key\n return_code2 = await create_encryption_key(self.docker_client, \"walkoff_internal_key\")\n if return_code2:\n logger.exception(\"Could not create secret walkoff_internal_key. 
Exiting.\")\n os._exit(return_code2)\n\n # Set up a subcommand parser\n parser = argparse.ArgumentParser(description=\"Bring the WALKOFF stack up and initialize it\")\n parser.add_argument(\"-b\", \"--build\", action=\"store_true\",\n help=\"Builds and pushes all WALKOFF components to local registry.\")\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\",\n help=\"Set log level to debug.\")\n\n # Parse out the command\n args = parser.parse_args(sys.argv[2:])\n\n if args.debug:\n logger.setLevel(\"DEBUG\")\n docker_logger.setLevel(\"DEBUG\")\n\n logger.info(\"Creating persistent directories for registry, postgres, portainer...\")\n os.makedirs(Path(\"data\") / \"registry\" / \"reg_data\", exist_ok=True)\n os.makedirs(Path(\"data\") / \"postgres\" / \"pg_data\", exist_ok=True)\n os.makedirs(Path(\"data\") / \"portainer\" / \"prt_data\", exist_ok=True)\n os.makedirs(Path(\"data\") / \"minio\" / \"min_data\", exist_ok=True)\n\n # Bring up the base compose with the registry\n logger.info(\"Deploying base services (registry, postgres, portainer, redis)...\")\n base_compose = parse_yaml(config.BASE_COMPOSE)\n\n await deploy_compose(base_compose)\n\n await self.wait_for_registry()\n\n # Merge the base, walkoff, and app composes\n app_composes = generate_app_composes()\n walkoff_compose = parse_yaml(config.WALKOFF_COMPOSE)\n merged_compose = merge_composes(walkoff_compose, app_composes)\n\n dump_yaml(config.TMP_COMPOSE, merged_compose)\n\n if args.build:\n walkoff_app_sdk = walkoff_compose[\"services\"][\"app_sdk\"]\n await build_image(self.docker_client, walkoff_app_sdk[\"image\"],\n walkoff_app_sdk[\"build\"][\"dockerfile\"],\n walkoff_app_sdk[\"build\"][\"context\"],\n self.dockerignore)\n await push_image(self.docker_client, walkoff_app_sdk[\"image\"])\n\n for service_name, service in walkoff_compose[\"services\"].items():\n if \"build\" in service:\n await build_image(self.docker_client, service[\"image\"],\n service[\"build\"][\"dockerfile\"],\n service[\"build\"][\"context\"],\n self.dockerignore)\n await push_image(self.docker_client, service[\"image\"])\n\n await self.wait_for_minio()\n await self.push_to_minio()\n\n logger.info(\"Deploying Walkoff stack...\")\n\n return_code = await deploy_compose(merged_compose)\n\n return return_code\n\n async def down(self):\n\n # Set up a subcommand parser\n parser = argparse.ArgumentParser(description=\"Remove the WALKOFF stack and optionally related artifacts.\")\n parser.add_argument(\"-k\", \"--key\", action=\"store_true\",\n help=\"Removes the walkoff_encryption_key secret.\")\n parser.add_argument(\"-r\", \"--registry\", action=\"store_true\",\n help=\"Clears the registry bind mount directory.\")\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\",\n help=\"Set log level to debug.\")\n\n # Parse out the command\n args = parser.parse_args(sys.argv[2:])\n\n if args.debug:\n logger.setLevel(\"DEBUG\")\n docker_logger.setLevel(\"DEBUG\")\n\n logger.info(\"Removing Walkoff stack and related artifacts...\")\n\n proc = await asyncio.create_subprocess_exec(\"docker\", \"stack\", \"rm\", \"walkoff\", stderr=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE)\n\n await log_proc_output(proc)\n\n # if not args.skipnetwork:\n # logger.info(\"Waiting for containers to exit and network to be removed...\")\n # await exponential_wait(check_for_network, [self.docker_client], \"Network walkoff_default still exists\")\n\n if args.key:\n resp = input(\"Deleting encryption key will render database unreadable, and therefore it will be 
cleared. \"\n \"This will delete all workflows, execution results, globals, users, roles, etc. \"\n \"Are you sure? (yes/no): \")\n while resp.lower() not in (\"yes\", \"no\"):\n resp = input(\"Please answer 'yes' or 'no': \")\n\n if resp.lower() == \"yes\":\n await delete_encryption_key(self.docker_client, \"walkoff_encryption_key\")\n await delete_encryption_key(self.docker_client, \"walkoff_internal_key\")\n await delete_dir_contents(\"data/postgres\")\n\n if args.registry:\n await delete_dir_contents(\"data/registry\")\n await delete_dir_contents(\"data/minio/min_data\")\n\n return proc.returncode\n\n\nif __name__ == \"__main__\":\n asyncio.run(Bootloader.run())\n","sub_path":"bootloader/bootloader.py","file_name":"bootloader.py","file_ext":"py","file_size_in_byte":16463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"300988091","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# filename: triangles.py\n# function: yang hui san jiao\n\n\ndef triangles():\n L = [1]\n while True:\n yield L\n L.append(0)\n L = [L[x-1] + L[x] for x in range(len(L))]\n\nn = 0\nfor t in triangles():\n print(t)\n n += 1\n if n == 10:\n break","sub_path":"py3/triangles.py","file_name":"triangles.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"278273506","text":"N, B = int(input()), list(map(int, input().split()))\n\nA = [0 for _ in range(N)]\nA[0] = B[0]\n\nfor i in range(1, N):\n A[i] = B[i] * (i + 1) - sum(A)\n\nfor num in A:\n print(num, end=' ')","sub_path":"PS_vsCode/10539. 수빈이와 수열.py","file_name":"10539. 수빈이와 수열.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"136097319","text":"from typing import List, Text\nfrom bson.objectid import ObjectId\n\nfrom actions.db.store import db\nfrom actions.utils.debug import is_debug_env\n\nADMIN_CONFID_OBJECT_ID = \"000000000000000000000001\"\n\n\ndef lazy_init():\n admin_config = db.admin_config.find_one({\"_id\": ObjectId(ADMIN_CONFID_OBJECT_ID)})\n if not admin_config:\n db.admin_config.insert_one(\n {\n \"_id\": ObjectId(ADMIN_CONFID_OBJECT_ID),\n \"super_admins\": [],\n \"admin_group_id\": \"\",\n },\n )\n\n\ndef is_super_admin(chat_id: Text):\n if is_debug_env():\n return True\n return chat_id in get_super_admins()\n\n\ndef is_admin_group(chat_id: Text):\n return get_admin_group_id() == chat_id\n\n\ndef get_super_admins():\n lazy_init()\n return db.admin_config.find_one({\"_id\": ObjectId(ADMIN_CONFID_OBJECT_ID)}).get(\n \"super_admins\"\n )\n\n\ndef get_admin_group_id():\n lazy_init()\n return db.admin_config.find_one({\"_id\": ObjectId(ADMIN_CONFID_OBJECT_ID)}).get(\n \"admin_group_id\"\n )\n\n\ndef set_admin_group_id(group_id):\n lazy_init()\n db.admin_config.update_one(\n {\"_id\": ObjectId(ADMIN_CONFID_OBJECT_ID)},\n {\"$set\": {\"admin_group_id\": group_id}},\n )\n","sub_path":"dataset/actions/utils/admin_config.py","file_name":"admin_config.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498542853","text":"# competition description\n# https://www.kaggle.com/c/talkingdata-adtracking-fraud-detection\n\n# I used both Amazon`s EC2 and Google Compute services (156 GB RAM and 24 CPU cores), as the datasets were large.\n# The training dataset consisted of 185 million rows with testing set consisting of 
around 51 million rows.\n# As the dataset`s categorical variables were already encoded, the feature engineering was based on\n# mathematical procedures of counting unique values and frequencies of the single variables and combination of variables.\n# So out of the initial 5 features around 20 additional features were generated. \n# As an algorithm I used a single LightGBM model. \n\n# With simply more computational power and a few additional lines of code the pretty solid increase (+0.11-0.13) \n# in the score could have been achieved.\n\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport time\nimport numpy as np\nimport lightgbm as lgb\nimport os\n\nmax_rounds = 1000\nearly_stop = 50\nopt_rounds = 680\n\noutput_file = 'lgbm_submit.csv'\n\npath = \"../input/\"\n\ndtypes = {\n \t'ip'\t\t:'uint32',\n \t'app'\t\t:'uint16',\n\t'device'\t:'uint16',\n\t'os'\t\t:'uint16',\n\t'channel'\t:'uint16',\n\t'is_attributed'\t:'uint8',\n\t'click_id'\t:'uint32',\n\t}\n\n\n\ntrain_cols = ['ip', 'app', 'device', 'os', 'channel', 'is_attributed', 'click_time']\ntrain_df = pd.read_csv(path + 'train.csv', skiprows=range(1,144903891), nrows=20000000, dtype=dtypes, usecols=train_cols)\n\n\nprint('Load test.csv...')\ntest_cols = ['ip', 'app', 'device', 'os', 'click_time', 'channel', 'click_id']\ntest_df = pd.read_csv(path + \"test.csv\", dtype=dtypes, usecols=test_cols)\n\ntest_supplement_cols = ['ip', 'app', 'device', 'os', 'click_time', 'channel', 'click_id']\ntest_supplement = pd.read_csv(path + \"test.csv\", dtype=dtypes, usecols=test_supplement_cols)\n\nimport gc\n\nlen_train = len(train_df)\n\nprint('Preprocessing...')\n\n\ntrain_df['click_time']= pd.to_datetime(train_df['click_time'])\ntrain_df['hour'] = train_df['click_time'].dt.hour.astype('uint8')\n\ntest_supplement['click_time']= pd.to_datetime(test_supplement['click_time'])\ntest_supplement['hour'] = test_supplement['click_time'].dt.hour.astype('uint8')\n\ntest['click_time']= pd.to_datetime(test['click_time'])\n\ndef add_counts(df, cols):\n arr_slice = df[cols].values\n unq, unqtags, counts = np.unique(np.ravel_multi_index(arr_slice.T, arr_slice.max(axis=0)+1),\n return_inverse=True, return_counts=True)\n df[\"_\".join(cols)+\"_count\"] = counts[unqtags]\n\n# def add_next_click(df):\n# D = 2**26\n# df['category'] = (df['ip'].astype(str) + \"_\" + df['app'].astype(str) + \"_\" + df['device'].astype(str) \\\n# + \"_\" + df['os'].astype(str)).apply(hash) % D\n# click_buffer = np.full(D, 3000000000, dtype=np.uint32)\n# df['epochtime'] = df['click_time'].astype(np.int64) // 10 ** 9\n# next_clicks = []\n# for category, time in zip(reversed(df['category'].values), reversed(df['epochtime'].values)):\n# next_clicks.append(click_buffer[category] - time)\n# click_buffer[category] = time\n# del click_buffer\n# df['next_click'] = list(reversed(next_clicks))\n# df.drop(['category', 'epochtime'], axis=1, inplace=True)\n\n\n\ndef preproc_data(df):\n \n #Extrace date info\n \n # df['day'] = df['click_time'].dt.day.astype('uint8')\n # df['wday'] = df['click_time'].dt.dayofweek.astype('uint8')\n # gc.collect()\n df['click_time']= pd.to_datetime(df['click_time'])\n df['hour'] = df['click_time'].dt.hour.astype('uint8')\n #Groups\n # df['in_test_hh'] = ( 3\n\t # \t\t - 2 * df['hour'].isin( most_freq_hours_in_test_data )\n\t\t\t #- 1 * df['hour'].isin( least_freq_hours_in_test_data )).astype('uint8')\n\n # print('Adding next_click...')\n # add_next_click(df)\n\n print('Grouping...')\n \n add_counts(df, ['ip'])\n add_counts(df, ['os', 
'device'])\n add_counts(df, ['os', 'app', 'channel'])\n\n add_counts(df, ['ip', 'device'])\n add_counts(df, ['app', 'channel'])\n\n # add_counts(df, ['ip', 'in_test_hh'])\n add_counts(df, ['ip', 'hour'])\n add_counts(df, ['ip', 'os', 'hour'])\n add_counts(df, ['ip', 'app', 'hour'])\n add_counts(df, ['ip', 'device', 'hour'])\n add_counts(df, ['ip', 'app', 'os'])\n add_counts(df, ['hour', 'app'])\n \n\n df.drop(['ip', 'click_time'], axis=1, inplace=True )\n gc.collect()\n\n print( df.info() )\n\n return df\n \nprint('doing do_countunique()...') \n \ndef do_countuniq( df, group_cols, counted, agg_type='uint32', show_max=False, show_agg=True ):\n agg_name= '{}_by_{}_countuniq'.format(('_'.join(group_cols)),(counted)) \n if show_agg:\n print( \"\\nCounting unqiue \", counted, \" by \", group_cols , '... and saved in', agg_name )\n gp = df[group_cols+[counted]].groupby(group_cols)[counted].nunique().reset_index().rename(columns={counted:agg_name})\n df = df.merge(gp, on=group_cols, how='left')\n del gp\n if show_max:\n print( agg_name + \" max value = \", df[agg_name].max() )\n df[agg_name] = df[agg_name].astype(agg_type)\n # predictors.append(agg_name)\n# print('predictors',predictors)\n gc.collect()\n return( df ) \n \ndef do_next_Click( df,agg_suffix='nextClick', agg_type='float32'):\n \n print(f\">> \\nExtracting {agg_suffix} time calculation features...\\n\")\n \n GROUP_BY_NEXT_CLICKS = [\n \n # V1\n # {'groupby': ['ip']},\n # {'groupby': ['ip', 'app']},\n # {'groupby': ['ip', 'channel']},\n # {'groupby': ['ip', 'os']},\n \n # V3\n {'groupby': ['ip', 'app', 'device', 'os', 'channel']},\n {'groupby': ['ip', 'os', 'device']},\n {'groupby': ['ip', 'os', 'device', 'app']}\n ]\n\n # Calculate the time to next click for each group\n for spec in GROUP_BY_NEXT_CLICKS:\n \n # Name of new feature\n new_feature = '{}_{}'.format('_'.join(spec['groupby']),agg_suffix) \n \n # Unique list of features to select\n all_features = spec['groupby'] + ['click_time']\n\n # Run calculation\n print(f\">> Grouping by {spec['groupby']}, and saving time to {agg_suffix} in: {new_feature}\")\n df[new_feature] = (df[all_features].groupby(spec[\n 'groupby']).click_time.shift(-1) - df.click_time).dt.seconds.astype(agg_type)\n \n # predictors.append(new_feature)\n gc.collect()\n return (df)\n\ny = train_df.is_attributed.values\n\n# submit = pd.DataFrame()\n# submit['click_id'] = test_df['click_id']\n\ntrain_len = len(train_df)\ncommon_cols = ['ip', 'app', 'device', 'os', 'channel', 'hour', 'click_time']\ntrain_df = pd.concat([train_df[common_cols], test_supplement[common_cols]])\n\ntrain_df = do_countuniq( train_df, ['ip'], 'channel' )\ngc.collect()\ntrain_df = do_countuniq( train_df, ['ip', 'device', 'os'], 'app')\ngc.collect()\ntrain_df = do_countuniq( train_df, ['ip'], 'hour' )\ngc.collect()\ntrain_df = do_countuniq( train_df, ['ip'], 'app')\ngc.collect()\ntrain_df = do_countuniq( train_df, ['ip', 'app'], 'os')\ngc.collect()\ntrain_df = do_countuniq( train_df, ['ip'], 'device')\ngc.collect()\ntrain_df = do_countuniq( train_df, ['app'], 'channel')\ngc.collect()\n\ntrain_df = do_next_Click( train_df,agg_suffix='nextClick', agg_type='float32' )\ngc.collect()\n\ntrain_df = preproc_data(train_df)\n\ntest_supplement = train_df.iloc[train_len:]\ntrain_df = train_df.iloc[:train_len]\n\ngc.collect()\n\nmetrics = 'auc'\nlgb_params = {\n\t'boosting_type': 'gbdt',\n\t'objective': 'binary',\n\t'metric': metrics,\n\t'learning_rate': .1,\n\t'num_leaves': 7,\n\t'max_depth': 4,\n\t'min_child_samples': 100,\n\t'max_bin': 
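# What add_counts above computes, shown on a toy array: each row of the
# selected columns is collapsed into one integer key by np.ravel_multi_index,
# and np.unique(..., return_inverse=True, return_counts=True) then maps every
# row back to the frequency of its combination. Illustrative values only:
import numpy as np

arr = np.array([[1, 0], [1, 0], [2, 1]])                 # e.g. (os, device) pairs
keys = np.ravel_multi_index(arr.T, arr.max(axis=0) + 1)  # -> [2, 2, 5]
_, inverse, counts = np.unique(keys, return_inverse=True, return_counts=True)
print(counts[inverse])                                   # -> [2 2 1]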
100,\n\t'subsample': 0.7,\n\t'subsample_freq': 1,\n\t'colsample_bytree': 0.7,\n\t'min_child_weight': 0,\n\t'min_split_gain': 0,\n\t'nthread': 4,\n\t'verbose': 1,\n\t'scale_pos_weight': 99.7\n\t#'scale_pos_weight': 400\n}\n\ntarget = 'is_attributed'\n\ninputs = list(set(train_df.columns) - set([target])) \ncat_vars = ['app', 'device', 'os', 'channel', 'hour']\n\ntrain_df, val_df = train_test_split(train_df, train_size=.95, shuffle=False)\ny_train, y_val = train_test_split(y, train_size=.95, shuffle=False)\n\ngc.collect()\n\nprint('Training...')\n\nnum_boost_round=max_rounds\nearly_stopping_rounds=early_stop\n\nxgtrain = lgb.Dataset(train_df[inputs].values, label=y_train,\n\t\t feature_name=inputs,\n\t\t categorical_feature=cat_vars)\ndel train_df\ngc.collect()\n\nxgvalid = lgb.Dataset(val_df[inputs].values, label=y_val,\n\t\t feature_name=inputs,\n\t\t categorical_feature=cat_vars)\ndel val_df\ngc.collect()\n\nevals_results = {}\n\nmodel = lgb.train(lgb_params,\n\t\t xgtrain,\n\t\t valid_sets= [xgvalid],\n\t\t valid_names=['valid'],\n\t\t evals_result=evals_results,\n\t\t num_boost_round=num_boost_round,\n\t\t early_stopping_rounds=early_stopping_rounds,\n\t\t verbose_eval=1,\n\t\t feval=None)\nn_estimators = model.best_iteration\n\nprint('\\nModel Info:')\nprint('n_estimators:', n_estimators)\nprint(metrics+':', evals_results['valid'][metrics][n_estimators-1])\n\ndel xgvalid\ndel xgtrain\ngc.collect()\n\n\nprint('Predicting...')\ntest_supplement['is_attributed'] = model.predict(test_supplement[inputs], num_iteration=n_estimators)\n\njoin_cols = ['ip', 'app', 'device', 'os', 'channel', 'click_time']\nall_cols = join_cols + ['is_attributed']\n\ntest = test_df.merge(test_supplement[all_cols], how='left', on=join_cols)\ntest = test.drop_duplicates(subset=['click_id'])\n\ntest[['click_id', 'is_attributed']].to_csv('sub.csv', index=False)\n","sub_path":"Kaggle Top_18%_place.py","file_name":"Kaggle Top_18%_place.py","file_ext":"py","file_size_in_byte":9309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"221658138","text":"# 9. 
Create a dictionary that contains a number (between 1 and n) in the form(x,x*x).\n# Sample data (n=5)\n# Expected Output: {1:1,2:4,3:9,4:16,5:25}\n\ndic1={}\n\ndef dict_square(n):\n for i in range(1,n+1):\n dic1[i]=i*i\n return dic1\n\na=dict_square(5)\nprint (a)","sub_path":"Task3/Task3_9.py","file_name":"Task3_9.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"183950247","text":"from unittest import TestCase\n\nimport pytest\nfrom osbot_utils.utils.Dev import pprint\nfrom osbot_utils.utils.Misc import list_set, random_text\n\nfrom cdr_plugin_folder_to_folder.common_settings.Config import Config\nfrom cdr_plugin_folder_to_folder.utils.Elastic import Elastic\nfrom cdr_plugin_folder_to_folder.utils._to_refactor.For_OSBot_Elastic.Index_Pattern import Index_Pattern\nfrom cdr_plugin_folder_to_folder.utils._to_refactor.For_OSBot_Elastic.Kibana import Kibana\n\n#@pytest.mark.skip\nfrom cdr_plugin_folder_to_folder.utils.testing.Setup_Testing import Setup_Testing\n\n\nclass test_Index_Pattern(TestCase):\n\n def setUp(self) -> None:\n self.config = Config()\n Setup_Testing().configure_config(self.config)\n self.host = self.config.kibana_host\n self.port = self.config.kibana_port\n self.kibana = Kibana(host=self.host, port=self.port).setup()\n\n if self.kibana.enabled is False:\n pytest.skip('Elastic server not available')\n\n self.pattern_name = 'temp_index_pattern'\n self.index_pattern = Index_Pattern(kibana=self.kibana, pattern_name=self.pattern_name)\n\n def test_create_info_exists_delete(self):\n result = self.index_pattern.create()\n #pprint(result)\n assert result.get('attributes').get('title') == self.pattern_name\n assert self.index_pattern.exists() is True\n assert list_set(self.index_pattern.info()) == ['fields', 'id', 'namespaces', 'references', 'score', 'title',\n 'type', 'updated_at']\n assert Index_Pattern(kibana=self.kibana, pattern_name=random_text()).info() == {}\n assert self.index_pattern.delete() is True\n\n def test_create__time_field(self):\n time_field = random_text()\n self.index_pattern.create(time_field=time_field)\n assert self.index_pattern.info().get('timeFieldName') == time_field\n assert self.index_pattern.delete() is True\n\n def test_delete(self):\n assert self.index_pattern.delete() is False\n\n def test_id(self):\n assert self.index_pattern.id() is None\n\n def test_info(self):\n assert self.index_pattern.info() == {}","sub_path":"tests/integration/utils/_to_refactor/For_OSBot_Elastic/test_Index_Pattern.py","file_name":"test_Index_Pattern.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"566091915","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n A_pointer = headA\n B_pointer = headB\n lengthA = 0\n lengthB =0\n if headA == None or headB ==None:\n return None\n while(headA.next != None ):\n lengthA+=1\n headA = headA.next\n while(headB.next != None ):\n lengthB+=1\n headB = headB.next\n \n if lengthA>lengthB:\n difference = lengthA -lengthB\n for i in range(difference):\n A_pointer = A_pointer.next\n while (A_pointer !=B_pointer):\n A_pointer = A_pointer.next\n B_pointer = B_pointer.next\n return A_pointer\n elif lengthA 0.5\n mask_negative = 
target[:, :, :, :, 0] < 0.5\n\n        target_positive = target[mask_positive]\n        target_negative = target[mask_negative]\n        number, _ = target_positive.shape\n        predict_positive = predict[mask_positive]\n        predict_negative = predict[mask_negative]\n\n        '''Confidence (objectness) loss'''\n        if number > 0:\n            loss_c_p = self.binary_cross_entropy(self.sigmoid(predict_positive[:, 0]), target_positive[:, 0])\n        else:\n            loss_c_p = 0\n        loss_c_n = self.binary_cross_entropy(self.sigmoid(predict_negative[:, 0]), target_negative[:, 0])\n        loss_c = loss_c_n + loss_c_p\n\n        '''Bounding-box regression'''\n        if number > 0:\n            loss_box1 = self.mse_loss(self.sigmoid(predict_positive[:, 1:3]), target_positive[:, 1:3])\n            loss_box2 = self.mse_loss(predict_positive[:, 3:5], target_positive[:, 3:5])\n\n            '''Classification loss'''\n            loss_class = self.ce_loss(predict_positive[:, 5:], target_positive[:, 5].long())\n        else:\n            loss_box1 = 0\n            loss_box2 = 0\n            loss_class = 0\n        return loss_c + (loss_box1 + loss_box2) + loss_class\n\n\nif __name__ == '__main__':\n    trainer = Trainer()\n    trainer.train()\n","sub_path":"project3/P3Trainer.py","file_name":"P3Trainer.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"372191283","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom django import forms\nfrom django.db import models\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext as _\nfrom django.utils.functional import cached_property\nfrom django.utils.html import format_html_join\n\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.fields import RichTextField, StreamField\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel, MultiFieldPanel\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailcore.url_routing import RouteResult\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailsnippets.models import register_snippet\nfrom wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel\nfrom wagtailgeowidget.edit_handlers import GeoPanel\nfrom wagtailgeowidget.helpers import geosgeometry_str_to_struct\n\n\n# A contact that someone should reach out to\n# Used in the step templates\n@register_snippet\nclass Contact(models.Model):\n    name = models.CharField(max_length=255, help_text=\"The name of someone to contact. 
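# The mask-splitting trick used in the loss above, in isolation: channel 0
# of the target acts as an objectness flag, and boolean indexing separates
# cells that contain an object from background cells. Toy values only:
import torch

target = torch.tensor([[1.0, 0.2], [0.0, 0.9], [1.0, 0.5]])
positive = target[:, 0] > 0.5   # cells with an object
negative = target[:, 0] < 0.5   # background cells
print(target[positive].shape, target[negative].shape)  # torch.Size([2, 2]) torch.Size([1, 2])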
Ex: Sharon Smith\")\n location = models.CharField(max_length=255, blank=True, help_text=\"An address for where this person works\")\n email = models.EmailField(max_length=255, blank=True)\n phone_number = models.CharField(max_length=16, blank=True) # validators should be a list\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n panels = [\n FieldPanel('name'),\n FieldPanel('location'),\n FieldPanel('email'),\n FieldPanel('phone_number'),\n ImageChooserPanel('image'),\n ]\n\n def __str__(self):\n return self.name\n\n\n# Allows admins to create logic that directs users to specific steps\n# Used in the track templates\nclass ChoiceRulesBlock(blocks.CharBlock):\n def __init__(self, required=True, help_text=None, max_length=None, min_length=None,\n **kwargs):\n super(ChoiceRulesBlock, self).__init__(**kwargs)\n\n def field(self):\n field_kwargs = {}\n field_kwargs.update(self.field_options)\n return forms.CharField(**field_kwargs)\n\n def clean(self, value):\n return super(ChoiceRulesBlock, self).clean(value)\n\n def render_form(self, value, prefix='', errors=None):\n if value:\n choices = value.split(',')\n choicesHtml = ''\n for choice in choices:\n choicesHtml = choicesHtml + ''\n out = \"\"\"

When someone selects:\n\n{}\n\nDirect them to these pages:\n""".format(choicesHtml)\n        else:\n            value = 'NEW'\n            out = """When someone selects:\n\n{}\n\nDirect them to these pages:\n""".format('')\n\n        out = '' + out + '
'\n\n return mark_safe(out + '')\n\n def value_from_form(self, value):\n arr = value.split(',')\n arr.sort()\n val = ','.join(arr)\n return super(ChoiceRulesBlock, self).value_from_form(val)\n\nclass TaskChoicesBlock(blocks.StreamBlock):\n question = blocks.CharBlock()\n choices = blocks.ListBlock(blocks.StructBlock([\n ('label', blocks.CharBlock(required=True)),\n ]))\n\n class Meta:\n label='Add choices to guide a client to services'\n template='roadmap/track/partials/_choice_form.html'\n\n# A Track -- contains a series of steps that a user can do to accomplish a specific goal\nclass Track(Page):\n page_body = RichTextField(blank=True, help_text=\"The main content of the page, above the list of steps and form\")\n form_submission_message = models.CharField(max_length=255, help_text=\"The text that is shown to the user after they make a form submissiom\", default=\"Based on your choices we suggest looking at the following:\")\n question = models.CharField(max_length=255, blank=True, help_text=\"The question for the form on the page (optional)\")\n choices = StreamField([\n ('label', blocks.CharBlock(required=True)),\n ], blank=True, null=True)\n\n has_strict_rules = models.BooleanField(default=False, help_text=\"If the rule definitions are strict it will ONLY display results that match the exact answers (instead of the union of the answers)\")\n\n rules = StreamField([\n ('rule', blocks.StructBlock([\n ('name', ChoiceRulesBlock()),\n ('pages', blocks.ListBlock(blocks.PageChooserBlock())),\n ('override', blocks.BooleanBlock(default=False, required=False))\n ]))], default=[], blank=True)\n\n default_steps = StreamField([\n ('page', blocks.PageChooserBlock())\n ], blank=True, default=[], help_text=\"The steps to show if someone submits a form with answers that are not covered by the rules\")\n\n content_panels = Page.content_panels + [\n FieldPanel('page_body', classname='full'),\n MultiFieldPanel([\n MultiFieldPanel([\n FieldPanel('question'),\n FieldPanel('form_submission_message'),\n StreamFieldPanel('choices')\n ]),\n FieldPanel('has_strict_rules'),\n StreamFieldPanel('rules'),\n StreamFieldPanel('default_steps'),\n ], heading=\"Options form for the page to help narrow down choices\", classname=\"collapsible\")\n ]\n\n template = 'roadmap/track/base.html'\n\n def steps(self):\n # Get list of all step pages that are descendants of this page\n events = Step.objects.live().descendant_of(self)\n return events\n\n # Directs people to the walk through or self service routes\n # Walk through path uses the choices model to filter steps to take\n def route(self, request, path_components):\n if len(request.GET):\n return RouteResult(self, kwargs={'template': 'roadmap/track/base.html'})\n else:\n return super(Track, self).route(request, path_components)\n\n def serve(self, request, template=''):\n if template == '':\n template = self.template\n #Kind of hacky but intercept request if it has 'submit-choice' in the slug\n #Serve the rules for the selected choices\n if len(request.GET):\n #Get selected checkbox values from params in request\n selected_choices = list(request.GET.values())\n\n #Sort the choices so we have them in the same order as the admin defined rules\n selected_choices.sort()\n\n pages = [] #list of pages that will be presented to the user\n default_pages = [] #default pages if there isn't a rule defined for the choices the user selected\n all_selected_choices = ','.join(selected_choices)\n\n #loop through each admin defined rule to see if we have a defined rule for the selected 
choices\n if self.has_strict_rules:\n #Find the one rule that matches the selected choices and only suggest those steps\n for rule in self.rules:\n if rule.value['override'] and re.search(rule.value['name'], all_selected_choices):\n pages = rule.value['pages']\n break\n if rule.value['name'] == all_selected_choices:\n pages = rule.value['pages']\n else:\n #Union all the pages that match with a rule\n for rule in self.rules:\n if rule.value['override'] and re.search(rule.value['name'], all_selected_choices):\n pages = rule.value['pages']\n break\n if rule.value['name'] in selected_choices:\n for page in rule.value['pages']:\n if page not in pages:\n pages.append(page)\n\n for page in self.default_steps:\n #if the user defines default pages in the admin then create a list of pages\n #otherwise the default default_pages list is all the steps in the track\n default_pages.append(Page.objects.get(id=page.value.id))\n\n if not pages:\n if not default_pages:\n default_pages = Step.objects.live().descendant_of(self)\n pages = default_pages\n\n request.path = '/'.join(request.path.split('/')[:3])\n\n return render(request, template, {\n 'steps': list(map((lambda page: page.specific), pages)),\n 'page': self,\n 'selected_choices': ','.join(map(str, selected_choices)),\n 'default_pages': default_pages,\n 'showMessage': True\n })\n #Otherwise just render the track page with the appropriate template\n return render(request, template, {\n 'page': self,\n 'steps': self.steps()\n })\n\nclass Step(Page):\n short_description = models.CharField(max_length=75, help_text=\"A short preview of what the page is about\")\n page_body = RichTextField(blank=True)\n checklist_instructions = RichTextField(blank=True, help_text=\"Specific instructions provided when a user prints the page. 
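# The non-strict branch above is effectively a union over matching rules;
# stripped of the Wagtail plumbing it reduces to something like this sketch
# (rule names and page values are made up):
def union_pages(rules, selected_choices):
    pages = []
    for name, rule_pages in rules:
        if name in selected_choices:
            for page in rule_pages:
                if page not in pages:   # keep rule order, skip duplicates
                    pages.append(page)
    return pages

# union_pages([("a", [1, 2]), ("b", [2, 3])], ["a", "b"]) == [1, 2, 3]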
Keep it short!\")\n\n address = models.CharField(max_length=250, blank=True, null=True)\n location = models.CharField(max_length=250, blank=True, null=True)\n\n contact = models.ForeignKey(\n 'roadmap.Contact',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n content_panels = Page.content_panels + [\n FieldPanel('short_description', classname='full'),\n FieldPanel('page_body', classname='full'),\n FieldPanel('checklist_instructions', classname='full'),\n SnippetChooserPanel('contact'),\n MultiFieldPanel([\n FieldPanel('address'),\n GeoPanel('location', address_field='address'),\n ], _('Location of the step')),\n ]\n\n template = 'roadmap/step/base.html'\n\n @cached_property\n def point(self):\n return geosgeometry_str_to_struct(self.location)\n\n @property\n def lat(self):\n return self.point['y']\n\n @property\n def lng(self):\n return self.point['x']\n\n@hooks.register('insert_editor_js')\ndef editor_js():\n js_files = [\n 'js/choices_panel.js',\n ]\n js_includes = format_html_join('\\n', '',\n ((settings.STATIC_URL, filename) for filename in js_files)\n )\n return js_includes\n\n","sub_path":"roadmap/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239646034","text":"def sumatoriaa(A,B,C):\n\n n = len(A)\n acumulador = 0\n for i in range (n):\n h = A[i]*B[i]\n f = h+C[i]\n acumulador = acumulador+f\n resultado = acumulador +(n**2)\n return resultado\n \nA = [1,1,1,1]\nB = [2,2,2,2]\nC = [3,3,3,3]\nsumatoriaa(A,B,C)\n\n#Prueba\nt=(((1*2)+3)+((1*2)+3)+((1*2)+3)+((1*2)+3))+(4)**2\nt","sub_path":"py3_ciclos/sumatoria.py","file_name":"sumatoria.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"219914358","text":"\"\"\"Setup\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# figure out the version\nabout = {}\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, \"challengeutils\", \"__version__.py\")) as f:\n exec(f.read(), about)\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(name='challengeutils',\n version=about[\"__version__\"],\n description='Challenge utility functions',\n url='https://github.com/Sage-Bionetworks/challengeutils',\n author='Thomas Yu',\n author_email='thomasyu888@gmail.com',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='Apache',\n packages=find_packages(),\n zip_safe=False,\n python_requires='>=3.5',\n scripts=['bin/runqueue.py'],\n entry_points={'console_scripts': ['challengeutils = challengeutils.__main__:main']},\n install_requires=['pandas>=1.0.0',\n 'synapseclient>=1.9.4'])\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"119751625","text":"#DruErin SI 206 Final Project\n\nimport requests\nimport json\nimport sqlite3\nimport APIKeys\nimport spotipy\nimport spotipy.oauth2 as oauth2\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport ssl\nimport urllib.request, urllib.parse, urllib.error\nimport datetime\n\n## API Keys\nclient_id_ = APIKeys.client_id\nclient_secret_ = APIKeys.client_secret\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = 
'https://www.billboard.com/charts/hot-100'\nr = requests.get(url)\nsoup = BeautifulSoup(r.text, 'html.parser')\n\ndef generate_token():\n    '''Generates token for Spotify API using the API keys'''\n    credentials = oauth2.SpotifyClientCredentials(client_id= client_id_, client_secret= client_secret_)\n    token = credentials.get_access_token()\n    return token\ndef grab_spotify_data(user = \"spotifycharts\", playlist_id = \"37i9dQZEVXbLRQDuF5jeBp\"):\n    '''Makes call to Spotify API and returns US Top 50 songs'''\n    token = generate_token()\n    spotify = spotipy.Spotify(auth = token)\n    results = spotify.user_playlist_tracks(user, playlist_id) #large amount of data from top 50 list\n    return results\ndef grab_billboard_data():\n    '''Scrapes the Billboard Hot 100 Chart website using BeautifulSoup and returns list of top 100 songs'''\n    outer_tag = soup.find(\"div\", class_ = \"chart-details \")\n    song_tag_list = outer_tag.find_all(\"div\", class_ = \"chart-list-item\")\n    song_list = []\n    for song in song_tag_list:\n        song_list.append(song[\"data-title\"])\n    return song_list\n\ndate = str(datetime.datetime.now()).split()[0]\n\nspotify_results = grab_spotify_data()\n\n\nsong_list = grab_billboard_data()\n\n\nconn = sqlite3.connect(\"spotify.sqlite\")\ncur = conn.cursor()\n\ncur.execute(\"CREATE TABLE IF NOT EXISTS SpotifySongData (song_title TEXT UNIQUE, rating INTEGER, date STRING)\")\n\ncur.execute(\"CREATE TABLE IF NOT EXISTS BillboardData (rating_billboard INTEGER, song TEXT UNIQUE)\")\n\ncur.execute(\"CREATE TABLE IF NOT EXISTS TopSpotifyData (rating INTEGER, song_title TEXT UNIQUE)\")\n\ncount1 = 0\nfor song in spotify_results[\"items\"]:\n    if count1 == 20:\n        break\n    song_title = song[\"track\"][\"name\"]\n    rating = spotify_results[\"items\"].index(song) + 1\n    sql = \"INSERT OR IGNORE INTO SpotifySongData (song_title, rating, date) VALUES (?,?,?)\"\n    val = (song_title, rating, date)\n    rows_modified = cur.execute(sql, val).rowcount\n    if rows_modified != 0:\n        count1 += 1\nconn.commit()\n\nfor song in spotify_results[\"items\"]:\n    song_title = song[\"track\"][\"name\"]\n    rating = spotify_results[\"items\"].index(song) + 1\n    sql = \"INSERT OR IGNORE INTO TopSpotifyData (rating, song_title) VALUES (?,?)\"\n    val = (rating, song_title)\n    cur.execute(sql, val)\nconn.commit()\n\ncount = 0\nfor song in song_list:\n    if count == 20:\n        break\n    rating_billboard = song_list.index(song) + 1\n    sql = \"INSERT OR IGNORE INTO BillboardData (rating_billboard, song) VALUES (?,?)\"\n    val = (rating_billboard, song) \n    rows_modified = cur.execute(sql, val).rowcount\n    if rows_modified != 0:\n        count += 1\nconn.commit()\n","sub_path":"MusicGrab.py","file_name":"MusicGrab.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"197101056","text":"#!/usr/bin/python3\n\nimport time\nimport socket\nimport datetime\nimport platform\n\nimport psutil\n\nprint('-----------------------------CPU info-------------------------------------')\n# number of physical CPUs\ncpu_count = psutil.cpu_count(logical=False)\n# CPU usage percentage\ncpu = (str(psutil.cpu_percent(1))) + '%'\nprint(u\"Physical CPUs: %s CPU usage: %s\" % (cpu_count, cpu))\n\nprint('-----------------------------Memory info-------------------------------------')\n# memory info: .free is the remaining memory, .total the total amount\n# round() returns the float rounded to the given number of decimals\nfree = str(round(psutil.virtual_memory().free / (1024.0 * 1024.0 * 1024.0), 2))\ntotal = str(round(psutil.virtual_memory().total / (1024.0 * 1024.0 * 1024.0), 2))\nmemory = int(psutil.virtual_memory().total - 
psutil.virtual_memory().free) / float(psutil.virtual_memory().total)\nprint(u\"Physical memory: %sG Free physical memory: %sG Memory usage: %s%%\" % (total, free, int(memory * 100)))\n\nprint('-----------------------------Disk info---------------------------------------')\ndisk = psutil.disk_usage(\"/\")\ntotal = int(disk.total / (1024.0 * 1024.0 * 1024.0))\nfree = int(disk.free / (1024.0 * 1024.0 * 1024.0))\nprint(\"Total capacity: %sG Free capacity: %sG\" % (total, free))\n\nprint('-----------------------------Network info---------------------------------------')\nnet = psutil.net_io_counters()\nbytes_sent = '{0:.2f}Mb'.format(net.bytes_sent / 1024 / 1024)\nbytes_rcvd = '{0:.2f}Mb'.format(net.bytes_recv / 1024 / 1024)\nprint(u\"Bytes received: %s Bytes sent: %s\" % (bytes_rcvd, bytes_sent))\n\nprint('-----------------------------System info-------------------------------------')\n# Current time\nnow_time = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time()))\nprint(\"Current time: %s\" % now_time)\n\n# System boot time\nprint(u\"System boot time: %s\" % datetime.datetime.fromtimestamp(psutil.boot_time()).strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n# Logged-in users\nusers_count = len(psutil.users())\nusers_list = \",\".join([u.name for u in psutil.users()])\nprint(u\"Logged-in users: %s (%s)\" % (users_count, users_list))\n\nsys_system = platform.system() # OS type\nsys_platform = platform.platform() # OS name and version, e.g. 'Windows-10-10.0.17134-SP0'\nsys_version = platform.version() # OS version, e.g. '10.0.17134'\nsys_architecture = platform.architecture() # OS bitness, e.g. ('64bit', 'WindowsPE')\nsys_machine = platform.machine() # hardware architecture, e.g. 'AMD64'\nsys_node = platform.node() # network (host) name, e.g. 'TDM'\nsys_processor = platform.processor() # processor info, e.g. 'Intel64 Family 6 Model 158 Stepping 9, GenuineIntel'\nsys_uname = platform.uname() # summary of all of the above\n\nprint(\"OS type: %s\" % sys_system)\nprint(\"OS version: %s\" % sys_version)\nprint(\"Architecture: %s\" % sys_machine)\nprint(\"Processor: %s\" % sys_processor)\nprint(\"Host name: %s\" % sys_node)\n\n\ndef get_host_ip():\n \"\"\"\n Get the local IP address\n :return: ip\n \"\"\"\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n finally:\n s.close()\n return ip\n\n\nif __name__ == '__main__':\n print(\"IP address: %s\" % get_host_ip())\n","sub_path":"devops/cli/sysinfo.py","file_name":"sysinfo.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"9604265","text":"# 04_partition_list.py\n\n\nfrom linked_list import *\n\n\ndef partition_list(linked, x):\n ''' Starts with the second element, as it doesn't matter whether the first is smaller or bigger than X; it will be the border element.\n Elements smaller than X will be moved to the head of the list.\n So all elements to the left of the border element, including it (or excluding it if the first element wasn't less than X), are less than X.\n All elements to the right of the border element, including it (or excluding it if the first element was less than X), are bigger than or equal to X.\n '''\n current = linked.head.next\n previous = linked.head\n while current is not None:\n if current.data < x:\n previous.next = current.next\n current.next = linked.head\n linked.head = current\n current = previous.next\n else:\n previous = current\n current = current.next\n\n\nif __name__ == '__main__':\n linked = LinkedList()\n for n in Node(-1), Node(15), Node(-3), Node(8.2), Node(14), Node(-8):\n linked.append(n)\n print('Linked list contains %i node(s)' % linked.length())\n linked.output()\n print('--------')\n\n partition_list(linked, 0)\n print('Linked list contains %i node(s)' % linked.length())\n 
linked.output()\n","sub_path":"experiments/34_question_and_solutions/02_linked_lists/04_partition_list.py","file_name":"04_partition_list.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69255550","text":"# Upstream: the feedback layer; downstream: the three init modules\nimport json\nimport cv2\nimport numpy as np\nfrom time import sleep\nfrom picamera import PiCamera\n\nfrom init_pkg import init_pattern\nfrom init_pkg import init_cata\nfrom init_pkg import init_color\n\nif __name__ == '__main__':\n import fb_subcontroller\n\ndef shot_and_get(msg):\n \"\"\"\n :lib: feedback\n :func: shot_and_get\n :param: msg: dict whose 'param' entry counts how many times this func has been called\n :return: none\n \"\"\"\n ctr = msg['param']\n # Build the image storage path\n path_suffix = '.jpg'\n path_prefix = '/path/my_img'\n img_path = path_prefix + str(ctr) + path_suffix\n # Capture the image\n camera = PiCamera()\n camera.start_preview()\n sleep(2)\n camera.capture(img_path)\n camera.stop_preview()\n # Store the image and its tags\n pattern_tag = init_pattern.pattern(img_path)\n cata_tag = init_cata.cata(img_path)\n color_tag = init_color.color(img_path)\n request = [{\n 'lib': 'DB',\n 'func': 'InputClothes',\n 'param': {'CTYP':cata_tag,\\\n 'CCOLOR':color_tag,\\\n 'CTEX':pattern_tag,\\\n 'CIMG':img_path}\n }]\n json_request = json.dumps(request)\n retval = fb_subcontroller.queryOutHandler(json_request)\n return retval","sub_path":"feedback/clocam.py","file_name":"clocam.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"344375974","text":"from django import template\n\nregister = template.Library()\n\n\n@register.filter(name='margenizar')\ndef margenizar(hora_inicio, hora_anterior):\n '''Returns the margin value for the programme grid'''\n\n inicio = hora_inicio.hour*60 + hora_inicio.minute\n fin_previo = hora_anterior.hour*60 + hora_anterior.minute\n if inicio <= fin_previo:\n return 0\n\n diferencial = (inicio - fin_previo)*1.5 #- 30)/5 # extra height differential for programmes longer than half an hour\n return int(diferencial)\n\n\n@register.filter(name='calendarizar')\ndef calendarizar(programa):\n min_final = (programa.hora_fin.hour*60 + programa.hora_fin.minute)\n min_inicio = (programa.hora_inicio.hour*60 + programa.hora_inicio.minute)\n diferencial_minutos = min_final - min_inicio #(min_final - min_inicio - 30)/5\n return int(diferencial_minutos*1.5) # each minute corresponds to one pixel\n\n\n@register.filter(name='formatear')\ndef formatear(programa):\n min_final = (programa.hora_fin.hour*60 + programa.hora_fin.minute)\n min_inicio = (programa.hora_inicio.hour*60 + programa.hora_inicio.minute)\n diferencial_minutos = min_final - min_inicio #(min_final - min_inicio - 30)/5\n if diferencial_minutos == 30:\n return 0\n return diferencial_minutos/5 # each minute corresponds to one pixel\n\n\n@register.filter(name='prev')\ndef prev(list, index):\n previo = list[int(index)-1]\n return previo.hora_fin\n\n\n","sub_path":"programas/templatetags/programas_extras.py","file_name":"programas_extras.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"339995732","text":"import nltk\nfrom nltk.tokenize import word_tokenize\nimport numpy as np\nimport re\n#from keras.utils import to_categorical\nimport os\nfrom numpy import array\nfrom pickle import dump, load\nfrom keras.preprocessing.text import Tokenizer\nfrom 
keras.preprocessing.sequence import pad_sequences\n\nfrom tensorflow.keras.utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Embedding\nfrom keras.callbacks import EarlyStopping\nfrom keras.models import load_model\n\n\nfrom sklearn.model_selection import train_test_split\nimport functools\n\nfrom src.helpers import *\n\n# path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\npath = 'C:/Projects/NBA/'\n\ndatapath= \"C:/Users/703301318/Enquero/Adidas CDNA - Documents/02. Next Best Engagement/05. A-B Testing Codes/EXAP_DATA_INPUT/\"\n\n\ndf = pd.read_csv(datapath+'Member_Eng_Data.csv')\n\n# Only with engagements performed, not emails sent\ndf = df.loc[(df['EMAIL_ENGAGED_WITHIN_48H'] ==1) | \n (df['ENGAGED_WITHOUT_EMAIL'] == 1) , :]\n\n# Sort by recipient\ndf.sort_values(['IRECIPIENTID', 'TSENGMTDT'], inplace=True)\n\n\n# # Group by irecipientid\n# df_grp = df.groupby(['IRECIPIENTID'])['ENGMT_TYPE'].count()\n\n# # Get engagement number\ndf['Engmt_Number'] = df.groupby(['IRECIPIENTID']).cumcount()+1\n# df['Engmt_Number'] = 'ENGMT_' + df['Engmt_Number'].astype('str').str.zfill(2)\n\n# # Pivot\n# df_pivot = df.pivot_table(values='ENGMT_TYPE', index='IRECIPIENTID'\n# , columns='Engmt_Number', aggfunc='sum')\n\ndf_pivot1 = df.pivot_table(values='ENGMT_TYPE', index='IRECIPIENTID'\n , aggfunc=lambda x: '|'.join(x))\n\n# Filter for rows with more than 1 engagement\ndf_pivot2 = df_pivot1[df_pivot1['ENGMT_TYPE'].str.contains('\\|')]\ndf_pivot_1eng = df_pivot1[~df_pivot1['ENGMT_TYPE'].str.contains('\\|')]\n\n# df_pivot.columns\n\n\n\n# df = df.iloc[0:100000,:]\n# config_path = path+'/Config/'\n# input_path = path+'/Input/'\n\n# try:\n# config_file = open(config_path + \"/config.txt\")\n# exec(config_file.read(), globals())\n# print('[SUCCESS] : Reading Config')\n# except:\n# print('[ERROR]: Reading Config')\n\n# # from doc3 import training_doc3\n# # load ascii text and covert to lowercase\n# file = input_path+filename\n# raw_text = open(file, 'r', encoding='latin-1').read()\n# lines = raw_text.split('\\n')\n\n# # Clean text\n# df_lines = pd.DataFrame(lines, columns={'text'})\n# df_lines['text'] = df_lines['text'].astype(str)\n# df_lines = clean_column(df_lines, 'text')\n# df_lines = df_lines[~df_lines['text'].isnull()]\n\nlines = df_pivot2['ENGMT_TYPE'].values\n\n# integer encode sequences of words\ntokenizer = Tokenizer(split='|', filters='!\"#$%&()*+./:;<=>?@[\\\\]^_`{}~\\t\\n'\n , char_level=False) # remove special characters in the filter that form words\ntokenizer.fit_on_texts(lines)\nsequences = tokenizer.texts_to_sequences(lines)\n\n# # create sequences\ntrain_len = 21\n# text_sequences = []\n# for l in sequences:\n# for i in range(train_len, len(l)+1):\n# seq = l[i-train_len:i]\n# print(seq)\n# text_sequences.append(seq)\n\n# Pad sequences\n# text_sequences = pad_sequences(text_sequences, maxlen=train_len, truncating='pre')\ntext_sequences = pad_sequences(sequences, maxlen=train_len, truncating='pre')\n\n# vocabulary size\nvocab_size = len(tokenizer.word_index) + 1\n\n# separate into input and output\ntext_sequences = array(text_sequences)\nX, y = text_sequences[:, :-1], text_sequences[:, -1]\n\n#Split dataset into train and test\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)\ny_train = to_categorical(y_train, num_classes=vocab_size)\nseq_length = X_train.shape[1]\n\n# define model\nbatch_size = 128\nepochs = 5\nmodel = 
Sequential()\nmodel.add(Embedding(vocab_size, 50, input_length=seq_length))\nmodel.add(LSTM(100, return_sequences=True))\nmodel.add(LSTM(100))\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dense(vocab_size, activation='softmax'))\nprint(model.summary())\n# compile model\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n# fit model\nmodel.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1\n , validation_split=0.2\n , callbacks=[EarlyStopping(monitor='val_loss'\n , patience=3, min_delta=0.0001)])\n\n# save the model to file\nmodel.save(path+'Model/NBE_Model.h5')\n# save the tokenizer\ndump(tokenizer, open(path+'Model/NBE_Model.pkl', 'wb'))\n\n# Test accuracy\ny_test_dummy = to_categorical(y_test, num_classes=vocab_size)\n\nscore, acc = model.evaluate(X_test, y_test_dummy, batch_size=batch_size)\nprint('Test loss score:', score)\nprint('Test accuracy:', acc)\n\n#####################\n\n# load the model\nmodel_name = 'Model/NBE_Model.h5'\nmodel = load_model(path+model_name)\n\n# load the tokenizer\ntokenizer_name = 'Model/NBE_Model.pkl'\ntokenizer = load(open(path+tokenizer_name, 'rb'))\n\n# Labels to classes dictionary\nlabels_dict = tokenizer.index_word\n\n\n# Test - get predictions\npred = model.predict(X_test)\n\n# Get top recommendations\nn=3\ndf_test = pd.DataFrame(y_test, columns=['Actual'])\ndf_test['recommendations'] = list(map(functools.partial(get_top_n_recommendations, n=n, tokenizer=tokenizer), pred))\ndf_test[['Reco1', 'Reco2', 'Reco3']] = pd.DataFrame(df_test['recommendations'].tolist(), index=df_test.index)\ndf_test['Actual_engagement'] = df_test[\"Actual\"]\ndf_test[\"Actual_engagement\"].replace(labels_dict, inplace=True)\n\n# Get matches\ndf_test['Match'] = list(map(get_matches, df_test[\"Actual_engagement\"],\n df_test['Reco1'], df_test['Reco2'],\n df_test['Reco3']))\n\nX_test_df = pd.DataFrame(X_test)\ndf_test['engagements_count'] = 20-(X_test_df == 0).sum(axis=1)\n\ndf_test.reset_index(inplace=True)\n\n\n# Get summary\ndf_summary = pd.DataFrame(df_test.groupby([\"Actual_engagement\", 'Match'])['Match'].count())\ndf_summary['%Match'] = df_summary['Match'] / df_summary.groupby('Actual_engagement')['Match'].transform('sum')\ndf_summary.to_csv(path+'Test_Summary.csv')\n\n# Get summary for 1 engagement\ndf_test1 = df_test[df_test['engagements_count'] == 1]\ndf_summary = pd.DataFrame(df_test1.groupby([\"Actual_engagement\", 'Match'])['Match'].count())\ndf_summary['%Match'] = df_summary['Match'] / df_summary.groupby('Actual_engagement')['Match'].transform('sum')\ndf_summary.to_csv(path+'Test_Summary1.csv')\n\n\n\n\n# Predict next engagement for users with 1 engagement\nlines_test = df_pivot_1eng['ENGMT_TYPE'].values\nencoded = tokenizer.texts_to_sequences(lines_test)\n# truncate sequences to a fixed length\nencoded_pad = pad_sequences(encoded, maxlen=seq_length, truncating='pre')\n\npred = model.predict(encoded_pad)\n\n# Get top recommendations\nn=3\ndf_pivot_1eng['recommendations'] = list(map(functools.partial(get_top_n_recommendations, n=n, tokenizer=tokenizer), pred))\ndf_pivot_1eng[['Reco1', 'Reco2', 'Reco3']] = pd.DataFrame(df_pivot_1eng['recommendations'].tolist(), index=df_pivot_1eng.index)\ndf_pivot_1eng.to_csv('1Engagement_Recommendations.csv')\n\n\n# Check\ntest_eng = [0,0,2,1]\nget_top_n_recommendations(model.predict(pad_sequences([test_eng], maxlen=seq_length, truncating='pre'))[0], n, 
tokenizer)\n\n","sub_path":"NBA/src/Next_word_prediction_training.py","file_name":"Next_word_prediction_training.py","file_ext":"py","file_size_in_byte":7203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"109707902","text":"from datetime import date, timedelta\n\nimport requests\nfrom flask import current_app\nfrom requests.exceptions import RequestException\n\nJOKES_PER_REQ = 10\n\n\ndef build_url(categories_param):\n urls = []\n categories = []\n for i in range(10):\n categories.append(categories_param[i % len(categories_param)])\n\n for category in categories[:10]:\n urls.append(f\"{current_app.config['CHUNK_API']}/random?category={category}\")\n return urls\n\n\ndef get_categories():\n response = requests.get(f\"{current_app.config['CHUNK_API']}/categories\")\n if response.status_code != 200:\n current_app.logger.exception(\n \"chunk api returned status code %s\", response.status_code\n )\n raise RequestException()\n return response.json()\n\n\ndef get_jokes(categories_param):\n if len(categories_param) > 0:\n urls = build_url(categories_param)\n response = list(requests.get(url) for url in urls)\n else:\n response = list(\n requests.get(f\"{current_app.config['CHUNK_API']}/random\") for x in range(10)\n )\n\n if len(response) > 0 and response[0].status_code != 200:\n current_app.logger.exception(\n \"chunk api returned status code %s\", response[0].status_code\n )\n raise RequestException()\n\n return list(map(lambda x: x.json(), response))\n","sub_path":"backendApp/app/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"181542505","text":"#\n# encoding.py\n#\n# Copyright © 2013-2014 Monotype Imaging Inc. All Rights Reserved.\n#\n\n\"\"\"\nSupport for CFF Encodings.\n\"\"\"\n\n# System imports\nimport logging\n\n# Other imports\nfrom fontio3.CFF import encodings_f0, encodings_f1, encodings_predefined\nfrom fontio3.CFF.cffutils import stdStrings, nStdStrings, dStdStrings\nfrom fontio3.fontdata import mapmeta\n\n# -----------------------------------------------------------------------------\n\n#\n# Private constants\n#\n\n_workClasses = {\n 0: encodings_f0.Format0,\n 1: encodings_f1.Format1}\n\n\n# -----------------------------------------------------------------------------\n\n#\n# Classes\n#\n\nclass Encoding(dict, metaclass=mapmeta.FontDataMetaclass):\n \"\"\"\n Objects representing CFF Encodings, a mapping of codes to glyphIDs.\n \"\"\"\n \n mapSpec = dict(\n item_renumberdirectvalues = True,\n map_compactremovesfalses = True)\n \n attrSpec = dict(\n originalFormat = dict(\n attr_ignoreforcomparisons = True,\n attr_initfunc = (lambda: None),\n attr_showonlyiftrue = True),\n predefinedFormat = dict(\n attr_ignoreforcomparisons = True,\n attr_initfunc = (lambda: None),\n attr_showonlyiftrue = True))\n #\n # Initialization and class methods\n #\n \n @classmethod\n def fromvalidatedwalker(cls, w, **kwArgs):\n \"\"\"\n Like fromwalker(), this method returns a new Encoding. 
However, it also\n does extensive validation via the logging module (the client should\n have done a logging.basicConfig call prior to calling this method,\n unless a logger is passed in via the 'logger' keyword argument).\n \"\"\"\n \n logger = kwArgs.pop('logger', None)\n \n if logger is None:\n logger = logging.getLogger().getChild('charset')\n else:\n logger = logger.getChild('charset')\n\n byteLength = w.length()\n logger.debug(('V0001', (byteLength,), \"Walker has %d remaining bytes.\"))\n \n if byteLength < 1:\n logger.error(('V0004', (), \"Insufficient bytes.\"))\n return None\n \n format = w.unpack(\"B\", advance=False)\n \n if format not in _workClasses:\n logger.error(('V0002', (format,), \"Invalid format (0x%04X).\"))\n return None\n\n workObj = _workClasses[format].fromvalidatedwalker(\n w,\n logger = logger,\n **kwArgs)\n \n if workObj is None: return None\n\n return cls(workObj, originalFormat=format)\n \n\n @classmethod\n def fromvalidatednumber(cls, n, **kwArgs):\n \"\"\"\n Like fromnumber, fromvalidatednumber returns an Encoding based\n on a number (one of the 2 Predefined encodings: Standard\n Encoding (0) or Expert Encoding (1)). It also performs\n validation using a logger.\n \"\"\"\n logger = kwArgs.pop('logger', None)\n \n if logger is None:\n logger = logging.getLogger().getChild('encoding')\n else:\n logger = logger.getChild('encoding')\n\n d = encodings_predefined.Predefined.fromvalidatednumber(\n n,\n logger=logger,\n **kwArgs)\n return cls(d, predefinedFormat=n)\n\n\n @classmethod\n def fromwalker(cls, w, **kwArgs):\n \"\"\"\n Initialize Encoding data from the stored format from the\n specified walker.\n \"\"\"\n \n format = w.unpack(\"B\", advance=False)\n workObj = _workClasses[format].fromwalker(w, **kwArgs)\n return cls(workObj, originalFormat=format)\n\n\n @classmethod\n def fromnumber(cls, n, **kwArgs):\n \"\"\"\n Returns an Encoding based on a number (one of the 2 Predefined\n encodings: Standard Encoding (0) or Expert Encoding (1)).\n \"\"\"\n\n d = encodings_predefined.Predefined.fromnumber(n, **kwArgs)\n return cls(d, predefinedFormat=n)\n\n \n #\n # Public methods\n #\n \n def buildBinary(self, w, **kwArgs):\n \"\"\"\n Call buildBinary using the appropriate originalFormat method.\n \n >>> print(utilities.hexdumpString(_testingValues[1].binaryString()), end='')\n 0 |0003 0203 04 |..... 
|\n \"\"\"\n \n if self.originalFormat is not None:\n c=_workClasses[self.originalFormat]\n c(self).buildBinary(w, **kwArgs)\n\n\n# -----------------------------------------------------------------------------\n\n#\n# Test code\n#\n\nif 0:\n def __________________(): pass\n\nif __debug__:\n from fontio3 import utilities\n \n _testingValues = (\n Encoding(originalFormat=0),\n Encoding({1:2, 2:3, 3:4}, originalFormat=0))\n\ndef _test():\n import doctest\n doctest.testmod()\n\nif __name__ == \"__main__\":\n if __debug__:\n _test()\n\n","sub_path":"fontio3/fontio3/CFF/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"652447351","text":"''' ONNX backend '''\n\nimport collections\nimport enum\nimport json\n\nclass ModelFactory:\n ''' ONNX backend model factory '''\n def serialize(self, model):\n ''' Serialize ONNX model to JSON message '''\n # import onnx.shape_inference\n # model = onnx.shape_inference.infer_shapes(model)\n json_model = {}\n json_model['signature'] = 'netron:onnx'\n json_model['format'] = 'ONNX' + (' v' + str(model.ir_version) if model.ir_version else '')\n if model.producer_name and len(model.producer_name) > 0:\n producer_version = ' v' + model.producer_version if model.producer_version else ''\n json_model['producer'] = model.producer_name + producer_version\n if model.model_version and model.model_version != 0:\n json_model['version'] = str(model.model_version)\n if model.doc_string and len(model.doc_string):\n json_model['description'] = str(model.doc_string)\n json_metadata = []\n metadata_props = [ [ entry.key, entry.value ] for entry in model.metadata_props ]\n metadata = collections.OrderedDict(metadata_props)\n value = metadata.get('converted_from')\n if value:\n json_metadata.append({ 'name': 'source', 'value': value })\n value = metadata.get('author')\n if value:\n json_metadata.append({ 'name': 'author', 'value': value })\n value = metadata.get('company')\n if value:\n json_metadata.append({ 'name': 'company', 'value': value })\n value = metadata.get('license')\n license_url = metadata.get('license_url')\n if license_url:\n value = '' + (value if value else license_url) + ''\n if value:\n json_metadata.append({ 'name': 'license', 'value': value })\n if 'author' in metadata:\n metadata.pop('author')\n if 'company' in metadata:\n metadata.pop('company')\n if 'converted_from' in metadata:\n metadata.pop('converted_from')\n if 'license' in metadata:\n metadata.pop('license')\n if 'license_url' in metadata:\n metadata.pop('license_url')\n for name, value in metadata.items():\n json_metadata.append({ 'name': name, 'value': value })\n if len(json_metadata) > 0:\n json_model['metadata'] = json_metadata\n json_model['graphs'] = []\n graph = model.graph\n json_graph = {\n 'nodes': [],\n 'inputs': [],\n 'outputs': [],\n 'arguments': []\n }\n json_model['graphs'].append(json_graph)\n arguments = {}\n def tensor(tensor): # pylint: disable=unused-argument\n return {}\n def argument(name, tensor_type=None, initializer=None):\n if not name in arguments:\n json_argument = {}\n json_argument['name'] = name\n arguments[name] = len(json_graph['arguments'])\n json_graph['arguments'].append(json_argument)\n index = arguments[name]\n if tensor_type or initializer:\n json_argument = json_graph['arguments'][index]\n if initializer:\n json_argument['initializer'] = tensor(initializer)\n return index\n\n for value_info in graph.value_info:\n 
argument(value_info.name)\n for initializer in graph.initializer:\n argument(initializer.name, None, initializer)\n for node in graph.node:\n op_type = node.op_type\n json_node = {}\n json_node_type = {}\n json_node_type['name'] = op_type\n if self.category(op_type):\n json_node_type['category'] = self.category(op_type)\n json_node['type'] = json_node_type\n if node.name:\n json_node['name'] = node.name\n json_node['inputs'] = []\n for value in node.input:\n json_node['inputs'].append({\n 'name': 'X',\n 'arguments': [ argument(value) ]\n })\n json_node['outputs'] = []\n for value in node.output:\n json_node['outputs'].append({\n 'name': 'X',\n 'arguments': [ argument(value) ]\n })\n json_node['attributes'] = []\n for _ in node.attribute:\n if _.type == _AttributeType.UNDEFINED:\n attribute_type = None\n value = None\n elif _.type == _AttributeType.FLOAT:\n attribute_type = 'float32'\n value = _.f\n elif _.type == _AttributeType.INT:\n attribute_type = 'int64'\n value = _.i\n elif _.type == _AttributeType.STRING:\n attribute_type = 'string'\n value = _.s.decode('latin1' if op_type == 'Int8GivenTensorFill' else 'utf-8')\n elif _.type == _AttributeType.TENSOR:\n attribute_type = 'tensor'\n value = tensor(_.t)\n elif _.type == _AttributeType.GRAPH:\n attribute_type = 'tensor'\n raise Exception('Unsupported graph attribute type')\n elif _.type == _AttributeType.FLOATS:\n attribute_type = 'float32[]'\n value = list(_.floats)\n elif _.type == _AttributeType.INTS:\n attribute_type = 'int64[]'\n value = list(_.ints)\n elif _.type == _AttributeType.STRINGS:\n attribute_type = 'string[]'\n value = [ item.decode('utf-8') for item in _.strings ]\n elif _.type == _AttributeType.TENSORS:\n attribute_type = 'tensor[]'\n raise Exception('Unsupported tensors attribute type')\n elif _.type == _AttributeType.GRAPHS:\n attribute_type = 'graph[]'\n raise Exception('Unsupported graphs attribute type')\n elif _.type == _AttributeType.SPARSE_TENSOR:\n attribute_type = 'tensor'\n value = tensor(_.sparse_tensor)\n else:\n raise Exception(\"Unsupported attribute type '\" + str(_.type) + \"'.\")\n json_attribute = {}\n json_attribute['name'] = _.name\n if attribute_type:\n json_attribute['type'] = attribute_type\n json_attribute['value'] = value\n json_node['attributes'].append(json_attribute)\n json_graph['nodes'].append(json_node)\n text = json.dumps(json_model, ensure_ascii=False)\n return text.encode('utf-8')\n\n categories = {\n 'Constant': 'Constant',\n 'Conv': 'Layer',\n 'ConvInteger': 'Layer',\n 'ConvTranspose': 'Layer',\n 'FC': 'Layer',\n 'RNN': 'Layer',\n 'LSTM': 'Layer',\n 'GRU': 'Layer',\n 'Gemm': 'Layer',\n 'FusedConv': 'Layer',\n 'Dropout': 'Dropout',\n 'Elu': 'Activation',\n 'HardSigmoid': 'Activation',\n 'LeakyRelu': 'Activation',\n 'PRelu': 'Activation',\n 'ThresholdedRelu': 'Activation',\n 'Relu': 'Activation',\n 'Selu': 'Activation',\n 'Sigmoid': 'Activation',\n 'Tanh': 'Activation',\n 'LogSoftmax': 'Activation',\n 'Softmax': 'Activation',\n 'Softplus': 'Activation',\n 'Softsign': 'Activation',\n 'Clip': 'Activation',\n 'BatchNormalization': 'Normalization',\n 'InstanceNormalization': 'Normalization',\n 'LpNormalization': 'Normalization',\n 'LRN': 'Normalization',\n 'Flatten': 'Shape',\n 'Reshape': 'Shape',\n 'Tile': 'Shape',\n 'Xor': 'Logic',\n 'Not': 'Logic',\n 'Or': 'Logic',\n 'Less': 'Logic',\n 'And': 'Logic',\n 'Greater': 'Logic',\n 'Equal': 'Logic',\n 'AveragePool': 'Pool',\n 'GlobalAveragePool': 'Pool',\n 'GlobalLpPool': 'Pool',\n 'GlobalMaxPool': 'Pool',\n 'LpPool': 'Pool',\n 'MaxPool': 
'Pool',\n 'MaxRoiPool': 'Pool',\n 'Concat': 'Tensor',\n 'Slice': 'Tensor',\n 'Split': 'Tensor',\n 'Pad': 'Tensor',\n 'ImageScaler': 'Data',\n 'Crop': 'Data',\n 'Upsample': 'Data',\n 'Transpose': 'Transform',\n 'Gather': 'Transform',\n 'Unsqueeze': 'Transform',\n 'Squeeze': 'Transform',\n }\n\n def category(self, name):\n ''' Get category for type '''\n return self.categories[name] if name in self.categories else ''\n\nclass _AttributeType(enum.IntEnum):\n UNDEFINED = 0\n FLOAT = 1\n INT = 2\n STRING = 3\n TENSOR = 4\n GRAPH = 5\n FLOATS = 6\n INTS = 7\n STRINGS = 8\n TENSORS = 9\n GRAPHS = 10\n SPARSE_TENSOR = 11\n SPARSE_TENSORS = 12\n TYPE_PROTO = 13\n TYPE_PROTOS = 14\n","sub_path":"source/onnx.py","file_name":"onnx.py","file_ext":"py","file_size_in_byte":9071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"338679318","text":"# Copyright 2020 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n\n\"\"\"The is Aptitude package manager implementation\"\"\"\nimport json\nimport os\nimport re\nfrom core.src.package_managers.PackageManager import PackageManager\nfrom core.src.bootstrap.Constants import Constants\n\n\nclass AptitudePackageManager(PackageManager):\n \"\"\"Implementation of Debian/Ubuntu based package management operations\"\"\"\n\n # For more details, try `man apt-get` on any Debian/Ubuntu based box.\n def __init__(self, env_layer, execution_config, composite_logger, telemetry_writer, status_handler):\n super(AptitudePackageManager, self).__init__(env_layer, execution_config, composite_logger, telemetry_writer, status_handler)\n # Repo refresh\n self.repo_refresh = 'sudo apt-get -q update'\n\n # Support to get updates and their dependencies\n self.security_sources_list = '/tmp/az-update-security.list'\n self.prep_security_sources_list_cmd = 'sudo grep security /etc/apt/sources.list > ' + self.security_sources_list\n self.dist_upgrade_simulation_cmd_template = 'LANG=en_US.UTF8 sudo apt-get -s dist-upgrade ' # Dist-upgrade simulation template - needs to be replaced before use; sudo is used as sometimes the sources list needs sudo to be readable\n self.single_package_check_versions = 'apt-cache madison '\n self.single_package_find_installed_dpkg = 'sudo dpkg -s '\n self.single_package_find_installed_apt = 'sudo apt list --installed '\n self.single_package_upgrade_simulation_cmd = '''DEBIAN_FRONTEND=noninteractive apt-get -y --only-upgrade true -s install '''\n self.single_package_dependency_resolution_template = 'DEBIAN_FRONTEND=noninteractive LANG=en_US.UTF8 apt-get -y --only-upgrade true -s install '\n\n # Install update\n # --only-upgrade: upgrade only single package (only if it is installed)\n self.single_package_upgrade_cmd = '''sudo DEBIAN_FRONTEND=noninteractive apt-get -y --only-upgrade true install '''\n\n # Package manager exit code(s)\n self.apt_exitcode_ok = 0\n\n # auto OS updates\n self.update_package_list = 'APT::Periodic::Update-Package-Lists'\n 
self.unattended_upgrade = 'APT::Periodic::Unattended-Upgrade'\n self.os_patch_configuration_settings_file_path = '/etc/apt/apt.conf.d/20auto-upgrades'\n self.update_package_list_value = \"\"\n self.unattended_upgrade_value = \"\"\n\n # Miscellaneous\n os.environ['DEBIAN_FRONTEND'] = 'noninteractive' # Avoid a config prompt\n self.set_package_manager_setting(Constants.PKG_MGR_SETTING_IDENTITY, Constants.APT)\n self.STR_DPKG_WAS_INTERRUPTED = \"E: dpkg was interrupted, you must manually run 'sudo dpkg --configure -a' to correct the problem.\"\n\n def refresh_repo(self):\n self.composite_logger.log(\"\\nRefreshing local repo...\")\n self.invoke_package_manager(self.repo_refresh)\n\n # region Get Available Updates\n def invoke_package_manager(self, command):\n \"\"\"Get missing updates using the command input\"\"\"\n self.composite_logger.log_debug('\\nInvoking package manager using: ' + command)\n code, out = self.env_layer.run_command_output(command, False, False)\n\n if code != self.apt_exitcode_ok and self.STR_DPKG_WAS_INTERRUPTED in out:\n self.composite_logger.log_error('[ERROR] YOU NEED TO TAKE ACTION TO PROCEED. The package manager on this machine is not in a healthy state, and '\n 'Patch Management cannot proceed successfully. Before the next Patch Operation, please run the following '\n 'command and perform any configuration steps necessary on the machine to return it to a healthy state: '\n 'sudo dpkg --configure -a')\n self.telemetry_writer.send_execution_error(command, code, out)\n error_msg = 'Package manager on machine is not healthy. To fix, please run: sudo dpkg --configure -a'\n self.status_handler.add_error_to_status(error_msg, Constants.PatchOperationErrorCodes.PACKAGE_MANAGER_FAILURE)\n raise Exception(error_msg, \"[{0}]\".format(Constants.ERROR_ADDED_TO_STATUS))\n elif code != self.apt_exitcode_ok:\n self.composite_logger.log('[ERROR] Package manager was invoked using: ' + command)\n self.composite_logger.log_warning(\" - Return code from package manager: \" + str(code))\n self.composite_logger.log_warning(\" - Output from package manager: \\n|\\t\" + \"\\n|\\t\".join(out.splitlines()))\n self.telemetry_writer.send_execution_error(command, code, out)\n error_msg = 'Unexpected return code (' + str(code) + ') from package manager on command: ' + command\n self.status_handler.add_error_to_status(error_msg, Constants.PatchOperationErrorCodes.PACKAGE_MANAGER_FAILURE)\n raise Exception(error_msg, \"[{0}]\".format(Constants.ERROR_ADDED_TO_STATUS))\n # more known return codes should be added as appropriate\n else: # verbose diagnostic log\n self.composite_logger.log_debug(\"\\n\\n==[SUCCESS]===============================================================\")\n self.composite_logger.log_debug(\" - Return code from package manager: \" + str(code))\n self.composite_logger.log_debug(\" - Output from package manager: \\n|\\t\" + \"\\n|\\t\".join(out.splitlines()))\n self.composite_logger.log_debug(\"==========================================================================\\n\\n\")\n return out\n\n def invoke_apt_cache(self, command):\n \"\"\"Invoke apt-cache using the command input\"\"\"\n self.composite_logger.log_debug('Invoking apt-cache using: ' + command)\n code, out = self.env_layer.run_command_output(command, False, False)\n if code != 0:\n self.composite_logger.log('[ERROR] apt-cache was invoked using: ' + command)\n self.composite_logger.log_warning(\" - Return code from apt-cache: \" + str(code))\n self.composite_logger.log_warning(\" - Output from apt-cache: \\n|\\t\" + 
\"\\n|\\t\".join(out.splitlines()))\n error_msg = 'Unexpected return code (' + str(code) + ') from apt-cache on command: ' + command\n self.status_handler.add_error_to_status(error_msg, Constants.PatchOperationErrorCodes.PACKAGE_MANAGER_FAILURE)\n raise Exception(error_msg, \"[{0}]\".format(Constants.ERROR_ADDED_TO_STATUS))\n # more known return codes should be added as appropriate\n else: # verbose diagnostic log\n self.composite_logger.log_debug(\"\\n\\n==[SUCCESS]===============================================================\")\n self.composite_logger.log_debug(\" - Return code from apt-cache: \" + str(code))\n self.composite_logger.log_debug(\" - Output from apt-cache: \\n|\\t\" + \"\\n|\\t\".join(out.splitlines()))\n self.composite_logger.log_debug(\"==========================================================================\\n\\n\")\n return out\n\n # region Classification-based (incl. All) update check\n def get_all_updates(self, cached=False):\n \"\"\"Get all missing updates\"\"\"\n self.composite_logger.log_debug(\"\\nDiscovering all packages...\")\n if cached and not len(self.all_updates_cached) == 0:\n self.composite_logger.log_debug(\" - Returning cached package data.\")\n return self.all_updates_cached, self.all_update_versions_cached # allows for high performance reuse in areas of the code explicitly aware of the cache\n\n cmd = self.dist_upgrade_simulation_cmd_template.replace('', '')\n out = self.invoke_package_manager(cmd)\n self.all_updates_cached, self.all_update_versions_cached = self.extract_packages_and_versions(out)\n\n self.composite_logger.log_debug(\"Discovered \" + str(len(self.all_updates_cached)) + \" package entries.\")\n return self.all_updates_cached, self.all_update_versions_cached\n\n def get_security_updates(self):\n \"\"\"Get missing security updates\"\"\"\n self.composite_logger.log(\"\\nDiscovering 'security' packages...\")\n code, out = self.env_layer.run_command_output(self.prep_security_sources_list_cmd, False, False)\n if code != 0:\n self.composite_logger.log_warning(\" - SLP:: Return code: \" + str(code) + \", Output: \\n|\\t\" + \"\\n|\\t\".join(out.splitlines()))\n\n cmd = self.dist_upgrade_simulation_cmd_template.replace('', '-oDir::Etc::Sourcelist=' + self.security_sources_list)\n out = self.invoke_package_manager(cmd)\n security_packages, security_package_versions = self.extract_packages_and_versions(out)\n\n self.composite_logger.log(\"Discovered \" + str(len(security_packages)) + \" 'security' package entries.\")\n return security_packages, security_package_versions\n\n def get_other_updates(self):\n \"\"\"Get missing other updates\"\"\"\n self.composite_logger.log(\"\\nDiscovering 'other' packages...\")\n other_packages = []\n other_package_versions = []\n\n all_packages, all_package_versions = self.get_all_updates(True)\n security_packages, security_package_versions = self.get_security_updates()\n\n for index, package in enumerate(all_packages):\n if package not in security_packages:\n other_packages.append(package)\n other_package_versions.append(all_package_versions[index])\n\n self.composite_logger.log(\"Discovered \" + str(len(other_packages)) + \" 'other' package entries.\")\n return other_packages, other_package_versions\n # endregion\n\n # region Output Parser(s)\n def extract_packages_and_versions(self, output):\n # sample output format\n # Inst coreutils [8.25-2ubuntu2] (8.25-2ubuntu3~16.10 Ubuntu:16.10/yakkety-updates [amd64])\n # Inst python3-update-manager [1:16.10.7] (1:16.10.8 Ubuntu:16.10/yakkety-updates [all]) 
[update-manager-core:amd64 ]\n # Inst update-manager-core [1:16.10.7] (1:16.10.8 Ubuntu:16.10/yakkety-updates [all])\n\n self.composite_logger.log_debug(\"\\nExtracting package and version data...\")\n packages = []\n versions = []\n\n search_text = r'Inst[ ](.*?)[ ].*?[(](.*?)[ ](.*?)[ ]\\[(.*?)\\]'\n search = re.compile(search_text, re.M | re.S)\n package_list = search.findall(str(output))\n\n for package in package_list:\n packages.append(package[0])\n versions.append(package[1])\n\n self.composite_logger.log_debug(\"Extracted package and version data for \" + str(len(packages)) + \" packages.\")\n return packages, versions\n # endregion\n # endregion\n\n # region Install Update\n def get_composite_package_identifier(self, package, package_version):\n return package + '=' + package_version\n\n def install_updates_fail_safe(self, excluded_packages):\n return\n # endregion\n\n # region Package Information\n def get_all_available_versions_of_package(self, package_name):\n \"\"\" Returns a list of all the available versions of a package \"\"\"\n # Sample output format\n # bash | 4.3-14ubuntu1.3 | http://us.archive.ubuntu.com/ubuntu xenial-updates/main amd64 Packages\n # bash | 4.3-14ubuntu1.2 | http://security.ubuntu.com/ubuntu xenial-security/main amd64 Packages\n # bash | 4.3-14ubuntu1 | http://us.archive.ubuntu.com/ubuntu xenial/main amd64 Packages\n\n package_versions = []\n\n cmd = self.single_package_check_versions.replace('', package_name)\n output = self.invoke_apt_cache(cmd)\n lines = output.strip().split('\\n')\n\n for line in lines:\n package_details = line.split(' |')\n if len(package_details) == 3:\n self.composite_logger.log_debug(\" - Applicable line: \" + str(line))\n package_versions.append(package_details[1].strip())\n else:\n self.composite_logger.log_debug(\" - Inapplicable line: \" + str(line))\n\n return package_versions\n\n def is_package_version_installed(self, package_name, package_version):\n \"\"\" Returns true if the specific package version is installed \"\"\"\n\n self.composite_logger.log_debug(\"\\nCHECKING PACKAGE INSTALL STATUS FOR: \" + str(package_name) + \" (\" + str(package_version) + \")\")\n\n # DEFAULT METHOD\n self.composite_logger.log_debug(\" - [1/2] Verifying install status with Dpkg.\")\n cmd = self.single_package_find_installed_dpkg.replace('', package_name)\n code, output = self.env_layer.run_command_output(cmd, False, False)\n lines = output.strip().split('\\n')\n\n if code == 1: # usually not found\n # Sample output format ------------------------------------------\n # dpkg-query: package 'mysql-client' is not installed and no information is available\n # Use dpkg --info (= dpkg-deb --info) to examine archive files,\n # and dpkg --contents (= dpkg-deb --contents) to list their contents.\n # ------------------------------------------ -------------------\n self.composite_logger.log_debug(\" - Return code: 1. The package is likely NOT present on the system.\")\n for line in lines:\n if 'not installed' in line and package_name in line:\n self.composite_logger.log_debug(\" - Discovered to be not installed: \" + str(line))\n return False\n else:\n self.composite_logger.log_debug(\" - Inapplicable line: \" + str(line))\n\n self.telemetry_writer.send_debug_info(\"[Installed check] Return code: 1. 
Unable to verify package not present on the system: \" + str(output))\n elif code == 0: # likely found\n # Sample output format ------------------------------------------\n # Package: mysql-server\n # Status: install ok installed\n # Priority: optional\n # Section: database\n # Installed-Size: 107\n # Maintainer: Ubuntu Developers \n # Architecture: all\n # Source: mysql-5.7\n # Version: 5.7.25-0ubuntu0.16.04.2\n # Depends: mysql-server-5.7\n # ------------------------------------------ --------------------\n self.composite_logger.log_debug(\" - Return code: 0. The package is likely present on the system.\")\n composite_found_flag = 0\n for line in lines:\n if 'Package: ' in line:\n if package_name in line:\n composite_found_flag = composite_found_flag | 1\n else: # should never hit for the way this is invoked, hence telemetry\n self.composite_logger.log_debug(\" - Did not match name: \" + str(package_name) + \" (\" + str(line) + \")\")\n self.telemetry_writer.send_debug_info(\"[Installed check] Name did not match: \" + package_name + \" (line=\" + str(line) + \")(out=\" + str(output) + \")\")\n continue\n if 'Version: ' in line:\n if package_version in line:\n composite_found_flag = composite_found_flag | 2\n else: # should never hit for the way this is invoked, hence telemetry\n self.composite_logger.log_debug(\" - Did not match version: \" + str(package_version) + \" (\" + str(line) + \")\")\n self.telemetry_writer.send_debug_info(\"[Installed check] Version did not match: \" + str(package_version) + \" (line=\" + str(line) + \")(out=\" + str(output) + \")\")\n continue\n if 'Status: ' in line:\n if 'install ok installed' in line:\n composite_found_flag = composite_found_flag | 4\n else: # should never hit for the way this is invoked, hence telemetry\n self.composite_logger.log_debug(\" - Did not match status: \" + str(package_name) + \" (\" + str(line) + \")\")\n self.telemetry_writer.send_debug_info(\"[Installed check] Status did not match: 'install ok installed' (line=\" + str(line) + \")(out=\" + str(output) + \")\")\n continue\n if composite_found_flag & 7 == 7: # whenever this becomes true, the exact package version is installed\n self.composite_logger.log_debug(\" - Package, Version and Status matched. Package is detected as 'Installed'.\")\n return True\n self.composite_logger.log_debug(\" - Inapplicable line: \" + str(line))\n self.composite_logger.log_debug(\" - Install status check did NOT find the package installed: (composite_found_flag=\" + str(composite_found_flag) + \")\")\n self.telemetry_writer.send_debug_info(\"Install status check did NOT find the package installed: (composite_found_flag=\" + str(composite_found_flag) + \")(output=\" + output + \")\")\n else: # This is not expected to execute. If it does, the details will show up in telemetry. Improve this code with that information.\n self.composite_logger.log_debug(\" - Unexpected return code from dpkg: \" + str(code) + \". Output: \" + str(output))\n self.telemetry_writer.send_debug_info(\"Unexpected return code from dpkg: Cmd=\" + str(cmd) + \". Code=\" + str(code) + \". Output=\" + str(output))\n\n # SECONDARY METHOD - Fallback\n # Sample output format\n # Listing... 
Done\n # apt/xenial-updates,now 1.2.29 amd64 [installed]\n self.composite_logger.log_debug(\" - [2/2] Verifying install status with Apt.\")\n cmd = self.single_package_find_installed_apt.replace('', package_name)\n output = self.invoke_package_manager(cmd)\n lines = output.strip().split('\\n')\n\n for line in lines:\n package_details = line.split(' ')\n if len(package_details) < 4:\n self.composite_logger.log_debug(\" - Inapplicable line: \" + str(line))\n else:\n self.composite_logger.log_debug(\" - Applicable line: \" + str(line))\n discovered_package_name = package_details[0].split('/')[0] # index out of bounds check is deliberately not being done\n if discovered_package_name != package_name:\n self.composite_logger.log_debug(\" - Did not match name: \" + discovered_package_name + \" (\" + package_name + \")\")\n continue\n if package_details[1] != package_version:\n self.composite_logger.log_debug(\" - Did not match version: \" + package_details[1] + \" (\" + str(package_details[1]) + \")\")\n continue\n if 'installed' not in package_details[3]:\n self.composite_logger.log_debug(\" - Did not find status: \" + str(package_details[3] + \" (\" + str(package_details[3]) + \")\"))\n continue\n self.composite_logger.log_debug(\" - Package version specified was determined to be installed.\")\n self.telemetry_writer.send_debug_info(\"[Installed check] Fallback code disagreed with dpkg.\")\n return True\n\n self.composite_logger.log_debug(\" - Package version specified was determined to NOT be installed.\")\n return False\n\n def get_dependent_list(self, package_name):\n \"\"\"Returns dependent List of the package\"\"\"\n cmd = self.single_package_dependency_resolution_template.replace('', package_name)\n\n self.composite_logger.log_debug(\"\\nRESOLVING DEPENDENCIES USING COMMAND: \" + str(cmd))\n output = self.invoke_package_manager(cmd)\n\n packages, package_versions = self.extract_packages_and_versions(output)\n if package_name in packages:\n packages.remove(package_name)\n\n self.composite_logger.log_debug(str(len(packages)) + \" dependent updates were found for package '\" + package_name + \"'.\")\n return packages\n\n def get_product_name(self, package_name):\n \"\"\"Retrieve product name \"\"\"\n return package_name\n\n def get_package_size(self, output):\n \"\"\"Retrieve package size from update output string\"\"\"\n # Sample line from output:\n # Need to get 0 B/433 kB of archives\n # or\n # Need to get 110 kB of archives.\n try:\n if \"is already the newest version\" in output:\n return Constants.UNKNOWN_PACKAGE_SIZE\n search_txt = r'Need to get[ ](.*?)[ ]B/(.*?)[ ]of'\n search = re.compile(search_txt, re.M | re.S)\n pkg_list = search.findall(str(output))\n if not pkg_list:\n search_txt = r'Need to get[ ](.*?)[ ]of'\n search = re.compile(search_txt, re.M | re.S)\n pkg_list = search.findall(str(output))\n if not pkg_list or pkg_list[0] == \"\":\n return Constants.UNKNOWN_PACKAGE_SIZE\n return pkg_list[0]\n elif pkg_list[0][1] == \"\":\n return Constants.UNKNOWN_PACKAGE_SIZE\n return pkg_list[0][1]\n except Exception as error:\n self.composite_logger.log_debug(\" - Could not get package size from output: \" + repr(error))\n return Constants.UNKNOWN_PACKAGE_SIZE\n # endregion\n\n # region auto OS updates\n def disable_auto_os_update(self):\n \"\"\" Disables auto OS updates on the machine only if they are enabled and logs the default settings the machine comes with \"\"\"\n try:\n self.composite_logger.log_debug(\"Disabling auto OS updates if they are enabled\")\n 
self.backup_image_default_patch_configuration_if_not_exists()\n self.update_os_patch_configuration_sub_setting(self.update_package_list, \"0\")\n self.update_os_patch_configuration_sub_setting(self.unattended_upgrade, \"0\")\n self.composite_logger.log(\"Successfully disabled auto OS updates\")\n except Exception as error:\n self.composite_logger.log_error(\"Could not disable auto OS updates. [Error={0}]\".format(repr(error)))\n\n def backup_image_default_patch_configuration_if_not_exists(self):\n \"\"\" Records the default system settings for auto OS updates within patch extension artifacts for future reference.\n We only log the default system settings a VM comes with, any subsequent updates will not be recorded\"\"\"\n try:\n if not self.image_default_patch_configuration_backup_exists():\n image_default_patch_configuration = self.env_layer.file_system.read_with_retry(self.os_patch_configuration_settings_file_path)\n settings = image_default_patch_configuration.strip().split('\\n')\n\n for setting in settings:\n if self.update_package_list in str(setting):\n self.update_package_list_value = re.search(self.update_package_list + ' *\"(.*?)\".', str(setting)).group(1)\n if self.unattended_upgrade in str(setting):\n self.unattended_upgrade_value = re.search(self.unattended_upgrade + ' *\"(.*?)\".', str(setting)).group(1)\n\n if self.update_package_list_value == \"\":\n self.composite_logger.log_debug(\"Machine did not have any value set for [Setting={0}]\".format(str(self.update_package_list)))\n\n if self.unattended_upgrade_value == \"\":\n self.composite_logger.log_debug(\"Machine did not have any value set for [Setting={0}]\".format(str(self.unattended_upgrade)))\n\n backup_image_default_patch_configuration_json = {\n self.update_package_list: self.update_package_list_value,\n self.unattended_upgrade: self.unattended_upgrade_value\n }\n\n self.composite_logger.log_debug(\"Logging default system configuration settings for auto OS updates. [Settings={0}] [Log file path={1}]\"\n .format(str(backup_image_default_patch_configuration_json), self.image_default_patch_configuration_backup_path))\n self.env_layer.file_system.write_with_retry(self.image_default_patch_configuration_backup_path, '{0}'.format(json.dumps(backup_image_default_patch_configuration_json)), mode='w+')\n except Exception as error:\n error_message = \"Exception during fetching and logging default auto update settings on the machine. 
[Exception={0}]\".format(repr(error))\n self.composite_logger.log_error(error_message)\n raise\n\n def is_image_default_patch_configuration_backup_valid(self, image_default_patch_configuration_backup):\n if self.update_package_list in image_default_patch_configuration_backup and self.unattended_upgrade in image_default_patch_configuration_backup:\n self.composite_logger.log_debug(\"Extension already has a valid backup of the default system configuration settings for auto OS updates.\")\n return True\n else:\n self.composite_logger.log_error(\"Extension does not have a valid backup of the default system configuration settings for auto OS updates.\")\n return False\n\n def update_os_patch_configuration_sub_setting(self, patch_configuration_sub_setting, value=\"0\"):\n \"\"\" Updates (or adds if it doesn't exist) the given patch_configuration_sub_setting with the given value in os_patch_configuration_settings_file \"\"\"\n try:\n # note: adding space between the patch_configuration_sub_setting and value since, we will have to do that if we have to add a patch_configuration_sub_setting that did not exist before\n self.composite_logger.log(\"Updating system configuration settings for auto OS updates. [Patch Configuration Sub Setting={0}] [Value={1}]\".format(str(patch_configuration_sub_setting), value))\n os_patch_configuration_settings = self.env_layer.file_system.read_with_retry(self.os_patch_configuration_settings_file_path)\n patch_configuration_sub_setting_to_update = patch_configuration_sub_setting + ' \"' + value + '\";'\n patch_configuration_sub_setting_found_in_file = False\n updated_patch_configuration_sub_setting = \"\"\n settings = os_patch_configuration_settings.strip().split('\\n')\n\n # update value of existing setting\n for i in range(len(settings)):\n if patch_configuration_sub_setting in settings[i]:\n settings[i] = patch_configuration_sub_setting_to_update\n patch_configuration_sub_setting_found_in_file = True\n updated_patch_configuration_sub_setting += settings[i] + \"\\n\"\n\n # add setting to configuration file, since it doesn't exist\n if not patch_configuration_sub_setting_found_in_file:\n updated_patch_configuration_sub_setting += patch_configuration_sub_setting_to_update + \"\\n\"\n\n #ToDo: This adds some whitespace at the beginning of the first line in the settings file which is auto adjusted in the file later, so shouldn't have any issues right now. strip()/lstrip() on the string, does not work, will have to test accross versions and identify the impact\n self.env_layer.file_system.write_with_retry(self.os_patch_configuration_settings_file_path, '{0}'.format(updated_patch_configuration_sub_setting.lstrip()), mode='w+')\n except Exception as error:\n self.composite_logger.log_error(\"Error occurred while updating system configuration settings for auto OS updates. 
[Patch Configuration={0}] [Error={1}]\".format(str(patch_configuration_sub_setting), repr(error)))\n raise\n # endregion\n\n def do_processes_require_restart(self):\n \"\"\"Defaulting this for Apt\"\"\"\n return False\n","sub_path":"src/core/src/package_managers/AptitudePackageManager.py","file_name":"AptitudePackageManager.py","file_ext":"py","file_size_in_byte":28560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"136460398","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# clgplot is Copyright 2012-2017 Pontus Lurcock (pont at talvi dot net)\n# and released under the MIT license:\n\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\"\"\"\nA simple interactive application for plotting IRM acquisition data.\n\nclgplot reads two types of file: raw IRM acquisition data (as a two-column\nfile containing applied field and measured magnetization), and output files\nfrom the IrmUnmix program [1] which contain a representation of IRM\nacquisition data as a sum of cumulative log-Gaussian (CLG) curves. clgplot\nproduces a plot of the data and/or curves using pyplot. The plot can be viewed\non-screen and saved to a file.\n\nclgplot is copyright 2012-2017 by Pontus Lurcock, who may be contacted at\npont talvi net\n @ .\n\n[1] See http://www.geo.uu.nl/~forth/Software/irmunmix/ and\nhttp://dx.doi.org/10.1046/j.0956-540x.2001.01558.x\n\"\"\"\n\nimport tkinter\nimport os\nfrom matplotlib import pyplot\nimport re\nfrom math import sqrt, erf\nfrom numpy import pi, exp, log10, array, arange\nimport argparse\nfrom os.path import basename\nfrom tkinter.filedialog import askopenfilename\n\n\ndef gradient(xs, ys):\n \"\"\"Return a new array containing the gradients at all the x points\"\"\"\n result = []\n for i in range(0, len(xs)):\n grad1, grad2 = None, None\n if i < len(xs) - 1:\n grad1 = (ys[i + 1] - ys[i]) / (xs[i + 1] - xs[i])\n if i > 0:\n grad2 = (ys[i] - ys[i - 1]) / (xs[i] - xs[i - 1])\n if grad1 is None:\n grad1 = grad2\n if grad2 is None:\n grad2 = grad1\n result.append((grad1 + grad2) / 2)\n return array(result)\n\n\ndef x_for_half_max_y(xs, ys):\n \"\"\"Return the x value for which the corresponding y value is half\n of the maximum y value. 
If there is no exact corresponding x value,\n one is calculated by linear interpolation from the two\n surrounding values.\n\n :param xs: x values\n :param ys: y values corresponding to the x values\n :return:\n \"\"\"\n\n if len(xs) != len(ys):\n raise ValueError(\"xs and ys must be of equal length\")\n\n half_max_y = max(ys) / 2\n for i in range(len(xs)-1):\n if ys[i+1] >= half_max_y:\n x_dist = xs[i+1] - xs[i]\n y_dist = ys[i+1] - ys[i]\n y_offset = half_max_y - ys[i]\n if y_offset == 0:\n return xs[i]\n else:\n x_offset = y_offset / y_dist * x_dist\n return xs[i] + x_offset\n return None\n\n\nclass DataSeries:\n \"\"\"A lightly wrapped 2-column matrix with a method for reading\n it from a file.\"\"\"\n\n def __init__(self, data, name=None, filename=None):\n self.data = data\n self.filename = filename\n self.name = name\n if name is not None:\n self.name = name\n else:\n if filename is not None:\n self.name = os.path.basename(filename)\n else:\n self.name = None\n\n @staticmethod\n def read_file(filename, col1=0, col2=1, name=None):\n \"\"\"Reads a series from a two-column whitespace-delimited text file. If\n there are more than two columns, the extra ones are ignored. If there\n is a header line (or any other non-numeric line), it is ignored.\"\"\"\n\n rows = []\n # \"U\" for universal newlines\n with open(filename, \"U\") as fh:\n for line in fh.readlines():\n parts = line.split()\n try:\n position = float(parts[col1])\n if len(parts) > col2:\n value = float(parts[col2])\n else:\n print(\"WARNING: missing data at \" + str(position))\n value = 0\n rows.append([position, value])\n except ValueError:\n pass # ignore non-numeric lines\n data = array(rows).transpose()\n return DataSeries(data, name=name, filename=filename)\n\n\nclass Gaussian:\n def __init__(self, m_abs, m, bhalf, dp):\n self.m_abs = m_abs # absolute contribution (not used)\n self.m = m # relative contribution (size of peak)\n self.a = m / (dp * (2 * pi) ** 0.5) # corrected for dispersion\n self.bhalf = bhalf # mean log of field (position of peak)\n self.dp = dp # dispersion parameter (width of peak)\n\n def evaluate(self, x):\n (a, b, c) = (self.a, self.bhalf, self.dp)\n return a * exp(-(((x - b) ** 2) / (2 * (c ** 2))))\n\n def cdf(self, x):\n (a, b, c) = (self.a, self.bhalf, self.dp)\n return 0.5 * (1 + erf((x - b) / (sqrt(2 * c ** 2))))\n\n def to_csv_line(self):\n return \"%.2f,%.2f,%.2f,%.2f,%.2f\" % \\\n (self.m_abs, self.m, self.a, self.bhalf, self.dp)\n\n @staticmethod\n def csv_header():\n return \"M_abs,m_rel,a,Bhalf,DP\"\n\n\nclass IrmCurves:\n \"\"\"A collection of cumulative log-gaussian functions\n which can be evaluated to give a modelled IRM remanence\n for a specified applied field.\"\"\"\n\n def __init__(self, name, sirm, params):\n self.name = name\n self.sirm = sirm\n self.components = [Gaussian(*p) for p in params]\n\n def evaluate(self, x, normalize=False):\n result = sum([g.evaluate(x) for g in self.components])\n if not normalize:\n result = result * self.sirm\n return result\n\n @staticmethod\n def read_file(filename):\n re1 = re.compile(r\"^ True SIRM= +([0-9.E-]+)\")\n re2 = re.compile(r\"^ Abs Cont= +([0-9.E-]+)\")\n re3 = re.compile(r\"^ Rel Cont= +([0-9.E-]+) +Mean= +([0-9.E-]+) +\" +\n r\"DP= +([0-9.E-]+)\\s+$\")\n infile = open(filename)\n sirm = float(re1.search(infile.readline()).groups()[0])\n params = []\n infile.readline()\n while True:\n comp = infile.readline()\n if not comp.startswith(\" Component\"):\n break\n param = [float(re2.search(infile.readline()).groups()[0])]\n line3 = 
infile.readline()\n param += map(float, re3.search(line3).groups())\n params.append(param)\n infile.readline() # skip blank line\n return IrmCurves(basename(filename), sirm, params)\n\n def to_csv_line(self):\n result = self.name + \",\"\n result += \",\".join([c.to_csv_line() for c in self.components])\n return result\n\n def csv_header(self):\n return \"Sample,\" + \\\n \",\".join([Gaussian.csv_header()] * len(self.components))\n\n\ndef plot_clg_fit(series, curves, output_filename=None):\n sirm = 1\n if curves:\n sirm = curves.sirm\n\n if series:\n xs = list(map(log10, series.data[0][1:]))\n ys = series.data[1][1:]\n pyplot.plot(xs, gradient(xs, ys) / sirm, marker=\"o\",\n ls=\"\", color=\"black\", markerfacecolor=\"none\", markersize=6)\n\n if curves:\n xs = arange(0.1, 3, 0.02)\n ys = [curves.evaluate(x, True) for x in xs]\n pyplot.plot(xs, ys, linewidth=1.0, color=\"black\")\n for curve in curves.components:\n ys2 = [curve.evaluate(x) for x in xs]\n pyplot.plot(xs, ys2, linewidth=0.5, color=\"black\")\n\n pyplot.ylim(ymin=0)\n pyplot.xlabel(\"log10(Applied field (mT))\")\n pyplot.ylabel(\"Gradient of magnetization\")\n if output_filename:\n pyplot.savefig(output_filename)\n else:\n pyplot.ion()\n pyplot.show()\n\n\nclass App:\n def __init__(self, master, data=None, curves=None,\n plot_now=False):\n\n self.series = data\n self.curves = curves\n\n master.title(\"CLG Plot\")\n frame = tkinter.Frame(master)\n frame.grid(padx=20, pady=15)\n\n self.data_button = \\\n tkinter.Button(frame, text=\"Choose Data file\",\n command=self.choose_data_file)\n self.data_button.grid(row=0, pady=5)\n\n self.irmunmix_button = \\\n tkinter.Button(frame, text=\"Choose IrmUnmix file\",\n command=self.choose_curves_file)\n self.irmunmix_button.grid(row=1, pady=5)\n\n self.plot_button = \\\n tkinter.Button(frame, text=\"Plot data\", command=self.plot)\n self.plot_button.grid(row=2, pady=5)\n\n self.quit_button = tkinter.Button(frame, text=\"Quit\",\n command=frame.quit)\n self.quit_button.grid(row=3, pady=5)\n\n master.update_idletasks()\n\n w = master.winfo_screenwidth()\n h = master.winfo_screenheight()\n mastersize = tuple(\n int(_) for _ in master.geometry().split(\"+\")[0].split(\"x\"))\n x = w / 2 - mastersize[0] / 2\n y = h / 2 - mastersize[1] / 2\n master.geometry(\"%dx%d+%d+%d\" % (mastersize + (x, y)))\n\n if plot_now:\n self.plot()\n\n def choose_curves_file(self):\n input_file = \\\n askopenfilename(title=\"Select IrmUnmix parameter file\")\n if input_file:\n self.curves = IrmCurves.read_file(input_file)\n\n def choose_data_file(self):\n input_file = askopenfilename(title=\"Select IRM data file\")\n if input_file:\n self.series = DataSeries.read_file(input_file)\n\n def plot(self):\n plot_clg_fit(self.series, self.curves)\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"usage: clgplot [options]\")\n parser.add_argument(\"-d\", \"--data\", dest=\"data_file\",\n help=\"Read IRM intensities from FILE.\", metavar=\"FILE\")\n parser.add_argument(\"-c\", \"--curves\", dest=\"curves_file\",\n help=\"Read curve parameters from FILE.\", metavar=\"FILE\")\n parser.add_argument(\"-p\", \"--plot\", action=\"store_true\", dest=\"plot_now\",\n default=False, help=\"Plot in GUI at once\")\n parser.add_argument(\"-n\", \"--no-gui\", action=\"store_true\",\n default=False, help=\"Don't start the GUI\")\n parser.add_argument(\"-o\", \"--output\", metavar=\"FILE\",\n help=\"Write plot to specified file\")\n args = parser.parse_args()\n\n data = None\n if args.data_file:\n data = 
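An `IrmCurves` can also be built directly instead of parsed from an IrmUnmix file, which is handy for testing `evaluate`. The component parameters below (`m_abs`, `m`, `bhalf`, `dp`) are invented numbers chosen only to exercise the constructor:

```python
# Hypothetical two-component model; the numbers are illustrative only.
curves = IrmCurves("demo", 1.0,
                   [(1.0, 0.7, 1.5, 0.3),
                    (1.0, 0.3, 2.2, 0.2)])

# With normalize=True the result is the plain sum of the components.
total = curves.evaluate(1.5, normalize=True)
parts = [g.evaluate(1.5) for g in curves.components]
assert abs(total - sum(parts)) < 1e-12
```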
DataSeries.read_file(args.data_file)\n hpcr = x_for_half_max_y(data.data[0], data.data[1])\n print(\"H'cr = {:.5g}\".format(hpcr))\n\n curves = None\n if args.curves_file:\n curves = IrmCurves.read_file(args.curves_file)\n\n if args.output:\n plot_clg_fit(data, curves, args.output)\n\n if not args.no_gui:\n root = tkinter.Tk()\n App(root, data=data, curves=curves,\n plot_now=args.plot_now)\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/clgplot.py","file_name":"clgplot.py","file_ext":"py","file_size_in_byte":11638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"69406339","text":"\"\"\"\nYour module description\n\"\"\"\n\nfrom botocore.vendored import requests as requests\n\n\nURL = 'https://mysofie.com/api/v2/micro_task/reserve/'\n\nHEADERS = {'Authorization': 'Bearer RVVfU09VX0FfTEVOREE='}\n\n\ndef get_reserveds(**kwargs) -> dict:\n \"\"\"\"\n \n \"\"\"\n fields = [\n 'task_id', 'sofier', 'name', 'address', 'booked_on'\n ]\n \n kwargs['sofier'] = '*'\n response = requests.get(URL, headers=HEADERS, params=kwargs)\n \n data = [\n {key: value for key, value in each.items() if key in fields} \n for each in response.json().get('data', list())\n ]\n \n return fields, data\n ","sub_path":"serverless_aws/LAMBDA FUNCTIONS/reports/report_reserveds.py","file_name":"report_reserveds.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"12246597","text":"#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nimport depthai as dai\nfrom crop_target.crop_target import CropTarget\n\ndef main():\n print(\"Check that target is aligned in the frame... Press 'q' to exit\")\n\n # Define target\n shape = 'rectangle'\n if shape == 'rectangle':\n center = np.array([[-0.0], [0.0], [3.0 - 0.054]]) # Center of plane\n size = np.array([0.5, 0.5]) # (width, height) in m\n angle = np.radians(0.0) # In degrees\n elif shape == 'circle':\n center = np.array([[0.0], [0.0], [2.0 - 0.054]]) # Center of shpere\n size = 0.139/2 # Radius in m\n angle = 0.0\n else:\n print(\"Not a valid shape!\")\n edge_width = 0\n target = CropTarget(shape, center, size, angle, edge_width)\n\n lrcheck = False # Better handling for occlusions\n extended = False # Closer-in minimum depth, disparity range is doubled\n subpixel = False # Better accuracy for longer distance, fractional disparity 32-levels\n resolution = (1280, 720)\n medianMap = {\n \"OFF\": dai.StereoDepthProperties.MedianFilter.MEDIAN_OFF,\n \"3x3\": dai.StereoDepthProperties.MedianFilter.KERNEL_3x3,\n \"5x5\": dai.StereoDepthProperties.MedianFilter.KERNEL_5x5,\n \"7x7\": dai.StereoDepthProperties.MedianFilter.KERNEL_7x7,\n }\n median = medianMap[\"7x7\"]\n\n\n def getDisparityFrame(frame):\n maxDisp = stereo.initialConfig.getMaxDisparity()\n disp = (frame * (255.0 / maxDisp)).astype(np.uint8)\n disp = cv2.applyColorMap(disp, cv2.COLORMAP_JET)\n\n return disp\n\n\n print(\"Creating Stereo Depth pipeline\")\n pipeline = dai.Pipeline()\n\n camLeft = pipeline.create(dai.node.MonoCamera)\n camRight = pipeline.create(dai.node.MonoCamera)\n camRgb = pipeline.create(dai.node.ColorCamera)\n stereo = pipeline.create(dai.node.StereoDepth)\n xoutDisparity = pipeline.create(dai.node.XLinkOut)\n xoutDepth = pipeline.create(dai.node.XLinkOut)\n xoutRgb = pipeline.create(dai.node.XLinkOut)\n\n camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)\n camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)\n res = 
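The dict comprehension in `get_reserveds` is a generic whitelist filter: keep only a fixed set of keys from each record. In isolation (with toy records rather than the real API response) it behaves like this:

```python
fields = ['task_id', 'sofier']
raw = [{'task_id': 1, 'sofier': 'a', 'secret': 'x'},
       {'task_id': 2, 'sofier': 'b', 'secret': 'y'}]

data = [{key: value for key, value in each.items() if key in fields}
        for each in raw]

assert data == [{'task_id': 1, 'sofier': 'a'},
                {'task_id': 2, 'sofier': 'b'}]
```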
dai.MonoCameraProperties.SensorResolution.THE_720_P\n\n for monoCam in (camLeft, camRight): # Common config\n monoCam.setResolution(res)\n monoCam.setFps(30.0)\n\n stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)\n stereo.initialConfig.setMedianFilter(median) # KERNEL_7x7 default\n stereo.setRectifyEdgeFillColor(0) # Black, to better see the cutout\n stereo.setLeftRightCheck(lrcheck)\n stereo.setExtendedDisparity(extended)\n stereo.setSubpixel(subpixel)\n camRgb.setInterleaved(False)\n camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)\n camRgb.setPreviewSize(1280, 720)\n\n xoutDisparity.setStreamName(\"disparity\")\n xoutDepth.setStreamName(\"depth\")\n xoutRgb.setStreamName(\"rgb\")\n\n camLeft.out.link(stereo.left)\n camRight.out.link(stereo.right)\n stereo.disparity.link(xoutDisparity.input)\n stereo.depth.link(xoutDepth.input)\n camRgb.preview.link(xoutRgb.input)\n\n with dai.Device(pipeline) as device:\n # Create a receive queue for each stream\n depth_queue = device.getOutputQueue(\"depth\", 4, blocking=False)\n disp_queue = device.getOutputQueue(\"disparity\", 4, blocking=False)\n rgb_queue = device.getOutputQueue(name=\"rgb\", maxSize=4, blocking=False)\n \n while True:\n depth_frame = depth_queue.get().getCvFrame() # blocking call, will wait until a new data has arrived\n disp_frame = disp_queue.get().getCvFrame() # blocking call, will wait until a new data has arrived\n disp_frame = getDisparityFrame(disp_frame)\n rbg_frame = rgb_queue.get().getCvFrame() # blocking call, will wait until a new data has arrived\n\n calibData = device.readCalibration()\n color_intrinsic_matrix = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 1280, 720))\n color_extrinsic_matrix = np.hstack((np.identity(3), np.zeros((3,1))))\n \n depth_intrinsic_matrix = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, 1280, 720))\n depth_extrinsic_matrix = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.RGB, dai.CameraBoardSocket.RIGHT))\n depth_extrinsic_matrix = depth_extrinsic_matrix[:-1,:]\n depth_extrinsic_matrix[:,-1] = depth_extrinsic_matrix[:,-1] / 100 # Translation is in cm for some reason\n\n\n image_with_target = target.show_target_in_image(rbg_frame, color_extrinsic_matrix, color_intrinsic_matrix)\n depth_image_with_target = target.show_target_in_image(disp_frame, depth_extrinsic_matrix, depth_intrinsic_matrix)\n\n cv2.imshow(\"RGB\", image_with_target)\n cv2.imshow(\"Depth\", depth_image_with_target)\n if cv2.waitKey(1) == ord(\"q\"):\n break\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"oak-d/scripts/data_analysis/align_target.py","file_name":"align_target.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"214420308","text":"import tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.log_device_placement = True\nsess = tf.Session(config=config)\nset_session(sess)\n\nimport numpy as np\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import skipgrams\nfrom keras.models import Sequential, Model\nfrom keras.layers import Reshape, Embedding, Input, Activation\nfrom keras.layers.merge import Dot\nfrom keras.utils import np_utils\nfrom keras.utils.data_utils import get_file\nimport gensim\n\nfrom string import punctuation\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize 
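`getDisparityFrame` in the script above rescales raw disparities into 0..255 and applies a false-colour map. The same step standalone, where the 0..95 range mirrors the default maximum disparity and the input is random placeholder data rather than a camera frame:

```python
import cv2
import numpy as np

maxDisp = 95  # stand-in for stereo.initialConfig.getMaxDisparity()
disparity = np.random.randint(0, maxDisp, (720, 1280)).astype(np.float32)

disp8 = (disparity * (255.0 / maxDisp)).astype(np.uint8)  # scale to 8-bit
colored = cv2.applyColorMap(disp8, cv2.COLORMAP_JET)      # BGR heat map

assert colored.shape == (720, 1280, 3)
```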
import word_tokenize\nfrom nltk.tokenize import sent_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\nremove_terms = punctuation + '0123456789'\n\ndef preprocessing(text):\n\twords = word_tokenize(text)\n\t#print(words)\n\ttokens = [w for w in words if w.lower() not in remove_terms]\n\t#print(tokens)\n\t#stop = stopwords.words('english')\n\t#tokens = [token for token in tokens if token not in stop]\n\t#tokens = [word for word in tokens if len(word) > 3]\n\ttokens = [word for word in tokens if word.isalpha()]\n\t#print(tokens)\n\tlemma = WordNetLemmatizer()\n\ttokens = [lemma.lemmatize(word) for word in tokens]\n\t#print(tokens)\n\tpreprocessed_text = ' '.join(tokens)\n\t#print(preprocessed_text)\n\treturn preprocessed_text\n\ncorpus = open('guttenberg_astronomy.txt', encoding = 'utf8').readlines()\n#print(corpus)\ncorpus = [preprocessing(sentence) for sentence in corpus if sentence.strip() != '']\n#print(corpus)\n\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(corpus)\n\nX_train_tokens = tokenizer.texts_to_sequences(corpus)\n#print(X_train_tokens)\n\nvocab_size = len(tokenizer.word_index)+1\n#print(vocab_size)\n\nitems = tokenizer.word_index.items()\n\ndim_embedding = 300\n#inputs\ninputs = Input(shape=(1,), dtype = 'int32')\nw = Embedding(vocab_size, dim_embedding)(inputs)\n\n#context\nc_inputs = Input(shape=(1,), dtype='int32')\nc = Embedding(vocab_size, dim_embedding)(c_inputs)\n\nd = Dot(axes=2)([w,c])\n\nd = Reshape((1,), input_shape=(1,1))(d)\nd = Activation('sigmoid')(d)\n\nmodel = Model(inputs=[inputs, c_inputs], outputs = d)\n\nprint(model.summary())\n\nmodel.compile(loss = 'binary_crossentropy', optimizer = 'adam')\n\nn_epochs = 15\nfor epoch in range(n_epochs):\n\tloss = 0\n\tfor i, doc in enumerate(X_train_tokens):\n\t\t#print(i,doc)\n\t\tdata,labels = skipgrams(sequence = doc, vocabulary_size = vocab_size, window_size=4)\n\t\t#print(data, labels)\n\t\tx = [np.array(x) for x in zip(*data)]\n\t\ty = np.array(labels, dtype=np.int32)\n\t\t#print(x,y)\n\n\t\tif x:\n\t\t\tloss += model.train_on_batch(x,y)\n\n\tprint(\"epoch:\", epoch, '\\tloss:',loss)\n\nf = open('skipgram.txt', 'w', encoding='utf8')\nf.write('{} {}\\n'.format(vocab_size-1, dim_embedding))\n\nweights = model.get_weights()[0]\nfor word, i in items:\n\tf.write('{} {}\\n'.format(word, ' '.join(map(str, list(weights[i, :])))))\nf.close()\n\n\nw2v = gensim.models.KeyedVectors.load_word2vec_format('skipgram.txt', binary = False)\n\nprint(w2v.most_similar(positive=['solar']))\nprint(w2v.most_similar(positive=['kepler']))\n","sub_path":"MLinNLP/Word2Vec/SkipGram/skipgram.py","file_name":"skipgram.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"103216567","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils import weight_norm\nimport torch.optim as optim\nimport torch.nn.functional as F\n\n\ndef load_data(file_path):\n dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d %H:%M:%S')\n dataset = pd.read_csv(file_path, usecols=[0,1],parse_dates=['date'], index_col='date', date_parser=dateparse)\n dataset.dropna(axis=0, how='any', inplace=True)\n dataset.index.name = 'date'\n return dataset\n\ndef series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\n\tn_vars = 1 if type(data) is list else data.shape[1]\n\tdf = pd.DataFrame(data)\n\tcols, names = list(), list()\n\t# input 
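The training loop above relies on `skipgrams` to turn each tokenized document into (target, context) pairs with binary labels. A toy call, assuming the same standalone-Keras import used in the script:

```python
from keras.preprocessing.sequence import skipgrams

seq = [1, 2, 3]   # already-tokenized word ids; 0 is reserved for padding
pairs, labels = skipgrams(sequence=seq, vocabulary_size=4, window_size=1)

for (target, context), label in zip(pairs, labels):
    print(target, context, label)   # label 1 = real window pair, 0 = sampled negative
```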
sequence (t-n, ... t-1)\n\tfor i in range(n_in, 0, -1):\n\t\tcols.append(df.shift(i))\n\t\tnames += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n\t# forecast sequence (t, t+1, ... t+n)\n\tfor i in range(0, n_out):\n\t\tcols.append(df.shift(-i))\n\t\tif i == 0:\n\t\t\tnames += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n\t\telse:\n\t\t\tnames += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n\t# put it all together\n\tagg = pd.concat(cols, axis=1)\n\tagg.columns = names\n\t# drop rows with NaN values\n\tif dropnan:\n\t\tagg.dropna(inplace=True)\n\treturn agg\n\ndef normalize_and_make_series(dataset, look_back):\n values = dataset.values\n values = values.astype('float64')\n # normalize features\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaled = scaler.fit_transform(values)\n # frame as supervised learning\n column_num = dataset.columns.size\n reframed = series_to_supervised(scaled, look_back, 1)\n # drop columns we don't want to predict\n drop_column = []\n for i in range(look_back * column_num+1, (look_back + 1) * column_num):\n drop_column.append(i)\n reframed.drop(reframed.columns[drop_column], axis=1, inplace=True)\n return reframed, scaler\n\ndef split_data(dataset, reframed, look_back, split_time):\n column_num = dataset.columns.size\n train_size = len(dataset[dataset.index < split_time])\n\n values = reframed.values\n train = values[:train_size, :]\n test = values[train_size:, :]\n # split into input and outputs\n train_X, train_y = train[:, :-1], train[:, -1]\n test_X, test_y = test[:, :-1], test[:, -1]\n # reshape input to be 3D [samples, timesteps, features]\n train_X = train_X.reshape(train_X.shape[0], look_back, column_num)\n test_X = test_X.reshape(test_X.shape[0], look_back, column_num)\n train_X = torch.DoubleTensor(train_X)\n train_y = torch.DoubleTensor(train_y)\n test_X = torch.DoubleTensor(test_X)\n test_y = torch.DoubleTensor(test_y)\n return train_X, train_y, test_X, test_y","sub_path":"untils.py","file_name":"untils.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"207717303","text":"#\n# Copyright 2019 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport PyNvCodec as nvc\nimport numpy as np\n\ngpuID = 0\nencFile = \"big_buck_bunny_1080p_h264.mov\"\ndecFile = open(\"big_buck_bunny_1080p_h264.nv12\", \"wb\")\n\nnvDec = nvc.PyNvDecoder(encFile, gpuID)\nnvDwl = nvc.PySurfaceDownloader(nvDec.Width(), nvDec.Height(), nvDec.PixelFormat(), gpuID)\n\nwhile True:\n rawSurface = nvDec.DecodeSingleSurface()\n if (rawSurface.Empty()):\n # Empty surface means we have reached EOF\n break\n \n rawFrame = nvDwl.DownloadSingleSurface(rawSurface)\n if not (rawFrame.size):\n break\n\n frameByteArray = bytearray(rawFrame)\n 
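`series_to_supervised` frames a series as a supervised-learning table by shifting: with `n_in=2` and `n_out=1` a single-column input becomes `var1(t-2), var1(t-1), var1(t)`, and the leading rows, whose lagged cells are NaN, are dropped. A minimal demonstration on made-up values:

```python
import pandas as pd

demo = pd.DataFrame({'value': [10, 20, 30, 40]})
framed = series_to_supervised(demo.values, n_in=2, n_out=1)

print(framed)
# Rows 0 and 1 are dropped (their lagged cells are NaN); row 2 holds
# var1(t-2)=10, var1(t-1)=20, var1(t)=30, and row 3 holds 20, 30, 40.
```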
decFile.write(frameByteArray)","sub_path":"SampleSufraceDownload.py","file_name":"SampleSufraceDownload.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"272365818","text":"\"\"\" routine for time conversion\n\"\"\"\nimport numpy\nfrom pylab import *\n\nimport datetime\n# second_leap=33 # there is a 32-second leap offset in 2000\nsecond_leap=0\nTAI0=datetime.datetime(1985, 1,1,0,0,0)\ndef_yyyy=2004\ndef day_of_year(yyyy=2005, mm=8, dd=15):\n \"\"\" get the day in a given year \n Arguments: \n yyyy, mm, dd: year, month and day \n Returns:\n days in year\n \"\"\"\n \n date=datetime.date(yyyy, mm, dd)\n date_ref=datetime.date(yyyy, 1, 1)\n delta=date-date_ref\n ndays=delta.days+1\n del date, date_ref, delta\n return ndays\n\ndef tai85_to_utc(tai85):\n \"\"\" convert a time in tai85 format to UTC string \n Arguments: \n tai85: the seconds since 1985-01-01 00:00:00\n Notes: \n the leap-second correction is hard-coded by hand \n \"\"\"\n \n date_new=TAI0+datetime.timedelta(seconds=(tai85-second_leap))\n utc=str(date_new)\n del date_new\n return utc\ndef utc_to_tai85(utc):\n \"\"\" convert utc string to tai85 format (seconds since 1985-01-01 00:00:00)\n Arguments:\n utc: utc time in yyyy-mm-dd hh:mi:ss\n returns:\n tai85 in seconds\n \"\"\"\n\n t1,t2=utc.split()\n syyyy, smm, sdd=t1.split('-')\n shh, smi, ssec=t2.split(':')\n yyyy=int(syyyy)\n mm=int(smm)\n dd=int(sdd)\n hh=int(shh)\n mi=int(smi)\n fsec=float(ssec)\n sec=int(fsec)\n iv_time=datetime.datetime(yyyy, mm, dd, hh, mi, sec)-TAI0\n tai85=3600.0*24.0*iv_time.days+iv_time.seconds+second_leap\n return tai85\ndef doy_to_tai85(doy, sec=0, yyyy=def_yyyy):\n \"\"\" convert day of year to tai85 format (seconds since 1985-01-01 00:00:00)\n Arguments:\n doy, sec, yyyy: day of year, seconds, and year\n returns:\n tai85 in seconds\n \"\"\"\n\n date0=datetime.datetime(yyyy, 1, 1, 0,0,0)\n date0=date0+datetime.timedelta(days=doy-1, seconds=sec)\n iv_time=date0-TAI0\n tai85=3600.0*24.0*iv_time.days+iv_time.seconds+second_leap\n del date0\n return tai85\ndef doy_to_utc(doy, sec=0, yyyy=def_yyyy):\n \"\"\" convert day of year to utc string\n Arguments:\n doy, sec, yyyy: day of year, seconds, and year\n returns:\n utc: the time in utc format yyyy-mm-dd hh:mm:ss\n \"\"\"\n \n date0=datetime.datetime(yyyy, 1, 1, 0,0,0)\n date0=date0+datetime.timedelta(days=doy-1, seconds=sec)\n utc=str(date0)\n del date0\n return utc\ndef doy_to_time_array(doy, yyyy=2005):\n utc=doy_to_utc(doy, 0, yyyy)\n yyyy, mm,dd, hh, mi, sec=utc_to_time_array(utc)\n return yyyy, mm, dd\n\ndef utc_to_time_array(utc):\n \"\"\" convert the utc string to yyyy, mm, dd, hh, mi, sec\n Arguments:\n utc: the time in utc format yyyy-mm-dd hh:mm:ss\n returns:\n yyyy, mm, dd, hh, mi, sec\n \"\"\"\n sd, sh=utc.split(' ')\n syyyy, smm, sdd=sd.split('-')\n shh, smi, ssec=sh.split(':')\n yyyy=int(syyyy)\n mm=int(smm)\n dd=int(sdd)\n hh=int(shh)\n mi=int(smi)\n sec=float(ssec)\n return yyyy, mm, dd, hh, mi, sec\ndef time_array_to_utc(yyyy,mm, dd, hh=0, mi=0, sec=0):\n \"\"\" convert yyyy, mm, dd, hh, mi, sec to utc string\n Arguments:\n yyyy, mm, dd, hh, mi, sec: year, month, day, hour, minute and second\n returns:\n utc: the time in utc format yyyy-mm-dd hh:mm:ss\n \"\"\"\n sec=int(sec)\n \n syyyy_mm_dd=r'%4.4d-%2.2d-%2.2d' %(yyyy, mm, dd)\n if (sec>=10):\n shh_mi_sec=r'%2.2d:%2.2d:%5.2f' %(hh, mi, sec)\n else:\n shh_mi_sec=r'%2.2d:%2.2d:%4.2f' %(hh, mi, sec)\n return syyyy_mm_dd+' '+shh_mi_sec\n\n \ndef get_tau(yyyy, 
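A round-trip check for the converters above: with `second_leap=0`, `utc_to_tai85` inverts `tai85_to_utc` exactly on whole seconds, and the day-of-year route lands on the same instant (the date is an arbitrary example):

```python
t0 = utc_to_tai85('2004-08-15 12:00:00')
assert tai85_to_utc(t0) == '2004-08-15 12:00:00'

# Same instant via day-of-year: 2004-08-15 is day 228 of a leap year.
assert t0 == doy_to_tai85(day_of_year(2004, 8, 15), sec=12 * 3600, yyyy=2004)
```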
mm, dd, hh=0, mi=0, sec=0):\n iv_time=datetime.datetime(yyyy, mm, dd, hh, mi, sec)-TAI0\n tai85=3600.0*24.0*iv_time.days+iv_time.seconds+second_leap\n return tai85\n\n\ndef next_doy(yyyy_in, doy_in, days=1, return_ymd=False):\n utc=doy_to_utc(doy_in, 0, yyyy_in)\n yyyy, mm, dd, hh, mi, sec=utc_to_time_array(utc)\n date0=datetime.datetime(yyyy, mm, dd, hh,mi,sec)\n date0=date0+datetime.timedelta(days=days, seconds=sec)\n utc=str(date0)\n yyyy, mm, dd, hh, mi, sec=utc_to_time_array(utc)\n doy=day_of_year(yyyy, mm, dd)\n if (return_ymd):\n return yyyy, mm, dd\n else:\n return yyyy, doy\n \ndef get_ut_time_slot(lt_st, lt_end, day_time_grid, lon, day_length=24.0*3600):\n\n tshift=day_length*lon/360.\n ut_st=lt_st-tshift\n ut_end=lt_end-tshift\n \n if (ut_end<0.0): # whole in earlier day \n ut_st=ut_st+day_length\n ut_end=ut_end+day_length\n\n usd_idx=where((day_time_grid>=ut_st) & (day_time_grid<ut_end))[0]\n elif (ut_st>day_length): # whole in later day \n ut_st=ut_st-day_length\n ut_end=ut_end+day_length\n \n usd_idx=where((day_time_grid>=ut_st) & (day_time_grid<ut_end))[0]\n elif (ut_st<0.0): # cross earlier day\n ut_st=ut_st+day_length\n chose1=(day_time_grid>=ut_st) & (day_time_grid<=day_length)\n chose2=(day_time_grid>=0.0) & (day_time_grid<ut_end)\n usd_idx=where(chose1 | chose2)[0]\n elif (ut_end>day_length): # cross later day\n \n ut_end=ut_end-day_length\n chose1=(day_time_grid>=ut_st) & (day_time_grid<=day_length)\n chose2=(day_time_grid>=0.0) & (day_time_grid<ut_end)\n usd_idx=where(chose1 | chose2)[0]\n else:\n usd_idx=where((day_time_grid>=ut_st) & (day_time_grid<ut_end))[0]\n return usd_idx\n= suma:\n print(\"Purchase successful. Spent on the keyboard\",klawiatura_cena,\"Spent on the pendrive\",pendrive_cena,\"Total funds spent:\",suma,\"zl\")\nelse:\n print(\"Not enough funds\")\n\n","sub_path":"Day_6/Zadanie_5.py","file_name":"Zadanie_5.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"55688298","text":"import socket\r\nimport time\r\n\r\n\r\nclass Klijent():\r\n\r\n def __init__(self, ime):\r\n self.ime = ime\r\n self.poruke = []\r\n\r\n\r\nklijenti = []\r\n\r\nHOST = socket.gethostname()\r\nPORT = 9000\r\n\r\ns = socket.socket()\r\ns.bind((HOST, PORT))\r\ns.listen(1)\r\n\r\nconn, addr = s.accept()\r\nprint('Connected by', addr)\r\n\r\nwhile True:\r\n opcija = conn.recv(1024).decode()\r\n if opcija == '1':\r\n vreme = time.strftime('%d/%m/%Y %H:%M:%S', time.localtime())\r\n conn.send(vreme.encode())\r\n elif opcija == '2':\r\n poruka = conn.recv(1024).decode()\r\n poruka = poruka.split(':')\r\n for k in klijenti:\r\n if k.ime == poruka[0]:\r\n k.poruke.append(poruka[1])\r\n break\r\n else:\r\n k = Klijent(poruka[0])\r\n k.poruke.append(poruka[1])\r\n klijenti.append(k)\r\n elif opcija == '3':\r\n ime = conn.recv(1024).decode()\r\n poruke = ''\r\n for k in klijenti:\r\n if k.ime == ime:\r\n for p in k.poruke:\r\n poruke += p + '\\n'\r\n conn.send(poruke.encode())\r\n break\r\n else:\r\n conn.send(':'.encode())\r\n elif opcija == '4':\r\n break\r\n\r\nconn.close()\r\ns.close()\r\n","sub_path":"Small programs/Klijent/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"137306306","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Jonathan Massey\n@description: Unpack flow field and plot the contours\n@contact: jmom1n15@soton.ac.uk\n\"\"\"\n\n# Imports\nimport os\nimport postproc.io as io\nimport postproc.calc as calc\nimport postproc.plotter as plotter\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\nimport matplotlib.colors as colors\nfrom matplotlib import ticker, cm\nimport seaborn as sns\nfrom mpl_toolkits.axes_grid1 
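The server above speaks a tiny option protocol: '1' returns the current time, '2' stores a `name:message` pair, '3' fetches the messages stored for a name, and '4' stops the loop. A minimal matching client sketch, assuming it runs on the same host and port 9000:

```python
import socket

c = socket.socket()
c.connect((socket.gethostname(), 9000))

c.send('1'.encode())            # option 1: ask for the server's time
print(c.recv(1024).decode())    # e.g. 15/08/2004 12:00:00

c.send('4'.encode())            # option 4: tell the server loop to stop
c.close()
```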
import make_axes_locatable\nfrom tkinter import Tcl\nimport imageio\nfrom tqdm import tqdm\nimport h5py\nfrom pygifsicle import optimize\n\n\ndef plot_2D_fp_isocontours(data, interest, fn_save, **kwargs):\n\n filled = kwargs.get('filled', True)\n title = kwargs.get('title', None)\n\n plt.style.use(['science', 'grid'])\n fig, ax = plt.subplots(figsize=(10, 7))\n cmap = sns.color_palette(\"icefire\", as_cmap=True)\n plt.title(title)\n divider = make_axes_locatable(ax)\n # Plot the window of interest\n ax.set_xlim(-0.2, 1.4)\n ax.set_ylim(-0.5, 0.5)\n\n X, Y = data.X, data.Y\n # Now plot what we are interested in\n if interest == 'p':\n vals = data.p\n # cmap = sns.color_palette(\"seismic\", as_cmap=True)\n elif interest == 'u':\n vals = data.U\n elif interest == 'v':\n vals = data.V\n elif interest == 'mag':\n U, V = data.U, data.V\n vals = np.sqrt(V ** 2 + U ** 2)\n # vals = vals * data.iter_correction(30)\n elif interest == 'rms':\n vals = data.rms()\n elif interest == 'rms_mag':\n vals = data.rms_mag()\n elif interest == 'vort':\n vals = calc.vortZ(data.U, data.V, x=X[0], y=Y[0], acc=2)\n # vals = -data.p * data.length_scale # Need to scale by length scale\n cmap = sns.color_palette(\"seismic\", as_cmap=True)\n elif interest == 'mat_file_vort':\n vals = data.omega\n cmap = sns.color_palette(\"seismic\", as_cmap=True)\n elif interest == 'mat_file':\n vals = data.U.T\n elif interest == 'snap_mat_file':\n s = kwargs.get('snap', 0)\n vals = data.mag_snap[s].T\n\n grey_color = '#dedede'\n rec = patches.Rectangle((0, -1 / 91.42), 1., 1/45.71, -theta, linewidth=0.2, edgecolor='black', facecolor=grey_color)\n ax.add_patch(rec)\n\n lim = [np.min(vals), np.max(vals)]\n lim = kwargs.get('lims', lim)\n # Put limits consistent with experimental data\n norm = colors.Normalize(vmin=lim[0], vmax=lim[1])\n lvls = kwargs.get('lvls', 11)\n step = kwargs.get('step', None)\n if step is not None:\n lvls = np.arange(lim[0], lim[1]+step, step)\n else:\n lvls = np.linspace(lim[0], lim[1], lvls)\n\n if filled:\n cs = ax.contourf(X, Y, np.transpose(vals),\n levels=lvls, vmin=lim[0], vmax=lim[1],\n norm=norm, cmap=cmap, extend='max')\n ax_cb = divider.new_horizontal(size=\"5%\", pad=0.05)\n fig.add_axes(ax_cb)\n plt.colorbar(cs, cax=ax_cb)\n ax_cb.yaxis.tick_right()\n ax_cb.yaxis.set_tick_params(labelright=True)\n # plt.setp(ax_cb.get_yticklabels()[::2], visible=False)\n # ax.clabel(cs, cs.levels[::2], inline_spacing=-9, inline=1, fontsize=10, fmt='%1.2f')\n del X, Y, vals\n ax.set_aspect(1)\n\n plt.savefig(fn_save, dpi=300)\n plt.close()\n\n\ndef vtr_to_mesh(fn, length_scale, rotation=2):\n rot = rotation / 180 * np.pi\n data = io.read_vtr(fn)\n # Get the grid\n x, y, z = data[2]\n X, Y = np.meshgrid(x / length_scale, y / length_scale)\n # Move grid so front of the foil is at (0, 0) to match exp\n X += 0.5\n mask_bool = ((X >= 0.) & (X <= 1.) 
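`plot_2D_fp_isocontours` pins the contour levels to an explicit `(lo, hi)` range and hangs the colorbar on a divider axis so the main axes keep their aspect ratio. The same pattern in miniature, on synthetic data:

```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

X, Y = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
vals = np.sin(4 * X) * np.cos(4 * Y)          # placeholder field

fig, ax = plt.subplots()
lvls = np.linspace(-1.0, 1.0, 11)             # fixed levels, as above
cs = ax.contourf(X, Y, vals, levels=lvls)
cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
fig.colorbar(cs, cax=cax)
plt.savefig("demo_contour.png", dpi=150)
```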
& (Y <= 1 / 91.42) & (Y >= -1 / 91.42)).T\n X = np.cos(rot) * X + np.sin(rot) * Y\n Y = -np.sin(rot) * X + np.cos(rot) * Y\n\n u, v, w = data[0]\n U = np.cos(rot) * u + np.sin(rot) * v\n # U = ma.masked_where(mask_bool, U)\n V = -np.sin(rot) * u + np.cos(rot) * v\n # V = ma.masked_where(mask_bool, V)\n p = data[1]\n p = np.reshape(p, [np.shape(p)[0], np.shape(p)[2], np.shape(p)[3]])\n return X, Y, U, V, w, p\n\n\nclass SimFramework:\n \"\"\"\n Class that holds all the functions to extract dat from a paraview fn,\n average and plot the contours and an animation.\n \"\"\"\n\n def __init__(self, sim_dir, fn_root, **kwargs):\n rms = kwargs.get('rms', False)\n down = kwargs.get('downsample', 1)\n self.sim_dir = sim_dir\n datp_dir = os.path.join(sim_dir, 'datp')\n rotation = kwargs.get('rotation', 0)\n self.rot = rotation / 180 * np.pi\n self.length_scale = kwargs.get('length_scale', 96)\n # Find what you're looking for\n fns = [fn for fn in os.listdir(datp_dir) if fn.startswith(fn_root) and fn.endswith('.pvtr')]\n # Sort files\n fns = Tcl().call('lsort', '-dict', fns)\n\n if len(fns) > 1:\n print(\"More than one fn with this name. Taking time average.\")\n # Store snapshots of field\n self.snaps = []\n for fn in fns[::down]:\n snap = vtr_to_mesh(os.path.join(datp_dir, fn), self.length_scale)\n self.snaps.append(snap)\n del snap\n # Time average the flow field snaps\n mean_t = np.mean(np.array(self.snaps).T, axis=1)\n self.X, self.Y = mean_t[0:2]\n self.u, self.v, self.w = mean_t[2:-1]\n self.U, self.V = np.mean(self.u, axis=2), np.mean(self.v, axis=2)\n self.p = np.mean(mean_t[-1], axis=0)\n del mean_t\n else:\n assert (len(fns) > 0), 'You dont have '+fn_root+'.pvtr in your datp folder'\n self.X, self.Y, self.U, self.V, self.W, self.p = vtr_to_mesh(os.path.join(datp_dir, fns[0]),\n self.length_scale)\n self.U, self.V = np.squeeze(self.U), np.squeeze(self.V)\n self.p = np.squeeze(self.p)\n self.z = np.ones(np.shape(self.X))\n # --- Unpack mean flow quantities ---\n names = kwargs.get('names', ['t', 'dt', 'px', 'py', 'pz', 'vx', 'vy', 'vz', 'v2x', 'v2y', 'v2z'])\n fos = (io.unpack_flex_forces(os.path.join(self.sim_dir, 'fort.9'), names))\n self.fos = dict(zip(names, fos))\n\n def rms(self):\n means = np.mean(np.array(self.snaps).T, axis=1)[2:-1]\n fluctuations = np.array(flow.snaps)[:, 2:-1] - means\n del means\n rms = np.mean((fluctuations[:, 0] ** 2 + fluctuations[:, 1] ** 2 + fluctuations[:, 2] ** 2) ** (1 / 2))\n del fluctuations\n return np.mean(rms, axis=2)\n\n def rms_mag(self):\n mean = np.mean(np.array(self.snaps).T, axis=1)[2:-1]\n mean = np.sqrt(np.sum(mean**2))\n mag = np.array(flow.snaps)[:, 2:-1]**2\n mag = np.sum(mag, axis=1)**0.5\n fluc = []\n for snap in mag:\n fluc.append(snap - mean)\n del mag, mean\n\n return np.mean(np.mean(fluc, axis=0), axis=2)\n\n def vort_mag(self):\n return io.vort(self.U, self.V, self.W, x=self.X, y=self.Y, z=self.z)\n\n def downsample(self, skip=1):\n self.X, self.Y, self.U, self.V, self.p = np.mean(np.array(self.snaps[::(skip + 1)]).T, axis=1)\n\n\n# def save_frames(data, folder, interest):\n# for idx, snap in tqdm(enumerate(data), desc='Plotting frames'):\n# da = np.array(snap).T\n# plot_2D_fp_isocontours(da, interest, os.path.join(folder, str(idx) + '.png'))\n\n\nclass PIVFramework:\n \"\"\"\n Class that holds all the functions to extract dat from a .mat fn to a plottable form.\n \"\"\"\n def __init__(self, exp, fn, **kwargs):\n rms = kwargs.get('rms', False)\n mag = kwargs.get('mag', True)\n vort = kwargs.get('vort', False)\n data = {}\n f = 
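One caution about the coordinate rotation in `vtr_to_mesh`: `X` is reassigned before `Y` is computed, so the second line sees the already-rotated `X`, which shears the grid instead of rotating it. Computing both components from the originals keeps the transform orthogonal, as in this sketch:

```python
import numpy as np

rot = np.radians(12.0)
X = np.array([[1.0, 2.0]])
Y = np.array([[0.5, 0.5]])

Xr = np.cos(rot) * X + np.sin(rot) * Y
Yr = -np.sin(rot) * X + np.cos(rot) * Y   # uses the original X, not Xr

assert np.allclose(Xr ** 2 + Yr ** 2, X ** 2 + Y ** 2)  # lengths preserved
```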
h5py.File(exp)[fn]\n for k, v in f.items():\n data[k] = np.array(v)\n # Normalise with the chord length\n l, U_inf = data['chord_length'], data['U_inf']\n print(l)\n self.X, self.Y = data['X']/l, data['Y']/l\n self.u, self.v = data['VY']/U_inf, data['VX']/U_inf\n if mag:\n self.mag_snap = np.sqrt((np.einsum('...jk,...jk->...jk', self.u, self.u) +\n np.einsum('...jk,...jk->...jk', self.v, self.v)))\n\n mean = np.mean(self.mag_snap, axis=0)\n self.U = mean\n if rms:\n mag = np.sqrt((np.einsum('...jk,...jk->...jk', self.u, self.u) +\n np.einsum('...jk,...jk->...jk', self.v, self.v)))\n mean = np.array([np.mean(mag, axis=0)])\n fluc = np.sqrt((mag-mean)**2)\n self.U = np.mean(fluc, axis=0)\n if vort: # ddx-ddy\n omega = []\n for idx, (snap_u, snap_v) in enumerate(zip(self.u, self.v)):\n omega.append(np.array(calc.vortZ(snap_u, snap_v, x=self.X[:, 0], y=self.Y[0], acc=2)))\n self.omega = np.sum(omega, axis=0)/len(self.U)\n self.omega = self.omega.T\n self.omega = data['vort']/U_inf\n self.omega = np.squeeze(np.mean(self.omega, axis=0)).T\n\n\ndef animate(data, folder, interest):\n # save_frames(data, folder, interest)\n # Sort filenames to make sure they're in order\n fn_images = os.listdir(folder)\n fn_images = Tcl().call('lsort', '-dict', fn_images)\n # Create gif\n gif_path = folder + '/exp_'+interest+'.gif'\n with imageio.get_writer(gif_path, mode='I', duration=0.15) as writer:\n for filename in tqdm(fn_images[::3], desc='Loop images'):\n writer.append_data(imageio.imread(os.path.join(folder, filename)))\n optimize(gif_path)\n\n\ndef save_piv_frames(data, folder, interest):\n for snap in range(len(data.mag_snap)):\n plot_2D_fp_isocontours(data, interest, os.path.join(folder, str(snap) + '.png'),\n title=r'$ \\overline{|U|} $', lims=[0, 1.4], step=0.1, snap=snap)\n\n\nif __name__ == \"__main__\":\n plt.style.use(['science', 'grid'])\n theta = 12\n file = 'smooth_Re10k_AoA_2'\n exp_data = '/home/masseyjmo/Workspace/Lotus/projects/flat_plate/flow_field/exp_data/'+file+'.mat'\n\n data_root = '/home/masseyjmo/Workspace/Lotus/projects/flat_plate/AoA_12'\n tit = r'$ \\overline{|U|} $'\n flow = PIVFramework(exp_data, file)\n # plot_2D_fp_isocontours(flow, 'snap_mat_file', os.path.join(data_root, 'figures/exp_mag.pdf'),\n # title=tit, lims=[0, 1.4], step=0.15, snap=0)\n # # save_piv_frames(flow, os.path.join(data_root, 'figures/animation'), 'snap_mat_file')\n # # animate(flow, os.path.join(data_root, 'figures/animation'), 'snap_mat_file')\n # # tit = r'$ \\overline{||U|^{\\prime}|} $'\n # flow = PIVFramework(exp_data, file, rms=True)\n # plot_2D_fp_isocontours(flow, 'mat_file', os.path.join(data_root, 'figures/exp_rms_mag.pdf'),\n # title=tit, lims=[0, .2])\n # # tit = r'$ \\overline{|\\omega|} $'\n # # flow = PIVFramework(exp_data, file, vort=True)\n # # plot_2D_fp_isocontours(flow, 'mat_file_vort', os.path.join(data_root, 'figures/exp_vort.pdf'),\n # # title=tit, lims=[-115, 85], lvls=11)\n # for c in [256]:\n # tit = r'$ \\overline{|U|} $'\n # flow = SimFramework(os.path.join(data_root, str(c) + '/3D'), 'spTAv',\n # length_scale=c, rotation=12)\n # field = 'mag'\n # plot_2D_fp_isocontours(flow, field, os.path.join(data_root, 'figures/'+str(c)+'_sim_mag.pdf'),\n # title=tit, lims=[0, 1.4], step=0.1)\n # tit = r'$ \\overline{||U|^{\\prime}|} $'\n # flow = SimFramework(os.path.join(data_root, str(c)+'/3D'), 'spRms',\n # length_scale=c, rotation=12)\n # field = 'p'\n # plot_2D_fp_isocontours(flow, field, os.path.join(data_root, 'figures/'+str(c)+'_sim_rms_mag.pdf'),\n # title=tit, lims=[0, .2])\n\n # 
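`animate` stitches the saved frames into a GIF with `imageio` and then shrinks it with `pygifsicle`. The GIF-writing step on its own looks like this (assuming a `frames/` directory of numerically named `.png` files; `duration` is seconds per frame):

```python
import os
import imageio

names = sorted(os.listdir("frames"), key=lambda f: int(f.split(".")[0]))
with imageio.get_writer("out.gif", mode="I", duration=0.15) as writer:
    for name in names:
        writer.append_data(imageio.imread(os.path.join("frames", name)))
```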
flow = SimFramework(os.path.join(data_root, str(c) + '/3D'), 'slVor',\n #                     length_scale=c, rotation=12)\n #     tit = r'$ \\overline{|\\omega|} $'\n #     field = 'vort'\n #     plot_2D_fp_isocontours(flow, field, os.path.join(data_root, 'figures/sim_vort.pdf'),\n #                            title=tit, lims=[-115, 85], lvls=11)\n\n","sub_path":"flat_plate/Melike/flow_field.py","file_name":"flow_field.py","file_ext":"py","file_size_in_byte":12175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"501264192","text":"# -*- coding: latin-1 -*-\n\n\"\"\"This demo shows how to extend the texel tree by introducing new\ncontainer types.\n\"\"\"\n\nimport sys\nsys.path.insert(0, '..')\n\nfrom textmodel import TextModel\nfrom textmodel.texeltree import Characters\nfrom textmodel.container import Container\n\n\n\nclass Fraction(Container):\n def __init__(self, denominator, nominator, **kwds):\n self.denominator = denominator\n self.nominator = nominator\n Container.__init__(self, **kwds)\n\n def get_content(self):\n return self.denominator, self.nominator\n\n def get_emptychars(self):\n return '(;)'\n\n\nclass Root(Container):\n def __init__(self, content, **kwds):\n self.content = content\n Container.__init__(self, **kwds)\n\n def get_content(self):\n return [self.content]\n\n\n\ndef mk_textmodel(texel):\n model = TextModel()\n model.texel = texel\n return model\n\n\n\nfrac = Fraction(Characters(u'3'), Characters(u'4'))\nroot = Root(Characters(u'2'))\n\ntext = TextModel(u\"A text which contains some math\\n\\n\")\ntext.append(u'f = ')\ntext.append(mk_textmodel(frac))\ntext.append(u'\\n\\n')\ntext.append(mk_textmodel(root))\ntext.append(u' = 1.414214 ...')\ntext.texel.dump()\n","sub_path":"textmodel/demo/extending.py","file_name":"extending.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"111896220","text":"import re\nimport copy\nfrom django.template import Library\nfrom django.utils.safestring import mark_safe\nfrom django.conf import settings\n\nregister = Library()\n\n# This method can be called from HTML; invoke it as: {% show_menu \"aasdfd\" %}\n@register.simple_tag\ndef show_menu(a1):\n return mark_safe(\"Menu\")\n\n\n@register.inclusion_tag('menu.html')\ndef get_menu(request):\n \"\"\"\n :param request: all of the data associated with the request\n :return:\n \"\"\"\n # request.method\n # request.session\n # request.path_info\n\n new_menu_list = copy.deepcopy(settings.MENU_LIST)\n flag = False\n for item in new_menu_list:\n for child in item['children']:\n reg = \"^{0}$\".format(child['url']) # ^/web/edit_user/(\\d+)/$\n if re.match(reg,request.path_info):\n if child['is_menu']:\n child['class'] = 'active'\n else:\n index = child['parant_index']\n item['children'][index]['class'] = 'active'\n item['class'] = \"\"\n flag = True\n break\n\n\n if flag:\n break\n\n return {'menus':new_menu_list}","sub_path":"day23/课件/s21day23/auto - 7 - 静态的菜单示例(最终版)/web/templatetags/xxxxxxxx.py","file_name":"xxxxxxxx.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"480366385","text":"# arrays are quite simply sets of the types of\n# variables we covered in variables.py.\n\ninteger_array = [8, 2, 5, 2, 5]\nstring_array = [\"hello\", \"q\", \"HI\", \"&*($&\"]\nboolean_array = [True, True, False, False, True]\n\n# The elements of an array should generally all be the same type.\n# This is not required, but it is highly recommended.\n\n# we can reference the elements of arrays 
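`get_menu` marks a menu item active by anchoring its URL pattern with `^...$` and testing it against `request.path_info`. The check, reduced to one regex (the pattern is a toy stand-in for a `MENU_LIST` entry):

```python
import re

pattern = "/web/edit_user/(\\d+)/"        # hypothetical menu URL pattern
reg = "^{0}$".format(pattern)

assert re.match(reg, "/web/edit_user/42/")      # concrete path matches
assert not re.match(reg, "/web/edit_user/")     # missing id does not
```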
individually.\n# remember that the first element has an index of 0\n\ninteger_array[1] # = 2\nstring_array[1] # = \"q\"\nboolean_array[1] # = True\n\n# we can reference a range\n\ninteger_array[2:4] # = [5, 2]\nstring_array[2:4] # = [\"HI\", \"&*($&\"]\nboolean_array[2:4] # = [False, False]\n\n# Arrays can have more than one dimension\n\nmulti_d_array = [\n [0, 2, 5, 8],\n [6, 1, 6, 1],\n [7, 2, 6, 7]\n]\n\n# This is a 2d array, with 3x4 size (12 elements)\n\n# arrays can be used to define color. Color is stored as three\n# integer values, from 0 to 255 (nothing to full), usually in the\n# format (red, green, blue).\n\nwhite = [255, 255, 255]\nblack = [0, 0, 0]\ngreen = [0, 255, 0]\npurple = [255, 0, 255]\n\n# When we put these last two concepts together, we find we can represent\n# images very well.\n\n# 2x2 pixel image with color values:\n# red, blue\n# green, black\n\nimage = [\n [ # row one\n [255, 0, 0], # red\n [0, 0, 255] # blue\n ],\n [ # row two\n [0, 255, 0], # green\n [0, 0, 0] # black\n ]\n]\n\n# whew, that array was THREE-dimensional\n# how do you think we would represent a video?\n# hint: a video is a set of images\n\n\n# yep, 4 dimensional! We have left the world of things we can physically model as an object\n","sub_path":"arrays.py","file_name":"arrays.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"403139844","text":"#!/usr/bin/python3\nimport random\nimport argparse\n\nmale_names = [\n \"Akahito\",\n \"Akeno\",\n \"Aki\",\n \"Akihiro\",\n \"Akihisa\",\n \"Akihito\",\n \"Akikazu\",\n \"Akinari\",\n \"Akinori\",\n \"Akio\",\n \"Akio\",\n \"Akira\",\n \"Amane\",\n \"Anzai\",\n \"Arata\",\n \"Arinori\",\n \"Aritomo\",\n \"Ashihei\",\n \"Atasuke\",\n \"Atshushi\",\n \"Atsumichi\",\n \"Atsumori\",\n \"Atsutane\",\n \"Azumamaro\",\n \"Baiko\",\n \"Bairei\",\n \"Bakin\",\n \"Basho\",\n \"Benjiro\",\n \"Benkei\",\n \"Bokkai\",\n \"Botan\",\n \"Buncho\",\n \"Bunjiro\",\n \"Bunrakuken\",\n \"Bunzo\",\n \"Bussho\",\n \"Chikafusa\",\n \"Chikao\",\n \"Chiko\",\n \"Chojiro\",\n \"Chomei\",\n \"Chuichi\",\n \"Dai\",\n \"Daisetsu\",\n \"Daisuke\",\n \"Danjuro\",\n \"Danno\",\n \"Dayu\",\n \"Denbe\",\n \"Doi\",\n \"Dokuohtei\",\n \"Doppo\",\n \"Ebizo\",\n \"Eichi\",\n \"Eichiro\",\n \"Eien\",\n \"Eiichi\",\n \"Eiji\",\n \"Eijiro\",\n \"Eikichi\",\n \"Eisaku\",\n \"Eisen\",\n \"Eishi\",\n \"Eisuke\",\n \"Eitoku\",\n \"Eizan\",\n \"Eizo\",\n \"Ekiken\",\n \"Ennosuke\",\n \"Etsuya\",\n \"Fujimaro\",\n \"Fujio\",\n \"Fukusaburu\",\n \"Fumiaki\",\n \"Fumihiko\",\n \"Fumihiro\",\n \"Fumimaro\",\n \"Fumio\",\n \"Gaho\",\n \"Gekko\",\n \"Gempachi\",\n \"Gengyo\",\n \"Genichi\",\n \"Genjo\",\n \"Gennai\",\n \"Gennosuke\",\n \"Genpaku\",\n \"Gesshin\",\n \"Gidayu\",\n \"Gihei\",\n \"Giichi\",\n \"Go\",\n \"Goemon\",\n \"Gombei\",\n \"Gonkuro\",\n \"Gonnohyoe\",\n \"Gonshiro\",\n \"Goro\",\n \"Gyokusho\",\n \"Gyukudo\",\n \"Hachemon\",\n \"Hachigoro\",\n \"Hachiro\",\n \"Hajime\",\n \"Hakuseki\",\n \"Hanshiro\",\n \"Haranobu\",\n \"Haru\",\n \"Haru\",\n \"Haruhiko\",\n \"Haruhiro\",\n \"Haruki\",\n \"Haruko\",\n \"Harumi\",\n \"Harunobu\",\n \"Hayato\",\n \"Heihachiro\",\n \"Heiji\",\n \"Heikichi\",\n \"Heizo\",\n \"Hideaki\",\n \"Hidehira\",\n \"Hidekazu\",\n \"Hideki\",\n \"Hidemichi\",\n \"Hideo\",\n \"Hidetada\",\n \"Hidetora\",\n \"Hidetoshi\",\n \"Hidetsugu\",\n \"Hideyori\",\n \"Hideyoshi\",\n \"Higashikuni\",\n \"Hikaru\",\n \"Hikosaburo\",\n \"Hikozaemon\",\n \"Hiro\",\n \"Hiroaki\",\n 
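Indexing into the nested `image` list from arrays.py follows the same `[row][column]` pattern, with a third index selecting the colour channel:

```python
assert image[0][1] == [0, 0, 255]   # row 0, column 1: the blue pixel
assert image[1][0][1] == 255        # green channel of the green pixel
```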
\"Hirobumi\",\n \"Hirofumi\",\n \"Hiroharu\",\n \"Hirohisa\",\n \"Hiroji\",\n \"Hirokazu\",\n \"Hirokichi\",\n \"Hirokumi\",\n \"Hiroshi\",\n \"Hiroshige\",\n \"Hirotada\",\n \"Hirotaka\",\n \"Hirotsugu\",\n \"Hiroya\",\n \"Hiroyasu\",\n \"Hiroyuki\",\n \"Hisahsi\",\n \"Hisaki\",\n \"Hisamitsu\",\n \"Hisanobu\",\n \"Hisashi\",\n \"Hisato\",\n \"Hisayuki\",\n \"Hitomaro\",\n \"Hitoshi\",\n \"Hogai\",\n \"Hoitsu\",\n \"Hokichi\",\n \"Hokusai\",\n \"Honzo\",\n \"Horiuchi\",\n \"Hoshi\",\n \"Hoshiko\",\n \"Hyobe\",\n \"Hyosuke\",\n \"Hyotaru\",\n \"Ichibei\",\n \"Ichiro\",\n \"Ichisake\",\n \"Ichiyo\",\n \"Ichizo\",\n \"Iemitsu\",\n \"Iemochi\",\n \"Ienobu\",\n \"Iesada\",\n \"Ieshige\",\n \"Ietsuna\",\n \"Ieyasu\",\n \"Ieyoshi\",\n \"Ikemoto\",\n \"Ikki\",\n \"Ikku\",\n \"Inejiro\",\n \"Ippei\",\n \"Isamu\",\n \"Isao\",\n \"Isei\",\n \"Isoruko\",\n \"Isoshi\",\n \"Iwane\",\n \"Iwao\",\n \"Izo\",\n \"Izumo\",\n \"Jakuchu\",\n \"Jin\",\n \"Jinzaburo\",\n \"Jiro\",\n \"Jo\",\n \"Joben\",\n \"Joji\",\n \"Jomei\",\n \"Josuke\",\n \"Jotaro\",\n \"Jou\",\n \"Juichi\",\n \"Jun\",\n \"Junichi\",\n \"Junichiro\",\n \"Junji\",\n \"Junnosuke\",\n \"Junzo\",\n \"Juro\",\n \"Jurobei\",\n \"Juzaburo\",\n \"Juzo\",\n \"Jyoji\",\n \"Kado\",\n \"Kadonomaro\",\n \"Kaemon\",\n \"Kafu\",\n \"Kagehisa\",\n \"Kagetoki\",\n \"Kageyasu\",\n \"Kaii\",\n \"Kakuei\",\n \"Kakuzo\",\n \"Kamatari\",\n \"Kamlyn\",\n \"Kan\",\n \"Kanbe\",\n \"Kane\",\n \"Kaneie\",\n \"Kanezane\",\n \"Kanjiro\",\n \"Kanko\",\n \"Kannon\",\n \"Kano\",\n \"Kansuke\",\n \"Kantaro\",\n \"Kanzaburo\",\n \"Kaori\",\n \"Kaoru\",\n \"Kata\",\n \"Katai\",\n \"Katsuhiko\",\n \"Katsuhito\",\n \"Katsumi\",\n \"Katsumoto\",\n \"Katsunan\",\n \"Katsunosuki\",\n \"Katsuyoshi\",\n \"Katsuyuki\",\n \"Katzumi\",\n \"Kawanari\",\n \"Kaz\",\n \"Kazu\",\n \"Kazuhiko\",\n \"Kazuhiro\",\n \"Kazuki\",\n \"Kazuko\",\n \"Kazuma\",\n \"Kazunori\",\n \"Kazuo\",\n \"Kazushi\",\n \"Kazushige\",\n \"Kazutoshi\",\n \"Kazuyuki\",\n \"Kei\",\n \"Keiji\",\n \"Keiki\",\n \"Keishi\",\n \"Keisuke\",\n \"Keita\",\n \"Keitaro\",\n \"Keizo\",\n \"Ken\",\n \"Kenichi\",\n \"Kenji\",\n \"Kenji\",\n \"Kenji\",\n \"Kenjiro\",\n \"Kenkichi\",\n \"Kenko\",\n \"Kensaku\",\n \"Kenshin\",\n \"Kentaro\",\n \"Kenzaburo\",\n \"Kenzan\",\n \"Kenzo\",\n \"Kichibei\",\n \"Kichisaburo\",\n \"Kiemon\",\n \"Kiichi\",\n \"Kijuro\",\n \"Kikaku\",\n \"Kikugoro\",\n \"Kikunojo\",\n \"Kimi\",\n \"Kiminobu\",\n \"Kimitada\",\n \"Kin\",\n \"Kingo\",\n \"Kinji\",\n \"Kinmochi\",\n \"Kinnojo\",\n \"Kinnosuke\",\n \"Kinzo\",\n \"Kioshi\",\n \"Kisho\",\n \"Kitahachi\",\n \"Kiyoemon\",\n \"Kiyohira\",\n \"Kiyohisa\",\n \"Kiyomasu\",\n \"Kiyomori\",\n \"Kiyonaga\",\n \"Kiyonobu\",\n \"Kiyonori\",\n \"Kiyoshi\",\n \"Kiyotaka\",\n \"Koan\",\n \"Kobo\",\n \"Koetsu\",\n \"Kohei\",\n \"Koichi\",\n \"Koin\",\n \"Koji\",\n \"Kojiro\",\n \"Kojuro\",\n \"Kokan\",\n \"Kokei\",\n \"Koki\",\n \"Kokushi\",\n \"Konosuke\",\n \"Konoye\",\n \"Konyo\",\n \"Korechika\",\n \"Korekiyo\",\n \"Korenaga\",\n \"Korin\",\n \"Koryusai\",\n \"Kosaku\",\n \"Kosami\",\n \"Koshiro\",\n \"Kosho\",\n \"Kotaro\",\n \"Koto\",\n \"Koyo\",\n \"Kozue\",\n \"Kuemon\",\n \"Kuma\",\n \"Kumanosuke\",\n \"Kuniaki\",\n \"Kunihiko\",\n \"Kunimatsu\",\n \"Kunimichi\",\n \"Kunio\",\n \"Kunisada\",\n \"Kunitaro\",\n \"Kuniyoshi\",\n \"Kuniyuki\",\n \"Kuri\",\n \"Kyoden\",\n \"Kyoichi\",\n \"Kyoji\",\n \"Kyoshi\",\n \"Kyuichi\",\n \"Kyushichi\",\n \"Kyuso\",\n \"Kyuwa\",\n \"Mabuchi\",\n \"Magbei\",\n \"Magobei\",\n \"Magohachi\",\n \"Makoto\",\n \"Mamoru\",\n 
\"Manabu\",\n \"Manobu\",\n \"Manzo\",\n \"Mareo\",\n \"Maresuke\",\n \"Marihito\",\n \"Maris\",\n \"Marise\",\n \"Maro\",\n \"Masaaki\",\n \"Masafumi\",\n \"Masaharu\",\n \"Masahide\",\n \"Masahiko\",\n \"Masahiro\",\n \"Masakado\",\n \"Masakazu\",\n \"Masaki\",\n \"Masami\",\n \"Masamichi\",\n \"Masamune\",\n \"Masanobu\",\n \"Masanori\",\n \"Masao\",\n \"Masaru\",\n \"Masashi\",\n \"Masashige\",\n \"Masatake\",\n \"Masato\",\n \"Masayoshi\",\n \"Masayuki\",\n \"Masazumi\",\n \"Mashai\",\n \"Mashashi\",\n \"Mashiro\",\n \"Masu\",\n \"Masuhiro\",\n \"Masujiro\",\n \"Masutaro\",\n \"Matabei\",\n \"Matashichi\",\n \"Matsu\",\n \"Matsudaira\",\n \"Matsuo\",\n \"Matsusuke\",\n \"Matsuta\",\n \"Matsuyo\",\n \"Meiji\",\n \"Michihiro\",\n \"Michinaga\",\n \"Michinori\",\n \"Michio\",\n \"Michizane\",\n \"Mieko\",\n \"Miki\",\n \"Mikio\",\n \"Minoru\",\n \"Misao\",\n \"Mito\",\n \"Mitsuharu\",\n \"Mitsuhide\",\n \"Mitsukuni\",\n \"Mitsunari\",\n \"Mitsuo\",\n \"Mitsuoki\",\n \"Mitsuzuka\",\n \"Miyazaki\",\n \"Miyoko\",\n \"Mobumasu\",\n \"Mochihito\",\n \"Mokichi\",\n \"Mokuami\",\n \"Momoru\",\n \"Montaro\",\n \"Monzaemon\",\n \"Morie\",\n \"Morihiro\",\n \"Morimasa\",\n \"Morio\",\n \"Moromao\",\n \"Moronobu\",\n \"Motoichi\",\n \"Motoki\",\n \"Motonobu\",\n \"Motoshige\",\n \"Mototsune\",\n \"Motoyasu\",\n \"Motoyuki\",\n \"Munemitsu\",\n \"Munemori\",\n \"Munenori\",\n \"Muneyaki\",\n \"Munoto\",\n \"Murai\",\n \"Mushanokoji\",\n \"Mutsohito\",\n \"Naganori\",\n \"Naizen\",\n \"Nakamaro\",\n \"Nakazo\",\n \"Namboku\",\n \"Nampo\",\n \"Naoaki\",\n \"Naofumi\",\n \"Naohiro\",\n \"Naoki\",\n \"Naoko\",\n \"Naomichi\",\n \"Naonobu\",\n \"Naosuke\",\n \"Naoya\",\n \"Naozane\",\n \"Narahiko\",\n \"Nariaki\",\n \"Nariakira\",\n \"Narihari\",\n \"Narihira\",\n \"Naruhiko\",\n \"Natsu\",\n \"Natsume\",\n \"Natsuo\",\n \"Nichiren\",\n \"Nikki\",\n \"Nikko\",\n \"Ninsei\",\n \"Niou\",\n \"Nissho\",\n \"Noboru\",\n \"Nobuatsu\",\n \"Nobuharu\",\n \"Nobuhiko\",\n \"Nobuhisa\",\n \"Nobuhito\",\n \"Nobukazu\",\n \"Nobuo\",\n \"Noburo\",\n \"Nobusuke\",\n \"Nobuyoki\",\n \"Nobuyori\",\n \"Nobuyoshi\",\n \"Nori\",\n \"Noriaki\",\n \"Norihide\",\n \"Norihisa\",\n \"Norinaga\",\n \"Norio\",\n \"Norishige\",\n \"Noritada\",\n \"Noritoshi\",\n \"Noriyori\",\n \"Noriyuki\",\n \"Norogumi\",\n \"Oda\",\n \"Ogai\",\n \"Okakura\",\n \"Okitsugu\",\n \"Okura\",\n \"Okyoito\",\n \"Omezo\",\n \"Oniji\",\n \"Orinosuke\",\n \"Osamu\",\n \"Otojiro\",\n \"Rai\",\n \"Raidon\",\n \"Razan\",\n \"Rei\",\n \"Reijiro\",\n \"Reizo\",\n \"Renjiro\",\n \"Renzo\",\n \"Rikiya\",\n \"Rikyu\",\n \"Ringo\",\n \"Rinji\",\n \"Rintaro\",\n \"Rkuemon\",\n \"Robun\",\n \"Roka\",\n \"Roku\",\n \"Rosanjin\",\n \"Ryo\",\n \"Ryobe\",\n \"Ryoichi\",\n \"Ryoko\",\n \"Ryoma\",\n \"Ryosei\",\n \"Ryozo\",\n \"Ryu\",\n \"Ryuichi\",\n \"Ryunosuke\",\n \"Ryushi\",\n \"Ryutaro\",\n \"Ryuzaburo\",\n \"Saburo\",\n \"Sachi\",\n \"Sachio\",\n \"Sadaharu\",\n \"Sadahige\",\n \"Sadakuno\",\n \"Sadanobu\",\n \"Sadao\",\n \"Sadatake\",\n \"Sadayoshi\",\n \"Saemon\",\n \"Saikaku\",\n \"Saionji\",\n \"Sakutaro\",\n \"Samba\",\n \"Saneatsu\",\n \"Sanetomo\",\n \"Sanjiro\",\n \"Sanjuro\",\n \"Sanraku\",\n \"Sanzo\",\n \"Satoru\",\n \"Satoshi\",\n \"Sawao\",\n \"Seibei\",\n \"Seiesnsui\",\n \"Seihachi\",\n \"Seiho\",\n \"Seiichi\",\n \"Seiji\",\n \"Seika\",\n \"Seiki\",\n \"Seinosuke\",\n \"Seiryo\",\n \"Seishiro\",\n \"Seishisai\",\n \"Seison\",\n \"Seitaro\",\n \"Sekien\",\n \"Sen\",\n \"Senichi\",\n \"Senzo\",\n \"Sessue\",\n \"Settan\",\n \"Sharaku\",\n \"Shiba\",\n 
\"Shichirobei\",\n \"Shigeaki\",\n \"Shigekazu\",\n \"Shigeki\",\n \"Shigeko\",\n \"Shigemasa\",\n \"Shigematsu\",\n \"Shigemori\",\n \"Shigenaga\",\n \"Shigenobu\",\n \"Shigeru\",\n \"Shigetaka\",\n \"Shigetoki\",\n \"Shigochiyo\",\n \"Shihei\",\n \"Shihi\",\n \"Shijo\",\n \"Shiki\",\n \"Shiko\",\n \"Shimei\",\n \"Shimpei\",\n \"Shingen\",\n \"Shinichi\",\n \"Shinji\",\n \"Shinkichi\",\n \"Shino\",\n \"Shinobu\",\n \"ShinriKiyaru\",\n \"Shinsaku\",\n \"Shinsui\",\n \"Shintaro\",\n \"Shinzaburo\",\n \"Shinzo\",\n \"Shirai\",\n \"Shiro\",\n \"Shirosama\",\n \"Shizue\",\n \"Sho\",\n \"Shoda\",\n \"Shogo\",\n \"Shohei\",\n \"Shoichi\",\n \"Shoin\",\n \"Shoji\",\n \"Shojiro\",\n \"Shoko\",\n \"Shoraku\",\n \"Shosuke\",\n \"Shotaro\",\n \"Shoyo\",\n \"Shozaburo\",\n \"Shozo\",\n \"Shuichi\",\n \"Shuji\",\n \"Shukishi\",\n \"Shuko\",\n \"Shumei\",\n \"Shumkichi\",\n \"Shun\",\n \"Shuncho\",\n \"Shungyosai\",\n \"Shunichi\",\n \"Shunji\",\n \"Shunko\",\n \"Shunmyo\",\n \"Shunsen\",\n \"Shunsho\",\n \"Shunso\",\n \"Shunsuke\",\n \"Shunen\",\n \"Shusake\",\n \"Shusaku\",\n \"Shusui\",\n \"Shuzo\",\n \"Soetsu\",\n \"Sofu\",\n \"Soh\",\n \"Soichiro\",\n \"Sojuro\",\n \"Sorai\",\n \"Sosa\",\n \"Soseki\",\n \"Soshitsu\",\n \"Soshu\",\n \"Sosuke\",\n \"Sotan\",\n \"Sotaro\",\n \"Sotatsu\",\n \"Sozen\",\n \"Sozui\",\n \"Ssekien\",\n \"Subaru\",\n \"Suezo\",\n \"Sugimoto\",\n \"Sugita\",\n \"Sukejuro\",\n \"Sukenobu\",\n \"Suketsune\",\n \"Sukeyasu\",\n \"Sumio\",\n \"Sumiteru\",\n \"Sumitomo\",\n \"Susumu\",\n \"Suzu\",\n \"Suzu\",\n \"Tabito\",\n \"Tadahisa\",\n \"Tadakuni\",\n \"Tadamasa\",\n \"Tadamichi\",\n \"Tadao\",\n \"Tadashi\",\n \"Tadasu\",\n \"Tadasuke\",\n \"Tadataka\",\n \"Tadayoshi\",\n \"Tadayuki\",\n \"Tadiyuki\",\n \"Taheiji\",\n \"Taikan\",\n \"Taisho\",\n \"Taisuke\",\n \"Taji\",\n \"Takaaki\",\n \"Takafumi\",\n \"Takahashi\",\n \"Takahiro\",\n \"Takakazu\",\n \"Takamasa\",\n \"Takamori\",\n \"Takamuku\",\n \"Takanibu\",\n \"Takanobu\",\n \"Takanori\",\n \"Takao\",\n \"Takashi\",\n \"Takauji\",\n \"Takayuki\",\n \"Takechi\",\n \"Takehide\",\n \"Takeichi\",\n \"Takeji\",\n \"Takejiro\",\n \"Takenao\",\n \"Takeo\",\n \"Takeru\",\n \"Takeshi\",\n \"Takesi\",\n \"Taki\",\n \"Takiji\",\n \"Takuboku\",\n \"Takuji\",\n \"Takuma\",\n \"Takuro\",\n \"Takuya\",\n \"Tamasaburo\",\n \"Tamasine\",\n \"Tameyoshi\",\n \"Tamotsu\",\n \"Tamuramaro\",\n \"Tanak\",\n \"Tango\",\n \"Tanjiro\",\n \"Tanosuke\",\n \"Tanyu\",\n \"Tanzan\",\n \"Taro\",\n \"Taro\",\n \"Taroemon\",\n \"Tarozaemon\",\n \"Tashiaki\",\n \"Tashiro\",\n \"Tasuku\",\n \"Tatsui\",\n \"Tatsukichi\",\n \"Tatsuya\",\n \"Tatsuzo\",\n \"Taysuke\",\n \"Teiji\",\n \"Teijo\",\n \"Teika\",\n \"Teiljo\",\n \"Teinosuke\",\n \"Tekkan\",\n \"Tenshin\",\n \"Terao\",\n \"Teriuihi\",\n \"Terumoto\",\n \"Teruo\",\n \"Tessai\",\n \"Tetsu\",\n \"Tetsuhiko\",\n \"Tetsui\",\n \"Tetsunori\",\n \"Tetsuo\",\n \"Tetsuya\",\n \"Tetsuyuki\",\n \"Tetsuzan\",\n \"Thoki\",\n \"Tobei\",\n \"Togai\",\n \"Tohaku\",\n \"Toichi\",\n \"Toin\",\n \"Toju\",\n \"Tokaji\",\n \"Toki\",\n \"Tokichiro\",\n \"Tokimasa\",\n \"Tokimune\",\n \"Tokugawa\",\n \"Tokuhei\",\n \"Tokuma\",\n \"Tokutomi\",\n \"Tomeo\",\n \"Tomiichi\",\n \"Tomiji\",\n \"Tomoaki\",\n \"Tomohiko\",\n \"Tomokazu\",\n \"Tomomi\",\n \"Tomonori\",\n \"Tomoyuki\",\n \"Ton\",\n \"Torajiro\",\n \"Torazo\",\n \"Torio\",\n \"Toru\",\n \"Toshi\",\n \"Toshiaki\",\n \"Toshiharu\",\n \"Toshikasu\",\n \"Toshikazu\",\n \"Toshiki\",\n \"Toshikuni\",\n \"Toshimichi\",\n \"Toshinobu\",\n \"Toshiro\",\n \"Toshitsugu\",\n 
\"Toshiyuki\",\n \"Toson\",\n \"Totoya\",\n \"Toyoaki\",\n \"Toyoharu\",\n \"Toyokazu\",\n \"Toyokuni\",\n \"Toyonobu\",\n \"Toyoshige\",\n \"Toyotomi\",\n \"Toyozo\",\n \"Tsugahara\",\n \"Tsugiharu\",\n \"Tsuginori\",\n \"Tsugumichi\",\n \"Tsukasa\",\n \"Tsumemasa\",\n \"Tsunayoshi\",\n \"Tsuneari\",\n \"Tsuneo\",\n \"Tsunesaburo\",\n \"Tsuneyo\",\n \"Tsuramatsu\",\n \"Tsurayaki\",\n \"Tsuruki\",\n \"Tsutomu\",\n \"Tsuyoshi\",\n \"Udo\",\n \"Ukon\",\n \"Ukyo\",\n \"Unkei\",\n \"Utaemon\",\n \"Utamara\",\n \"Utamuro\",\n \"Utemaro\",\n \"Waotaka\",\n \"Washi\",\n \"Washichi\",\n \"Yachi\",\n \"Yaichiro\",\n \"Yajirobei\",\n \"Yakamochi\",\n \"Yakumo\",\n \"Yamato\",\n \"Yasotaro\",\n \"Yasuhide\",\n \"Yasuhiko\",\n \"Yasuhiro\",\n \"Yasujiro\",\n \"Yasukazu\",\n \"Yasunari\",\n \"Yasunobu\",\n \"Yasuo\",\n \"Yasuoka\",\n \"Yasushi\",\n \"Yasutake\",\n \"Yasutoki\",\n \"Yasuyuki\",\n \"Yataro\",\n \"Yatsuhiro\",\n \"Yeijiro\",\n \"Yo\",\n \"Yodo\",\n \"Yohachi\",\n \"Yoichi\",\n \"Yoichibei\",\n \"Yoriie\",\n \"Yorikane\",\n \"Yoringa\",\n \"Yoritoki\",\n \"Yoritomo\",\n \"Yoriyoshi\",\n \"Yoriyuki\",\n \"Yosai\",\n \"Yoshi\",\n \"Yoshiaga\",\n \"Yoshiaki\",\n \"Yoshida\",\n \"Yoshifumi\",\n \"Yoshifusa\",\n \"Yoshihide\",\n \"Yoshihiro\",\n \"Yoshihisa\",\n \"Yoshihito\",\n \"Yoshii\",\n \"Yoshiiku\",\n \"Yoshikazu\",\n \"Yoshiki\",\n \"Yoshimasa\",\n \"Yoshimatsu\",\n \"Yoshimi\",\n \"Yoshimitsu\",\n \"Yoshimochi\",\n \"Yoshimune\",\n \"Yoshinaka\",\n \"Yoshino\",\n \"Yoshinobu\",\n \"Yoshinori\",\n \"Yoshio\",\n \"Yoshisada\",\n \"Yoshitaka\",\n \"Yoshitake\",\n \"Yoshiteru\",\n \"Yoshitoki\",\n \"Yoshitomo\",\n \"Yoshitora\",\n \"Yoshitoshi\",\n \"Yoshitsune\",\n \"Yoshiyuki\",\n \"Yoson\",\n \"Yosuke\",\n \"Yozo\",\n \"Yugoro\",\n \"Yuichi\",\n \"Yuji\",\n \"Yujiro\",\n \"Yuki\",\n \"Yukichi\",\n \"Yukinaga\",\n \"Yukio\",\n \"Yuko\",\n \"Yunosuke\",\n \"Yushiro\",\n \"Yusuke\",\n \"Yutaka\",\n \"Zenko\",\n \"Zeshin\"\n]\nfemale_names = [\n \"Ai\",\n \"Aiko\",\n \"Akane\",\n \"Akemi\",\n \"Aki\",\n \"Akiko\",\n \"Akina\",\n \"Akuro\",\n \"Amarante\",\n \"Amaya\",\n \"Ami\",\n \"Anda\",\n \"Aneko\",\n \"Arisa\",\n \"Asako\",\n \"Asami\",\n \"Atsuko\",\n \"Aya\",\n \"Ayaka\",\n \"Ayako\",\n \"Ayame\",\n \"Ayano\",\n \"Benten\",\n \"Chiaki\",\n \"Chie\",\n \"Chieko\",\n \"Chihiro\",\n \"Chika\",\n \"Chikako\",\n \"Chiko\",\n \"Chikuma\",\n \"Chinatsu\",\n \"Chisaki\",\n \"Chisato\",\n \"Chitose\",\n \"Chiyeko\",\n \"Chiyo\",\n \"Cho\",\n \"Dai\",\n \"Echiko\",\n \"Eiko\",\n \"Ema\",\n \"Emi\",\n \"Emiko\",\n \"Eri\",\n \"Eriko\",\n \"Etsuko\",\n \"Euiko\",\n \"Fujiko\",\n \"Fumi\",\n \"Fumie\",\n \"Fumiki\",\n \"Fumiko\",\n \"Fusae\",\n \"Fuyuko\",\n \"Gemmei\",\n \"Gen\",\n \"Gin\",\n \"Ginko\",\n \"Hama\",\n \"Hana\",\n \"Hanae\",\n \"Hanako\",\n \"Haniko\",\n \"Haru\",\n \"Haru\",\n \"Haruhi\",\n \"Haruka\",\n \"Harukichi\",\n \"Haruko\",\n \"Harumi\",\n \"Haruna\",\n \"Hatsue\",\n \"Hatsuyo\",\n \"Hide\",\n \"Hideko\",\n \"Hikaru\",\n \"Hiroe\",\n \"Hiroko\",\n \"Hiromi\",\n \"Hiroshi\",\n \"Hisa\",\n \"Hisae\",\n \"Hisako\",\n \"Hisano\",\n \"Hitomi\",\n \"Hitomo\",\n \"Hitoshi\",\n \"Honami\",\n \"Hoshi\",\n \"Hoshie\",\n \"Hoshiko\",\n \"Hoshiyo\",\n \"Ichi\",\n \"Iku\",\n \"Ikue\",\n \"Ikuko\",\n \"Inari\",\n \"Inoue\",\n \"Isako\",\n \"Ise\",\n \"Itsuko\",\n \"Izumi\",\n \"Jin\",\n \"Joruri\",\n \"Jun\",\n \"Junko\",\n \"Juri\",\n \"Kaede\",\n \"Kagami\",\n \"Kahori\",\n \"Kaida\",\n \"Kaiya\",\n \"Kaiyo\",\n \"Kameko\",\n \"Kami\",\n \"Kammi\",\n \"Kammie\",\n \"Kana\",\n 
\"Kanami\",\n \"Kaneko\",\n \"Kaori\",\n \"Kaoru\",\n \"Kasuga\",\n \"Kata\",\n \"Katsue\",\n \"Katsuko\",\n \"Katsumi\",\n \"Kaya\",\n \"Kayoko\",\n \"Kazue\",\n \"Kazuko\",\n \"Kazumi\",\n \"Kei\",\n \"Keiko\",\n \"Kichi\",\n \"Kiko\",\n \"Kikuko\",\n \"Kikyou\",\n \"Kimi\",\n \"Kimie\",\n \"Kimiko\",\n \"Kin\",\n \"Kinuko\",\n \"Kinuye\",\n \"Kinuyo\",\n \"Kioko\",\n \"Kiriko\",\n \"Kishi\",\n \"Kita\",\n \"Kiyo\",\n \"Kiyoko\",\n \"Kiyomi\",\n \"Kochiyo\",\n \"Kohana\",\n \"Koi\",\n \"Koiso\",\n \"Koken\",\n \"Koko\",\n \"Komachi\",\n \"Koto\",\n \"Kotono\",\n \"Kumi\",\n \"Kumiko\",\n \"Kuni\",\n \"Kunie\",\n \"Kuniko\",\n \"Kura\",\n \"Kuri\",\n \"Kyoko\",\n \"Machi\",\n \"Machiko\",\n \"Madoka\",\n \"Mae\",\n \"Maeko\",\n \"Maemi\",\n \"Mai\",\n \"Maiko\",\n \"Maiya\",\n \"Maki\",\n \"Makiko\",\n \"Mako\",\n \"Mami\",\n \"Mamiko\",\n \"Mana\",\n \"Manami\",\n \"Mari\",\n \"Mariko\",\n \"Marise\",\n \"Maru\",\n \"Masae\",\n \"Masako\",\n \"Masumi\",\n \"Matsu\",\n \"Matsuko\",\n \"Maya\",\n \"Mayako\",\n \"Mayo\",\n \"Mayoko\",\n \"Mayu\",\n \"Mayuko\",\n \"Mayumi\",\n \"Megu\",\n \"Megumi\",\n \"Michi\",\n \"Michie\",\n \"Michiko\",\n \"Michiru\",\n \"Michiyo\",\n \"Midori\",\n \"Mieko\",\n \"Miho\",\n \"Mihoko\",\n \"Miiko\",\n \"Miki\",\n \"Miliko\",\n \"Mina\",\n \"Minako\",\n \"Minami\",\n \"Mineko\",\n \"Mino\",\n \"Mio\",\n \"Misa\",\n \"Misako\",\n \"Misato\",\n \"Mitsu\",\n \"Mitsuko\",\n \"Mitsuyo\",\n \"Miwako\",\n \"Miya\",\n \"Miyako\",\n \"Miyo\",\n \"Miyoko\",\n \"Miyoshi\",\n \"Mizuki\",\n \"Moeko\",\n \"Momoko\",\n \"Mura\",\n \"Mutsuko\",\n \"Mutsumi\",\n \"Naho\",\n \"Nahoko\",\n \"Nami\",\n \"Nami\",\n \"Namie\",\n \"Namika\",\n \"Namiko\",\n \"Namiyo\",\n \"Nana\",\n \"Nanako\",\n \"Nanami\",\n \"Nao\",\n \"Naoko\",\n \"Naora\",\n \"Nari\",\n \"Nariko\",\n \"Naru\",\n \"Narumi\",\n \"Natsuko\",\n \"Natsumi\",\n \"Natsumi\",\n \"Nayoko\",\n \"Nene\",\n \"Nishi\",\n \"Nomi\",\n \"Nori\",\n \"Norie\",\n \"Noriko\",\n \"Nozomi\",\n \"Nyoko\",\n \"Ochiyo\",\n \"Oharu\",\n \"Oki\",\n \"Okichi\",\n \"Okiku\",\n \"Omitsu\",\n \"Orino\",\n \"Otsu\",\n \"Otsune\",\n \"Raicho\",\n \"Raku\",\n \"Ran\",\n \"Rei\",\n \"Reiko\",\n \"Remi\",\n \"Rie\",\n \"Rieko\",\n \"Rika\",\n \"Rikako\",\n \"Riku\",\n \"Rina\",\n \"Rinako\",\n \"Rini\",\n \"Risa\",\n \"Risako\",\n \"Ritsuko\",\n \"Romi\",\n \"Rui\",\n \"Rumiko\",\n \"Ruri\",\n \"Ruriko\",\n \"Ryoko\",\n \"Sachi\",\n \"Sachiko\",\n \"Sada\",\n \"Sadako\",\n \"Sae\",\n \"Saeko\",\n \"Saito\",\n \"Sakamae\",\n \"Saki\",\n \"Sakiko\",\n \"Sakue\",\n \"Sakuko\",\n \"Sakura\",\n \"Sakurako\",\n \"Sakuro\",\n \"Sama\",\n \"Sanako\",\n \"Saori\",\n \"Sata\",\n \"Satoko\",\n \"Satomi\",\n \"Satu\",\n \"Sawako\",\n \"Saya\",\n \"Sayo\",\n \"Sayoko\",\n \"Sayuri\",\n \"Sei\",\n \"Seiko\",\n \"Seka\",\n \"Seki\",\n \"Sen\",\n \"Setsuko\",\n \"Shige\",\n \"Shika\",\n \"Shina\",\n \"Shino\",\n \"Shinobu\",\n \"Shioko\",\n \"Shiori\",\n \"Shizu\",\n \"Shizue\",\n \"Shizuka\",\n \"Shoken\",\n \"Shoko\",\n \"Sui\",\n \"Suki\",\n \"Suko\",\n \"Sumi\",\n \"Sumie\",\n \"Sumiko\",\n \"Suzu\",\n \"Suzue\",\n \"Suzume\",\n \"Suzuko\",\n \"Tadako\",\n \"Tae\",\n \"Tai\",\n \"Taji\",\n \"Taka\",\n \"Takako\",\n \"Takara\",\n \"Tama\",\n \"Tamae\",\n \"Tamafune\",\n \"Tamaki\",\n \"Tamami\",\n \"Tami\",\n \"Tamika\",\n \"Tamiko\",\n \"Tamiyo\",\n \"Tanak\",\n \"Taniko\",\n \"Tansho\",\n \"Tara\",\n \"Taree\",\n \"Taura\",\n \"Taya\",\n \"Teruyo\",\n \"Toki\",\n \"Tokie\",\n \"Tokiko\",\n \"Tokiyo\",\n \"Toku\",\n \"Tomi\",\n \"Tomiko\",\n \"Tomoe\",\n 
\"Tomoko\",\n \"Tomomi\",\n \"Toshi\",\n \"Toshie\",\n \"Toshiko\",\n \"Toya\",\n \"Toyoko\",\n \"Tsuki\",\n \"Tsukiyama\",\n \"Tsuya\",\n \"Ume\",\n \"Umeka\",\n \"Umeko\",\n \"Urako\",\n \"Usagi\",\n \"Uta\",\n \"Utako\",\n \"Wattan\",\n \"Wazuka\",\n \"Yachi\",\n \"Yae\",\n \"Yaeko\",\n \"Yama\",\n \"Yasu\",\n \"Yasuko\",\n \"Yayoi\",\n \"Yodo\",\n \"Yoko\",\n \"Yori\",\n \"Yoriko\",\n \"Yoshe\",\n \"Yoshi\",\n \"Yoshike\",\n \"Yoshiko\",\n \"Yoshino\",\n \"Yu\",\n \"Yui\",\n \"Yuka\",\n \"Yukako\",\n \"Yukari\",\n \"Yuki\",\n \"Yukiko\",\n \"Yukiyo\",\n \"Yuko\",\n \"Yuma\",\n \"Yumako\",\n \"Yumi\",\n \"Yumiko\",\n \"Yuri\",\n \"Yuriko\",\n \"Yusuke\"\n]\nfamily_names = [\n \"Abe\",\n \"Abukara\",\n \"Adachi\",\n \"Aibu\",\n \"Aida\",\n \"Aihara\",\n \"Aizawa\",\n \"Ajibana\",\n \"Akaike\",\n \"Akamatsu\",\n \"Akatsuka\",\n \"Akechi\",\n \"Akera\",\n \"Akimoto\",\n \"Akita\",\n \"Akiyama\",\n \"Akutagawa\",\n \"Amagawa\",\n \"Amaya\",\n \"Amori\",\n \"Anami\",\n \"Ando\",\n \"Anzai\",\n \"Aoki\",\n \"Arai\",\n \"Arakaki\",\n \"Arakawa\",\n \"Araki\",\n \"Arakida\",\n \"Arato\",\n \"Arihyoshi\",\n \"Arishima\",\n \"Arita\",\n \"Ariwa\",\n \"Ariwara\",\n \"Asahara\",\n \"Asahi\",\n \"Asai\",\n \"Asano\",\n \"Asanuma\",\n \"Asari\",\n \"Ashia\",\n \"Ashida\",\n \"Ashikaga\",\n \"Asuhara\",\n \"Atshushi\",\n \"Ayabe\",\n \"Ayabito\",\n \"Ayugai\",\n \"Azama\",\n \"Chiba\",\n \"Chikamatsu\",\n \"Chikanatsu\",\n \"Chino\",\n \"Chishu\",\n \"Choshi\",\n \"Daishi\",\n \"Dan\",\n \"Date\",\n \"Dazai\",\n \"Deguchi\",\n \"Deushi\",\n \"Doi\",\n \"Ebina\",\n \"Ebisawa\",\n \"Eda\",\n \"Egami\",\n \"Eguchi\",\n \"Ekiguchi\",\n \"Endo\",\n \"Endoso\",\n \"Enoki\",\n \"Enomoto\",\n \"Erizawa\",\n \"Eto\",\n \"Etsuko\",\n \"Ezakiya\",\n \"Fuchida\",\n \"Fuchizaki\",\n \"Fugunaga\",\n \"Fujii\",\n \"Fujikage\",\n \"Fujimaki\",\n \"Fujimoto\",\n \"Fujioka\",\n \"Fujishima\",\n \"Fujita\",\n \"Fujiwara\",\n \"Fukao\",\n \"Fukayama\",\n \"Fukazawa\",\n \"Fukuda\",\n \"Fukumitsu\",\n \"Fukumoto\",\n \"Fukunaka\",\n \"Fukuoka\",\n \"Fukusaku\",\n \"Fukushima\",\n \"Fukuyama\",\n \"Fukuzawa\",\n \"Fumihiko\",\n \"Funabashi\",\n \"Funaki\",\n \"Funakoshi\",\n \"Furuhata\",\n \"Furusawa\",\n \"Fuschida\",\n \"Fuse\",\n \"Futabatei\",\n \"Fuwa\",\n \"Gakusha\",\n \"Genda\",\n \"Genji\",\n \"Gensai\",\n \"Godo\",\n \"Goto\",\n \"Gushiken\",\n \"Haga\",\n \"Hagino\",\n \"Hagiwara\",\n \"Hakamada\",\n \"Hama\",\n \"Hamacho\",\n \"Hamada\",\n \"Hamaguchi\",\n \"Hamamoto\",\n \"Han\",\n \"Hanabusa\",\n \"Hanari\",\n \"Handa\",\n \"Hara\",\n \"Harada\",\n \"Haruguchi\",\n \"Hasegawa\",\n \"Hasekura\",\n \"Hashi\",\n \"Hashimoto\",\n \"Hasimoto\",\n \"Hatakeda\",\n \"Hatakeyama\",\n \"Hatayama\",\n \"Hatoyama\",\n \"Hattori\",\n \"Hayakawa\",\n \"Hayami\",\n \"Hayashi\",\n \"Hayashida\",\n \"Hayata\",\n \"Hayuata\",\n \"Hida\",\n \"Hidaka\",\n \"Hideaki\",\n \"Hideki\",\n \"Hideyoshi\",\n \"Higa\",\n \"Higashi\",\n \"Higashikuni\",\n \"Higashiyama\",\n \"Higo\",\n \"Higoshi\",\n \"Higuchi\",\n \"Hike\",\n \"Hino\",\n \"Hira\",\n \"Hiraga\",\n \"Hirai\",\n \"Hiraki\",\n \"Hirano\",\n \"Hiranuma\",\n \"Hiraoka\",\n \"Hirase\",\n \"Hirasi\",\n \"Hirata\",\n \"Hiratasuka\",\n \"Hirayama\",\n \"Hiro\",\n \"Hirose\",\n \"Hirota\",\n \"Hiroyuki\",\n \"Hisamatsu\",\n \"Hishida\",\n \"Hishikawa\",\n \"Hitomi\",\n \"Hiyama\",\n \"Hohki\",\n \"Hojo\",\n \"Hokusai\",\n \"Honami\",\n \"Honda\",\n \"Hori\",\n \"Horigome\",\n \"Horigoshi\",\n \"Horiuchi\",\n \"Horri\",\n \"Hoshino\",\n \"Hosokawa\",\n \"Hosokaya\",\n \"Hotate\",\n 
\"Hotta\",\n \"Hyata\",\n \"Hyobanshi\",\n \"Ibi\",\n \"Ibu\",\n \"Ibuka\",\n \"Ichigawa\",\n \"Ichihara\",\n \"Ichikawa\",\n \"Ichimonji\",\n \"Ichiro\",\n \"Ichisada\",\n \"Ichiyusai\",\n \"Idane\",\n \"Iemochi\",\n \"Ienari\",\n \"Iesada\",\n \"Ieyasu\",\n \"Ieyoshi\",\n \"Igarashi\",\n \"Ihara\",\n \"Ii\",\n \"Iida\",\n \"Iijima\",\n \"Iitaka\",\n \"Ijichi\",\n \"Ijiri\",\n \"Ikeda\",\n \"Ikina\",\n \"Ikoma\",\n \"Imada\",\n \"Imagawa\",\n \"Imai\",\n \"Imaizumi\",\n \"Imamura\",\n \"Imoo\",\n \"Ina\",\n \"Inaba\",\n \"Inao\",\n \"Inihara\",\n \"Ino\",\n \"Inoguchi\",\n \"Inokuma\",\n \"Inomata\",\n \"Inoue\",\n \"Inouye\",\n \"Inukai\",\n \"Ippitsusai\",\n \"Irie\",\n \"Iriye\",\n \"Isaka\",\n \"Isayama\",\n \"Ise\",\n \"Iseki\",\n \"Iseya\",\n \"Ishibashi\",\n \"Ishida\",\n \"Ishiguro\",\n \"Ishihara\",\n \"Ishikawa\",\n \"Ishimaru\",\n \"Ishimura\",\n \"Ishinomori\",\n \"Ishio\",\n \"Ishiyama\",\n \"Isobe\",\n \"Isoda\",\n \"Isozaki\",\n \"Itagaki\",\n \"Itami\",\n \"Ito\",\n \"Itoh\",\n \"Iwahara\",\n \"Iwahashi\",\n \"Iwakura\",\n \"Iwasa\",\n \"Iwasaki\",\n \"Izawa\",\n \"Izumi\",\n \"Jinnai\",\n \"Jo\",\n \"Joshuya\",\n \"Joshuyo\",\n \"Jukodo\",\n \"Jumonji\",\n \"Kada\",\n \"Kagabu\",\n \"Kagawa\",\n \"Kahae\",\n \"Kahaya\",\n \"Kai\",\n \"Kaibara\",\n \"Kaima\",\n \"Kajahara\",\n \"Kajitani\",\n \"Kajiwara\",\n \"Kajiyama\",\n \"Kakinomoto\",\n \"Kakutama\",\n \"Kamachi\",\n \"Kamata\",\n \"Kamei\",\n \"Kameyama\",\n \"Kaminaga\",\n \"Kamio\",\n \"Kamioka\",\n \"Kamisaka\",\n \"Kamo\",\n \"Kamon\",\n \"Kan\",\n \"Kanada\",\n \"Kanagaki\",\n \"Kanegawa\",\n \"Kaneko\",\n \"Kanesaka\",\n \"Kano\",\n \"Karamorita\",\n \"Karube\",\n \"Karubo\",\n \"Kasahara\",\n \"Kasai\",\n \"Kasamatsu\",\n \"Kasaya\",\n \"Kase\",\n \"Kashiwabara\",\n \"Kashiwagi\",\n \"Kasuse\",\n \"Katabuchi\",\n \"Kataoka\",\n \"Katayama\",\n \"Katayanagi\",\n \"Kate\",\n \"Kato\",\n \"Katoaka\",\n \"Katsu\",\n \"Katsukawa\",\n \"Katsumata\",\n \"Katsura\",\n \"Katsushika\",\n \"Kawabata\",\n \"Kawabe\",\n \"Kawachi\",\n \"Kawagichi\",\n \"Kawagishi\",\n \"Kawaguchi\",\n \"Kawai\",\n \"Kawaii\",\n \"Kawakami\",\n \"Kawamata\",\n \"Kawamura\",\n \"Kawano\",\n \"Kawasaki\",\n \"Kawasawa\",\n \"Kawashima\",\n \"Kawasie\",\n \"Kawatake\",\n \"Kawate\",\n \"Kawayama\",\n \"Kawazu\",\n \"Kaza\",\n \"Kazuyoshi\",\n \"Kenkyusha\",\n \"Kenmotsu\",\n \"Kentaro\",\n \"Ki\",\n \"Kido\",\n \"Kihara\",\n \"Kijimuta\",\n \"Kijmuta\",\n \"Kikkawa\",\n \"Kikuchi\",\n \"Kikugawa\",\n \"Kikui\",\n \"Kikutake\",\n \"Kimio\",\n \"Kimiyama\",\n \"Kimura\",\n \"Kinashita\",\n \"Kinjo\",\n \"Kino\",\n \"Kinoshita\",\n \"Kinugasa\",\n \"Kira\",\n \"Kishi\",\n \"Kiski\",\n \"Kita\",\n \"Kitabatake\",\n \"Kitagawa\",\n \"Kitamura\",\n \"Kitano\",\n \"Kitao\",\n \"Kitoaji\",\n \"Kiyoura\",\n \"Ko\",\n \"Kobayashi\",\n \"Kobi\",\n \"Kodama\",\n \"Koga\",\n \"Koganezawa\",\n \"Kogara\",\n \"Kogo\",\n \"Koguchi\",\n \"Koike\",\n \"Koiso\",\n \"Koizumi\",\n \"Kojima\",\n \"Kokan\",\n \"Komagata\",\n \"Komatsu\",\n \"Komatsuzaki\",\n \"Komine\",\n \"Komiya\",\n \"Komon\",\n \"Komukai\",\n \"Komura\",\n \"Kon\",\n \"Konae\",\n \"Konda\",\n \"Kondo\",\n \"Konishi\",\n \"Kono\",\n \"Konoe\",\n \"Kora\",\n \"Koruba\",\n \"Koshin\",\n \"Kotabe\",\n \"Kotara\",\n \"Kotoku\",\n \"Kouda\",\n \"Koyama\",\n \"Koyanagi\",\n \"Kozu\",\n \"Kubo\",\n \"Kubodera\",\n \"Kubota\",\n \"Kudara\",\n \"Kudo\",\n \"Kuga\",\n \"Kumagae\",\n \"Kumagai\",\n \"Kumasaka\",\n \"Kunda\",\n \"Kunikida\",\n \"Kunisada\",\n \"Kuno\",\n \"Kunomasu\",\n \"Kuramochi\",\n 
\"Kuramoto\",\n \"Kurata\",\n \"Kurkawa\",\n \"Kurmochi\",\n \"Kuroda\",\n \"Kurofuji\",\n \"Kurogane\",\n \"Kurohiko\",\n \"Kuroki\",\n \"Kurosawa\",\n \"Kurotani\",\n \"Kurusu\",\n \"Kusaka\",\n \"Kusatsu\",\n \"Kusonoki\",\n \"Kusuhara\",\n \"Kusumoto\",\n \"Kusunoki\",\n \"Kutsuna\",\n \"Kuwabara\",\n \"Kyubei\",\n \"Maeda\",\n \"Maeno\",\n \"Maita\",\n \"Makioka\",\n \"Makuda\",\n \"Marubeni\",\n \"Marugo\",\n \"Maruyama\",\n \"Masanobu\",\n \"Masaoka\",\n \"Mashita\",\n \"Masuda\",\n \"Masuko\",\n \"Masuno\",\n \"Masuo\",\n \"Masuzoe\",\n \"Matano\",\n \"Matsubara\",\n \"Matsuda\",\n \"Matsukata\",\n \"Matsuki\",\n \"Matsumara\",\n \"Matsumiya\",\n \"Matsumoto\",\n \"Matsuo\",\n \"Matsuoka\",\n \"Matsura\",\n \"Matsushima\",\n \"Matsushina\",\n \"Matsushita\",\n \"Matsuzawa\",\n \"Mayuzumi\",\n \"Mazaki\",\n \"Mazawa\",\n \"Mihashi\",\n \"Miki\",\n \"Mimasuya\",\n \"Minabuchi\",\n \"Minatoya\",\n \"Minobe\",\n \"Misawa\",\n \"Mishima\",\n \"Mitsubishi\",\n \"Mitsukuri\",\n \"Mitsuwa\",\n \"Mitsuya\",\n \"Mitzusaka\",\n \"Miura\",\n \"Miyagi\",\n \"Miyahara\",\n \"Miyajima\",\n \"Miyake\",\n \"Miyamoto\",\n \"Miyata\",\n \"Miyazaki\",\n \"Miyazawa\",\n \"Miyoshi\",\n \"Mizoguchi\",\n \"Mizukawa\",\n \"Mizukuro\",\n \"Mizuno\",\n \"Mizutani\",\n \"Mochizuki\",\n \"Modegi\",\n \"Momotami\",\n \"Momotani\",\n \"Mori\",\n \"Moriguchi\",\n \"Morimoto\",\n \"Morinaga\",\n \"Morioka\",\n \"Morita\",\n \"Moriwaka\",\n \"Morri\",\n \"Moto\",\n \"Motoori\",\n \"Munkata\",\n \"Muraguchi\",\n \"Murakami\",\n \"Muraoka\",\n \"Murata\",\n \"Murkami\",\n \"Muro\",\n \"Muruyama\",\n \"Muso\",\n \"Mutsu\",\n \"Nagahama\",\n \"Nagai\",\n \"Nagako\",\n \"Nagano\",\n \"Nagasawa\",\n \"Nagase\",\n \"Nagashima\",\n \"Nagata\",\n \"Nagatsuka\",\n \"Nagumo\",\n \"Naito\",\n \"Nakada\",\n \"Nakadai\",\n \"Nakadan\",\n \"Nakae\",\n \"Nakagawa\",\n \"Nakahara\",\n \"Nakajima\",\n \"Nakamoto\",\n \"Nakamura\",\n \"Nakane\",\n \"Nakanishi\",\n \"Nakano\",\n \"Nakanoi\",\n \"Nakao\",\n \"Nakasato\",\n \"Nakasawa\",\n \"Nakasone\",\n \"Nakata\",\n \"Nakatoni\",\n \"Nakatsuka\",\n \"Nakayama\",\n \"Nakazawa\",\n \"Namiki\",\n \"Nanami\",\n \"Narahashi\",\n \"Narato\",\n \"Narita\",\n \"Nataga\",\n \"Natsume\",\n \"Nawabe\",\n \"Nemoto\",\n \"Niijima\",\n \"Nijo\",\n \"Ninomiya\",\n \"Nishi\",\n \"Nishihara\",\n \"Nishikawa\",\n \"Nishimoto\",\n \"Nishimura\",\n \"Nishimuraya\",\n \"Nishio\",\n \"Nishiwaki\",\n \"Nishiyama\",\n \"Nitta\",\n \"Nobunaga\",\n \"Nobusawa\",\n \"Noda\",\n \"Nogi\",\n \"Noguchi\",\n \"Nogushi\",\n \"Nomura\",\n \"Nonomura\",\n \"Noro\",\n \"Nosaka\",\n \"Nose\",\n \"Noto\",\n \"Nozaki\",\n \"Nozara\",\n \"Numajiri\",\n \"Numata\",\n \"Obata\",\n \"Obinata\",\n \"Obuchi\",\n \"Ochi\",\n \"Ochiai\",\n \"Ochida\",\n \"Odaka\",\n \"Ogata\",\n \"Ogawa\",\n \"Ogiwara\",\n \"Ogura\",\n \"Ogyu\",\n \"Ohba\",\n \"Ohira\",\n \"Ohishi\",\n \"Ohka\",\n \"Ohmae\",\n \"Ohmiya\",\n \"Oichi\",\n \"Oinuma\",\n \"Oishi\",\n \"Okabe\",\n \"Okada\",\n \"Okajima\",\n \"Okakura\",\n \"Okamoto\",\n \"Okamura\",\n \"Okanao\",\n \"Okanaya\",\n \"Okano\",\n \"Okasawa\",\n \"Okawa\",\n \"Okazaki\",\n \"Okazawaya\",\n \"Okimasa\",\n \"Okimoto\",\n \"Okimura\",\n \"Okita\",\n \"Okubo\",\n \"Okuda\",\n \"Okui\",\n \"Okuma\",\n \"Okumura\",\n \"Okura\",\n \"Omori\",\n \"Omura\",\n \"Onishi\",\n \"Ono\",\n \"Onoda\",\n \"Onoe\",\n \"Onohara\",\n \"Ooka\",\n \"Oonishi\",\n \"Osagawa\",\n \"Osaka\",\n \"Osaragi\",\n \"Oshima\",\n \"Oshin\",\n \"Oshiro\",\n \"Ota\",\n \"Otaka\",\n \"Otake\",\n \"Otani\",\n \"Otomo\",\n 
\"Otsu\",\n \"Otsuka\",\n \"Ouchi\",\n \"Oushima\",\n \"Outakara\",\n \"Outsuka\",\n \"Oyama\",\n \"Ozaki\",\n \"Ozawa\",\n \"Ozu\",\n \"Raikatuji\",\n \"Royama\",\n \"Ryusaki\",\n \"Sada\",\n \"Saeki\",\n \"Saga\",\n \"Sahashi\",\n \"Saigo\",\n \"Saiki\",\n \"Saionji\",\n \"Saito\",\n \"Saitoh\",\n \"Saji\",\n \"Sakagami\",\n \"Sakai\",\n \"Sakakibara\",\n \"Sakamoto\",\n \"Sakanoue\",\n \"Sakata\",\n \"Sakiyurai\",\n \"Sako\",\n \"Sakoda\",\n \"Sakubara\",\n \"Sakuma\",\n \"Sakuraba\",\n \"Sakurada\",\n \"Sakurai\",\n \"Sammiya\",\n \"Sanda\",\n \"Sanjo\",\n \"Sano\",\n \"Santo\",\n \"Saromi\",\n \"Sarumara\",\n \"Sasada\",\n \"Sasakawa\",\n \"Sasaki\",\n \"Sassa\",\n \"Satake\",\n \"Sato\",\n \"Satoh\",\n \"Satou\",\n \"Satoya\",\n \"Sawai\",\n \"Sawamatsu\",\n \"Sawamura\",\n \"Sayuki\",\n \"Segawa\",\n \"Sekigawa\",\n \"Sekine\",\n \"Sekozawa\",\n \"Sen\",\n \"Senmatsu\",\n \"Seo\",\n \"Serizawa\",\n \"Seyama\",\n \"Shiba\",\n \"Shibaguchi\",\n \"Shibanuma\",\n \"Shibasaki\",\n \"Shibasawa\",\n \"Shibata\",\n \"Shibue\",\n \"Shibukji\",\n \"Shichirobei\",\n \"Shidehara\",\n \"Shiga\",\n \"Shiganori\",\n \"Shige\",\n \"Shigeki\",\n \"Shigemitsu\",\n \"Shigi\",\n \"Shikitei\",\n \"Shikuk\",\n \"Shima\",\n \"Shimada\",\n \"Shimakage\",\n \"Shimamura\",\n \"Shimanouchi\",\n \"Shimaoka\",\n \"Shimazaki\",\n \"Shimazu\",\n \"Shimedzu\",\n \"Shimizu\",\n \"Shimohira\",\n \"Shimon\",\n \"Shimura\",\n \"Shimuzu\",\n \"Shinko\",\n \"Shinozaki\",\n \"Shinozuka\",\n \"Shintaro\",\n \"Shiokawa\",\n \"Shiomi\",\n \"Shiomiya\",\n \"Shionoya\",\n \"Shiotani\",\n \"Shioya\",\n \"Shirahata\",\n \"Shirai\",\n \"Shiraishi\",\n \"Shirakawa\",\n \"Shirane\",\n \"Shirasu\",\n \"Shiratori\",\n \"Shirokawa\",\n \"Shiroyama\",\n \"Shiskikura\",\n \"Shizuma\",\n \"Shobo\",\n \"Shoda\",\n \"Shunji\",\n \"Shunsen\",\n \"Siagyo\",\n \"Soga\",\n \"Sohda\",\n \"Soho\",\n \"Soma\",\n \"Someya\",\n \"Sone\",\n \"Sonoda\",\n \"Soseki\",\n \"Sotomura\",\n \"Suenami\",\n \"Sugai\",\n \"Sugase\",\n \"Sugawara\",\n \"Sugihara\",\n \"Sugimoto\",\n \"Sugimura\",\n \"Sugino\",\n \"Sugisata\",\n \"Sugita\",\n \"Sugitani\",\n \"Sugiyama\",\n \"Sumitimo\",\n \"Sunada\",\n \"Suzambo\",\n \"Suzuki\",\n \"Tabuchi\",\n \"Tadeshi\",\n \"Tagawa\",\n \"Taguchi\",\n \"Taira\",\n \"Taka\",\n \"Takabe\",\n \"Takagaki\",\n \"Takagawa\",\n \"Takagi\",\n \"Takahama\",\n \"Takahashi\",\n \"Takahasi\",\n \"Takaki\",\n \"Takamura\",\n \"Takano\",\n \"Takaoka\",\n \"Takara\",\n \"Takashita\",\n \"Takasu\",\n \"Takasugi\",\n \"Takayama\",\n \"Takecare\",\n \"Takei\",\n \"Takekawa\",\n \"Takemago\",\n \"Takemitsu\",\n \"Takemura\",\n \"Takeshi\",\n \"Takeshita\",\n \"Taketomo\",\n \"Takeuchi\",\n \"Takeushi\",\n \"Takewaki\",\n \"Takimoto\",\n \"Takishida\",\n \"Takishita\",\n \"Takita\",\n \"Takizawa\",\n \"Taku\",\n \"Takudo\",\n \"Takudome\",\n \"Tamaasa\",\n \"Tamazaki\",\n \"Tamura\",\n \"Tamuro\",\n \"Tanaka\",\n \"Tange\",\n \"Tani\",\n \"Taniguchi\",\n \"Tanizaki\",\n \"Tankoshitsu\",\n \"Tansho\",\n \"Tanuma\",\n \"Tarumi\",\n \"Tatenaka\",\n \"Tateno\",\n \"Tatsuko\",\n \"Tatsuno\",\n \"Tatsuya\",\n \"Tawaraya\",\n \"Tayama\",\n \"Temko\",\n \"Tenshin\",\n \"Terada\",\n \"Terajima\",\n \"Terakado\",\n \"Terauchi\",\n \"Teshigahara\",\n \"Teshima\",\n \"Tezuka\",\n \"Tochikura\",\n \"Toda\",\n \"Togo\",\n \"Tojo\",\n \"Tokaji\",\n \"Tokuda\",\n \"Tokudome\",\n \"Tokuoka\",\n \"Tomika\",\n \"Tomimoto\",\n \"Tomioka\",\n \"Tommii\",\n \"Tomonaga\",\n \"Tomori\",\n \"Tono\",\n \"Torii\",\n \"Torisawa\",\n \"Torisei\",\n \"Toru\",\n 
\"Toshishai\",\n \"Toshitala\",\n \"Toshusai\",\n \"Toyama\",\n \"Toyoda\",\n \"Toyoshima\",\n \"Toyota\",\n \"Toyotomi\",\n \"Tsubouchi\",\n \"Tsucgimoto\",\n \"Tsuchie\",\n \"Tsuchiyama\",\n \"Tsuda\",\n \"Tsuga\",\n \"Tsuji\",\n \"Tsujimoto\",\n \"Tsujimura\",\n \"Tsukada\",\n \"Tsukade\",\n \"Tsukahara\",\n \"Tsukamoto\",\n \"Tsukatani\",\n \"Tsukawaki\",\n \"Tsukehara\",\n \"Tsukioka\",\n \"Tsumemasa\",\n \"Tsumura\",\n \"Tsunoda\",\n \"Tsurimi\",\n \"Tsuruga\",\n \"Tsuruya\",\n \"Tsushima\",\n \"Tsutaya\",\n \"Tsutomu\",\n \"Tsutsumi\",\n \"Tsutsumida\",\n \"Uboshita\",\n \"Uchida\",\n \"Uchiyama\",\n \"Ueda\",\n \"Uehara\",\n \"Uemura\",\n \"Ueshima\",\n \"Uesugi\",\n \"Uetake\",\n \"Ugaki\",\n \"Ui\",\n \"Ukiyo\",\n \"Umari\",\n \"Umehara\",\n \"Umeki\",\n \"Uno\",\n \"Uoya\",\n \"Urayama\",\n \"Urogataya\",\n \"Usami\",\n \"Ushiba\",\n \"Utagawa\",\n \"Wakai\",\n \"Wakatsuki\",\n \"Watabe\",\n \"Watanabe\",\n \"Watari\",\n \"Watnabe\",\n \"Watoga\",\n \"Yakuta\",\n \"Yamabe\",\n \"Yamada\",\n \"Yamadera\",\n \"Yamagata\",\n \"Yamaguchi\",\n \"Yamaguchiya\",\n \"Yamaha\",\n \"Yamahata\",\n \"Yamakage\",\n \"Yamakawa\",\n \"Yamakazi\",\n \"Yamamoto\",\n \"Yamamura\",\n \"Yamana\",\n \"Yamanaka\",\n \"Yamane\",\n \"Yamanouchi\",\n \"Yamanoue\",\n \"Yamaoka\",\n \"Yamasaki\",\n \"Yamashita\",\n \"Yamato\",\n \"Yamauchi\",\n \"Yamawaki\",\n \"Yamazaki\",\n \"Yamhata\",\n \"Yamura\",\n \"Yanagawa\",\n \"Yanagi\",\n \"Yanagimoto\",\n \"Yanagita\",\n \"Yanasaki\",\n \"Yano\",\n \"Yasuda\",\n \"Yasuhiro\",\n \"Yasui\",\n \"Yasujiro\",\n \"Yasukawa\",\n \"Yasutake\",\n \"Yoemon\",\n \"Yokokawa\",\n \"Yokoyama\",\n \"Yonai\",\n \"Yone\",\n \"Yosano\",\n \"Yoshida\",\n \"Yoshida\",\n \"Yoshifumi\",\n \"Yoshihara\",\n \"Yoshikawa\",\n \"Yoshimatsu\",\n \"Yoshinobu\",\n \"Yoshioka\",\n \"Yoshitomi\",\n \"Yoshizaki\",\n \"Yoshizawa\",\n \"Yuasa\",\n \"Yuhara\",\n \"Yunokawa\"\n]\n\ndef get_female_name():\n return random.choice(female_names)\n\ndef get_male_name():\n return random.choice(male_names)\n\ndef get_family_name():\n return random.choice(family_names)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Generate japanese names.')\n parser.add_argument('-m', dest='show_male', action='store_const',\n const=True, default=False,\n help='show male names instead female')\n parser.add_argument('-f', dest='show_family_name', action='store_const',\n const=True, default=False,\n help='show family name')\n args = parser.parse_args()\n result = \"\"\n if args.show_family_name:\n result += get_family_name() + \" \"\n if args.show_male:\n result += get_male_name()\n else:\n result += get_female_name()\n print(result)\n","sub_path":"japanesenames.py","file_name":"japanesenames.py","file_ext":"py","file_size_in_byte":36792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"241640343","text":"# This is our test file\nfrom Sprint02 import *\n\ndef test_child_max(indi, families):\n if(child_max(indi, families) == \"ERROR: too many kids\"):\n return (\"ERROR:\", indi, \": too many kids\")\n #else: return(\"child max test passed\")\n\ndef test_quintuplets(indi, families):\n if(quintuplets(indi, families) == \"ERROR: more than 5 kids born at once\"):\n return (\"ERROR:\", indi, \": more than 5 kids born at once\")\n #else: return(\"quintuplets test passed\")\n\ndef test_the_deceased(individuals):\n result = the_deceased(individuals)\n if (\"ERROR\" in result):\n return result\n else:\n return \"The deceased: \" + 
str(result)\n\nfor indi in families:\n print(test_child_max(indi, families))\n print(test_quintuplets(indi, families))\nprint(test_the_deceased(individuals))","sub_path":"Tests02.py","file_name":"Tests02.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"326756906","text":"\"\"\"Module containing common scenarios that can be used\nfor writing tests with less boiler-plate.\"\"\"\n\nfrom typing import List, Dict, Any\nimport unittest\n\nimport time\nfrom exonum_client import ExonumClient\nfrom suite import ExonumNetwork, ProcessOutput, ProcessExitResult\nfrom requests.exceptions import ConnectionError\n\nRETRIES_AMOUNT = 20\nARTIFACT_NAME = \"exonum-cryptocurrency-advanced\"\nARTIFACT_VERSION = \"1.0.0-rc.1\"\n\n\ndef run_dev_node(application: str) -> ExonumNetwork:\n \"\"\"Starts a single node in the run-dev mode and returns\n `ExonumNetwork` object with the running node.\n\n Example:\n\n >>> network = run_dev_node(\"exonum-cryptocurrency-advanced\")\"\"\"\n network = ExonumNetwork(application)\n\n network.run_dev()\n\n return network\n\n\ndef run_n_nodes(application: str, nodes_amount: int) -> ExonumNetwork:\n \"\"\"Creates and runs a network with N validators and return an\n `ExonumNetwork` object with it.\"\"\"\n\n address = \"127.0.0.1:{}\"\n\n # Assign peer ports starting from 6331.\n available_peer_port = 6331\n\n # Assign API ports starting from 8080.\n available_api_port = 8080\n\n network = ExonumNetwork(application)\n network.generate_template(nodes_amount)\n\n for i in range(nodes_amount):\n network.generate_config(i, address.format(available_peer_port))\n available_peer_port += 1\n\n for i in range(nodes_amount):\n public_api_address = address.format(available_api_port)\n private_api_address = address.format(available_api_port + 1)\n network.finalize(i, public_api_address, private_api_address)\n available_api_port += 2\n\n for i in range(nodes_amount):\n network.run_node(i)\n\n return network\n\n\ndef run_4_nodes(application: str) -> ExonumNetwork:\n \"\"\"Creates and runs a network with 4 validators and return an\n `ExonumNetwork` object with it.\n\n Example:\n\n >>> network = run_4_nodes(\"exonum-cryptocurrency-advanced\")\n >>> for i in range(1, network.validators_count()):\n ... 
print(network.api_address(i))\n ...\n '127.0.0.1', 8080, 8081\n '127.0.0.1', 8082, 8083\n '127.0.0.1', 8084, 8085\n '127.0.0.1', 8086, 8087\n \"\"\"\n return run_n_nodes(application, 4)\n\n\ndef assert_processes_exited_successfully(\n test: unittest.TestCase, outputs: List[ProcessOutput]\n) -> None:\n \"\"\"Asserts that all the processes exited successfully.\"\"\"\n for output in outputs:\n test.assertEqual(output.exit_result, ProcessExitResult.Ok)\n test.assertEqual(\n output.exit_code, 0, f\"Process exited with non-zero code: {output.stderr}\"\n )\n\n\ndef launcher_networks(network: ExonumNetwork) -> List[Dict[str, Any]]:\n \"\"\"Builds a network configuration for `exonum-launcher` from the\n `ExonumNetwork` object.\"\"\"\n networks = []\n for validator_id in range(network.validators_count()):\n host, public_port, private_port = network.api_address(validator_id)\n node_network = {\n \"host\": host,\n \"ssl\": False,\n \"public-api-port\": public_port,\n \"private-api-port\": private_port,\n }\n networks.append(node_network)\n\n # Temporary workaround: supervisor works in simple mode and we need only one node.\n return networks[:1]\n\n\ndef wait_network_to_start(network: ExonumNetwork) -> None:\n \"\"\"Wait for network starting\"\"\"\n wait_api_to_start(network)\n wait_for_block(network, 1)\n\n\ndef wait_for_block(network: ExonumNetwork, height: int = 1) -> None:\n \"\"\"Wait for block at specific height\"\"\"\n for validator_id in range(network.validators_count()):\n host, public_port, private_port = network.api_address(validator_id)\n client = ExonumClient(host, public_port, private_port)\n for _ in range(RETRIES_AMOUNT):\n if client.public_api.get_block(height).status_code == 200:\n break\n time.sleep(0.5)\n\n\ndef wait_api_to_start(network: ExonumNetwork) -> None:\n \"\"\"Wait for api starting\"\"\"\n for validator_id in range(network.validators_count()):\n host, public_port, private_port = network.api_address(validator_id)\n client = ExonumClient(host, public_port, private_port)\n for _ in range(RETRIES_AMOUNT):\n try:\n client.public_api.health_info()\n break\n except ConnectionError:\n time.sleep(0.5)\n\n\ndef generate_config(\n network: ExonumNetwork,\n deadline_height: int = 10000,\n consensus: dict = None,\n artifact_name: str = ARTIFACT_NAME,\n instances: dict = None,\n) -> dict:\n if instances is None:\n instances = {}\n cryptocurrency_advanced_config_dict = {\n \"networks\": launcher_networks(network),\n \"deadline_height\": deadline_height,\n \"consensus\": consensus,\n \"artifacts\": {\n \"cryptocurrency\": {\n \"runtime\": \"rust\",\n \"name\": artifact_name,\n \"version\": ARTIFACT_VERSION,\n }\n },\n \"instances\": instances,\n }\n\n return cryptocurrency_advanced_config_dict\n\n\ndef find_service_status(available_service, service_name):\n for service in available_service[\"services\"]:\n if service[\"spec\"][\"name\"] == service_name:\n return service[\"status\"][\"type\"]\n raise RuntimeError\n","sub_path":"test-suite/exonum-py-tests/suite/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"261063338","text":"#!/usr/bin/env python3\n#coding: utf8\n\nfrom library.bin import _scp\nfrom pa_nlp import nlp\nimport optparse\n\nif __name__ == \"__main__\":\n parser = optparse.OptionParser(usage = \"cmd srcDir targetDir\")\n #parser.add_option(\"-q\", \"--quiet\", action = \"store_true\", dest = \"verbose\",\n #default = False, help = \"\")\n 
parser.add_option(\"--exclude\", dest=\"excludePattern\", default=None)\n parser.add_option(\"-d\", action = \"store_true\", dest = \"delete\",\n default = False)\n (options, args) = parser.parse_args()\n assert len(args) == 2 and (\".\" == args[0] or \".\" == args[1])\n \n deleteOpt = \"--delete\" if options.delete else \"\"\n if options.excludePattern is not None:\n excludeOpt = f\"--exclude={options.excludePattern}\"\n else: \n excludeOpt = \"\"\n\n srcDir, port1 = _scp.replace_server(args[0])\n tgtDir, port2 = _scp.replace_server(args[1])\n srcDir += \"/\"\n tgtDir += \"/\"\n\n if not nlp.is_none_or_empty(port1):\n port_opt = f\"--port={port1}\"\n elif not nlp.is_none_or_empty(port2):\n port_opt = f\"--port={port2}\"\n else:\n port_opt = \"\"\n\n cmd = f\"rsync -ravutzhlog --progress -e ssh {port_opt} \" \\\n f\"{srcDir} {tgtDir} {excludeOpt} {deleteOpt}\"\n nlp.execute_cmd(cmd)\n\n","sub_path":"library/bin/_supdate.py","file_name":"_supdate.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"320930724","text":"import unittest\nfrom unittest.mock import patch\n\nfrom pia_scraper import Agency, PIA\n\nclass PiaTests(unittest.TestCase):\n GSAS_PIA_URL = \"https://www.gsa.gov/reference/gsa-privacy-program/privacy-impact-assessments-pia\"\n\n def test_agency(self):\n agency = Agency()\n\n self.assertEqual(agency.url, self.GSAS_PIA_URL)\n\n def test_get_urls(self):\n mock_html_response = \"\"\"\n
<html><body>\n            <h2>PIA Systems</h2>\n            <table>\n              <tr><td><a href=\"https://gsa.gov/cdnstatic/Ancillary_Financial_Applications_AFA_PIA.pdf\">Ancillary Financial Applications (AFA) PIA</a></td></tr>\n            </table>\n            </body></html>
\n \n \n \n \n \n \n \n \n \n \n \"\"\"\n\n agency = Agency()\n with patch('requests.get') as mock_get:\n # return fixture data\n mock_get.return_value.text = mock_html_response\n agency.get_pia_urls()\n\n self.assertEqual(agency.pias[0].pdf_url, \"https://gsa.gov/cdnstatic/Ancillary_Financial_Applications_AFA_PIA.pdf\")\n\n def test_download_pdf(self):\n pia = PIA(\"https://gsa.gov/cdnstatic/Ancillary_Financial_Applications_AFA_PIA.pdf\")\n with patch('requests.get') as mock_get:\n # return fixture data\n with open('tests/fixtures/fixture.pdf', \"rb\") as f:\n fixture_content = f.read()\n mock_get.return_value.content = fixture_content\n pia.download_pdf()\n\n self.assertEqual(pia.pdf_path, 'pias/Ancillary_Financial_Applications_AFA_PIA.pdf')\n\n def get_text_from_pdf(self):\n pia = PIA(pdf_path = \"tests/fixtures/fixture.pdf\")\n\n pia.get_text_from_pdf()\n self.assertEqual(pia.txt_path, \"tests/fixtures/fixture.txt\")\n self.assertTrue(\"Privacy Impact Assessment (PIA)\" in pia.full_text)\n\n def test_get_system_name(self):\n pia = PIA(txt_path = \"tests/fixtures/fixture.txt\")\n pia.get_text_from_txt()\n\n pia.get_system_name()\n self.assertTrue(\"Ancillary Financial Applications\" in pia.system_name)\n\n def test_get_authority(self):\n pia = PIA(txt_path = \"tests/fixtures/fixture.txt\")\n pia.get_text_from_txt()\n\n pia.get_authority()\n self.assertTrue(\"8 CFR 1232.7002\" in pia.authority)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/pia_tests.py","file_name":"pia_tests.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"112085368","text":"\"\"\"\nPatetrn\n\nA B C D\nA B C\nA B \nA\nn = 4\n\n\"\"\"\n\n# ASCII value of A=65, B=66 and so on\n# chr() function is used to convert ASCII values to character\n\n\nprint(\"Enter the number of rows\")\nn=int(input())\n\nprint(\"Here is the pattern\")\n\ni=n\n\nwhile i>=1:\n j=1\n while j<=i:\n print(chr(j+64),end=\" \")\n j+=1\n print(\"\\n\")\n i-=1\n","sub_path":"Python/Pattern_d9.py","file_name":"Pattern_d9.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"595182924","text":"import logging\nfrom django.views.generic.base import TemplateResponseMixin, ContextMixin, View\n\nfrom ads_monitor.models import RuleExecutionResult, AdInsight\n\nlogger = logging.getLogger(__name__)\n\n\nclass ServerJSTemplateView(TemplateResponseMixin, ContextMixin, View):\n def __init_called_js(self):\n self.__called_js = []\n\n def call_js(self, jsmodule, jsfunction, jsondata):\n if not self.__called_js:\n self.__init_called_js()\n self.__called_js.append({\n 'file': jsmodule,\n 'module': jsmodule,\n 'function': jsfunction,\n 'data': jsondata,\n })\n\n def __get_server_js(self):\n return 'console.log(\"hello,world\")'\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n context['server_js'] = self.__get_server_js()\n return self.render_to_response(context)\n\n\nclass RuleExecutionResultsView(ServerJSTemplateView):\n\n template_name = \"ads_monitor/rule_execution_results.html\"\n\n def get_context_data(self, **kwargs):\n context = \\\n super(RuleExecutionResultsView, self).get_context_data(**kwargs)\n r = RuleExecutionResult.objects.filter(result=False)\n r.select_related('adinsight')\n objs = r.all()\n if objs.count() > 0:\n context['rule_execution_results_headers'] = objs[0].to_dict().keys()\n 
context['rule_execution_results'] = \\\n [item.to_dict().values() for item in objs]\n return context\n\n","sub_path":"ads_monitor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"511697028","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 23 17:41:32 2017\n\n@author: H0540603\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.metrics import r2_score\nimport matplotlib.pyplot as plt\n\nhome = r'D:\\Daten\\Auswertung_Feldarbeiten_Pt_Nobressart\\PYTHON\\Timeseries'\n \nclass Model_outputs():\n \n def __init__(self, model_output):\n out_dir = os.path.join(home, 'model_outputs')\n data = pd.read_csv(os.path.join(out_dir, model_output), sep='\\t', \n na_values = 'nan')\n date = pd.to_datetime(data['Year'].astype('int') * 1000 + \\\n data['DOY'].astype('int'), format='%Y%j') + \\\n pd.to_timedelta(data['Time'], unit = 'h')\n data = data.set_index(pd.DatetimeIndex(date))\n self.data = data.copy()\n self.modelName = 'TSEB' if 'TSEB' in model_output else 'OSEB'\n \n def get_observations(self, style='Mauder', get_data=False):\n ec_data = pd.read_csv(os.path.join(home, \n 'Flux_Comparison_%s.csv' % style))\n date_ec = pd.to_datetime(ec_data['T_end'], format = '%d.%m.%Y %H:%M')\n ec_data = ec_data.set_index(pd.DatetimeIndex(date_ec))\n self.observed_data = ec_data.copy()\n self.combined_data = ec_data.merge(right = self.data, left_index = True, \n right_index = True, how = 'left')\n if get_data:\n return ec_data\n \n def calcR2(self):\n good = self.combined_data[['H_corr', 'H_model', 'LE_corr', 'LE_model',\n 'G', 'G_model', 'Rn', 'Rn_model']].dropna(how = 'any')\n self.r2_H = r2_score(good.H_corr, good.H_model)\n self.r2_LE = r2_score(good.LE_corr, good.LE_model)\n self.r2_Rn = r2_score(good.Rn, good.Rn_model)\n self.r2_G = r2_score(good.G, good.G_model)\n \n def plot_timeseries(self, style='Mauder'):\n self.get_observations(style)\n self.calcR2()\n sys.path.append(r'D:\\Daten\\Auswertung_Fendt_2016\\PYTHON')\n import plot_utils\n pal = plot_utils.load_sns()\n\n model_data = self.data.copy()\n ec_data = self.observed_data.copy()\n sns.set(context = \"paper\", style = 'white',\n rc = {'axes.labelsize': 16.0, 'figure.figsize': [9.5, 6], \n 'legend.fontsize': 16.0, 'xtick.labelsize': 16.0,\n 'ytick.labelsize': 16.0, 'xtick.major.size': 4.0,\n 'ytick.major.size': 4.0})\n \n fig, ax = plt.subplots(4,1)\n ax[0].plot(ec_data['LE_corr'], label = 'EC', color = pal[0])\n ax[0].plot(model_data['LE_model'], label = self.modelName, \n color = pal[4], alpha = 0.8)\n ax[0].set_xticklabels([])\n ax[0].set_ylabel('LE')\n ax[0].text(0.99, 0.96,'R$^2$$_{%s}$: %.2f' % (self.modelName, \n self.r2_LE), \n horizontalalignment='right', fontsize = 16,\n verticalalignment='top', transform=ax[0].transAxes)\n \n ax[1].plot(ec_data['H_corr'], label = 'H EC', color = pal[0])\n ax[1].plot(model_data['H_model'], label = self.modelName, \n color = pal[4], alpha = 0.8)\n ax[1].set_xticklabels([])\n ax[1].set_ylabel('H')\n ax[1].text(0.99, 0.96,'R$^2$$_{%s}$: %.2f' % (self.modelName,\n self.r2_H), \n horizontalalignment='right', fontsize = 16,\n verticalalignment='top', transform=ax[1].transAxes)\n \n ax[2].plot(ec_data['G'], label = 'G EC', color = pal[0])\n ax[2].plot(model_data['G_model'], label = self.modelName, \n color = pal[4], alpha = 0.8)\n ax[2].set_xticklabels([])\n ax[2].set_ylabel('G')\n ax[2].text(0.99, 0.96,'R$^2$$_{%s}$: 
%.2f' % (self.modelName,\n self.r2_G),\n horizontalalignment='right', fontsize = 16,\n verticalalignment='top', transform=ax[2].transAxes)\n \n ax[3].plot(ec_data['Rn'], label = 'Rn EC', color = pal[0])\n ax[3].plot(model_data['Rn_model'], label = self.modelName, \n color = pal[4], alpha=0.8)\n ax[3].set_ylabel('Rn')\n ax[3].text(0.99, 0.96,'R$^2$$_{%s}$: %.2f' % (self.modelName,\n self.r2_Rn), \n horizontalalignment='right', fontsize = 16,\n verticalalignment='top', transform=ax[3].transAxes)\n \n ax[0].set_title('Comparison of %s and EC fluxes, Petit Nobressart' % (\n self.modelName), fontsize = 16, y = 1.08)\n ax[0].legend(bbox_to_anchor=(1.03, 1), loc=2, borderaxespad=0.)\n \n","sub_path":"Timeseries/evaluate_timeseries.py","file_name":"evaluate_timeseries.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69345459","text":"#-*- coding: utf-8 -*-\n#from sparta.lib.base import *\nfrom pylons import request, response, session, tmpl_context as c, url\nfrom pylons.controllers.util import abort, redirect\nfrom sparta.lib.base import BaseController, render\n\nfrom sparta.model import *\nfrom sparta.model.meta import Session #[SH] Added\nfrom AutoPages import *\n\n# [SH] 아래 3줄 \nimport shutil \nimport uuid \nfrom sparta.lib.libupload import Upload\n\nlog = logging.getLogger(__name__)\n\nclass BoardController(BaseController):\n \n\n\tdef menu(self):\n\t\t\"\"\" 게시판 포럼 - 좌측 메뉴 \"\"\"\n\t\t\n\t\tif CheckNotAdmin():\n\t\t\taccLevel = session[\"UserType\"]\n\t\t\taccField = \"AdminBoard.AccessView\"\n\t\telse:\n\t\t\taccLevel = None ; accField = None\n\t\t\n\t\t# 권한별 게시판 차등 표시\n\t\trx = Archive(\"AdminBoard\").getRecords(\n\t\t\t\"AdminBoard.ViewMode > 0,AdminBoard.BType IN 0;1\",\n\t\t\t\"BCode,BName,BType,ViewMode,AccessView\",\n\t\t\t\"AdminBoard.SubFolder ASC,AdminBoard.SortNumber ASC\",\n\t\t\taccField = accField, accLevel = accLevel )\n\t\t\n\t\tboardArray = []\n\t\t\n\t\tfor board in rx:\n\t\t\tmenuBoard = {\"text\" : board[\"BName\"], \"title\": board[\"BName\"], \"leaf\":True, \"iconCls\":\"icon-comment\", \"taburl\":\"/board/list/\"+board[\"BCode\"]}\n\t\t\tboardArray.append(menuBoard)\n\t\t\n\t\tForumRoot = [ { \"text\":\"포럼\", \"leaf\":False, \"expanded\":True, \"children\": boardArray } ]\n\t\treturn jsonOutput(ForumRoot)\n\n\n\t# 게시판 메인 로드\n\tdef index(self):\n\t\tautoFrameData(c, __name__)\n\t\t\n\t\tc.firstLoadPage = \"/board/main/\"\n\t\tc.firstLoadTitle = \"Forum\"\n\t\tc.leftAccordPanels\t= \"\"\"[{title:\"Navigation\",url:\"/board/menu/\", rootText:\"Boards\", iconCls:\"icon-workspace\",disableEdit:true}]\"\"\" \n\t\t\n\t\treturn render(\"frame.html\")\n\t\n\t\n\t\n\tdef main(self):\n\t\t\"\"\" 포럼 메인에 미니게시판 출력 \"\"\"\n\t\t\n\t\tif CheckNotAdmin():\n\t\t\taccLevel = session[\"UserType\"]\n\t\t\taccField = \"AdminBoard.AccessView\"\n\t\telse:\n\t\t\taccLevel = None\n\t\t\taccField = None\n\t\t\n\t\trx = Archive(\"AdminBoard\").getRecords(\"AdminBoard.ViewMode == 2\", \"BCode,BName,BType,ViewMode\", \"AdminBoard.SortNumber ASC\", accField=accField, accLevel=accLevel )\t\t \n\t\tbList = \"\";tList = [];cList = []\n\t\t \n\t\tfor board1 in rx:\n\t\t\tboardtable = board1[\"BCode\"]\n\t\t\tboardaddr = \"/board/list/%s\" % boardtable\n\t\t\tboardname = board1[\"BName\"]\n\t\t\t\n\t\t\tbList = addStr(bList, \"[%s]\" %(self.mini_list(boardtable, boardname)))\n\t\t\t\n\t\t\ttList.append(\"'\"+boardname+\"'\")\n\t\t\tcList.append(\"'\"+boardtable+\"'\")\n\t\t\t\n\t\tc.BoardNames = HTML(\"var 
BoardName = [%s];\" % (\",\".join(tList)) )\t\t\t \n\t\tc.BoardCodes = HTML(\"var BoardCode = [%s];\" % (\",\".join(cList)) )\n\t\tc.BoardList = HTML( \"var BoardData = [%s];\" % (bList) )\n\t\t\n\t\treturn render(\"Board_Main.html\")\n\n\n\tdef mini_list(self, id, tName):\n\t\t\"\"\" 미니게시판 JSON 출력 : 내부 호출용 \"\"\"\n\t\t\n\t\t#[TODO] 권한 채킹 필요\n\t\tboardID = str(id)\n\t\tclist = \"Board.IDX,Board.Number,Board.Title,Board.CreateBy,Board.CreateDate,Board.Category,Board.ReplyCount\"\n\t\tsort1 = \"Board.CreateDate DESC\"\n\t\tquery1 = \"Board.BoardID == \" + boardID\n\t\talist = ArchiveLister(\"Board_mini\", Archive=\"Board\", ViewColumns=clist, SortColumns=sort1, WhereString=query1, GroupField=\"\", Start=0, Limit=5)\n\t\treturn alist.render(outMode=\"json\")\n\n\n\tdef list(self, id, id2=None):\n\t\tboardID = str(id)\n\t\ttName\t= None\n\t\t\n\t\trx = Archive(\"AdminBoard\").getRecords(\"\", \"BCode,BName,DisplayCate,DisplayStat,DisplayReply\", \"AdminBoard.SortNumber ASC\" )\n\n\t\tfor board1 in rx:\n\t\t\tif boardID == board1[\"BCode\"]:\n\t\t\t\ttName = board1[\"BName\"] \n\t\t\t\tDsCate = board1[\"DisplayCate\"]\n\t\t\t\tDsStat = board1[\"DisplayStat\"]\n\t\t\t\tDsReply = board1[\"DisplayReply\"]\n\t\t\n\t\t# 게시판 출력\n\t\tif tName:\n\t\t\tc.BoardCode = boardID\n\t\t\tc.BoardName = tName\n\t\t\tc.DisplayCate = DsCate\n\t\t\tc.DisplayStat = DsStat\n\t\t\tc.DisplayReply = DsReply\n\t\telse:\n\t\t\treturn u\"잘못된 게시판 ID 이거나 권한이 없습니다.\"\n\t\t\n\t\treturn render(\"Board_List.html\")\n\n \n\tdef listup(self, id, id2=None):\n\t\t\"\"\" 게시물 리스트 업 \"\"\"\n\t\t\n\t\tboardID = str(id)\n\t\tclist = \"Board.Number,Board.IDX,Board.Category,Board.Status,Board.Title,Board.CreateBy,Board.CreateByName,Board.CreateDate,Board.ViewCount,Board.ReplyCount\"\n\t\tsort1 = \"Board.CreateDate DESC\"\n\t\tquery1 = \"Board.BoardID == \" + boardID\n\t\t\t\t\n\t\t# Board에는 AccessView가 없음\n\t\talist = ArchiveLister(\"Board_Base\", Archive=\"Board\", ViewColumns=clist, SortColumns=sort1, WhereString=query1, GroupField=\"\") #, AccField=True )\n\t\treturn alist.render(outMode=\"json\")\n\n\tdef view(self, id, id2):\n\t\t\"\"\" 게시물 읽기. 
템플릿에 직접 값을 넣는 다 \"\"\"\n\t\tboardID = str( id )\t\t#[NOTE] unicode to string\n\t\ttName = None\n\t\t\n\t\t# 게시판 형태를 맞추기 위한 게시판 설정을 읽어온다.\n\t\trx = Archive(\"AdminBoard\").getRecords(\"AdminBoard.BCode == \" + boardID, \"BName,DisplayCate,DisplayStat,DisplayReply,AccessReply\", \"AdminBoard.SortNumber ASC\" )\n\t\t\n\t\tfor board1 in rx:\n\t\t\ttName\t= board1[\"BName\"]\n\t\t\tDsCate\t= board1[\"DisplayCate\"]\n\t\t\tDsStat\t= board1[\"DisplayStat\"]\n\t\t\tDsReply = board1[\"DisplayReply\"]\n\t\t\tAlReply = board1[\"AccessReply\"]\n\t\t\t\t\n\t\t# 게시물 레코드 읽기 \n\t\tcolStr = \"CreateBy,CreateByName,Title,BoardID,Content,ViewCount,CreateDate,UploadName1,UploadLink1\"\t\t \n\t\trow = Archive(\"Board\").getValues(\"Board.IDX == \" + id2, colStr )\n\t\t\n\t\tif not tName: return HTML(u'잘못된 게시판 코드 입니다.')\n\t\tif not row: return HTML(u'존재하지 않는 게시물입니다')\n\t\tif boardID != row[\"BoardID\"]: return HTML(u'잘못된 접근 입니다.')\n\t\t\n\t\t#[TODO] 아래주석 확인\n\t\t# 권한 채킹 if CheckLevel(\"AdminBoard\", \"AccessRead\", \"AdminBoard.BCode == \" + boardID):\n\t\t# else:c.Content = HTML(u'볼수 있는 권한이 없습니다.')\n\t\t\n\t\t# 여기서부터 게시물 출력\n\t\tc.IDX = id2\n\t\tc.PNID = \"PN_Board_%s\" % id2\n\t\tc.BoardCode = boardID\n\t\tc.BoardName = tName\n\t\tc.DisplayCate = DsCate\n\t\tc.DisplayStat = DsStat\n\t\tc.DisplayReply = DsReply\n\t\tc.AllowReply = AlReply\n\t\t\n\t\t# 첨부파일 개수 가져오기 [SH]\n\t\tattach_q = Session.query(BoardAttach)\n\t\tc.attach_cnt = attach_q.filter_by(article_idx=id2).count()\n\t\t\n\t\tif c.attach_cnt > 0:\n\t\t\tc.article_list = self.getArticleAttach(id2)\n\t\telse:\n\t\t\tc.article_list = \"[]\"\n\t\t# --------------------------------\n\n\t\tc.WriterName = u\"%s (%s)\" % ( unicode(row[\"CreateByName\"]), unicode(row[\"CreateBy\"]) )\n\t\tc.WriterID = row[\"CreateBy\"] # 삭제할때 사용됨\n\t\t\n\t\t# Template Pasting\n\t\tfor cname in colStr.split(\",\"):\n\t\t\tif str( type( row[cname] ) ) == \"\":\n\t\t\t\tsetattr( c, cname, row[cname].strftime(\"%Y-%m-%d %H:%M\") )\n\t\t\telse:\n\t\t\t\tsetattr( c, cname, HTML(row[cname]) )\n\t\t\n\t\t# 첨부 파일 표시, Jpg, png 파일인 경우, 컨텐츠에 직접 디스플레이\n\t\tif row[\"UploadName1\"]:\n\t\t#\t c.UploadFile1 = HTML('%s' %( row[\"UploadLink1\"], row[\"UploadName1\"]) )\n\t\t\tif row[\"UploadName1\"][-4:].lower() in (\".jpg\", \".png\"):\n\t\t\t\tc.Content = HTML('
<a href=\"%s\" target=\"_blank\">%s</a><br/><img src=\"%s\"/><br/>
%s' %( row[\"UploadLink1\"], row[\"UploadName1\"], row[\"UploadLink1\"], row[\"Content\"]) )\n\t\t#else:\n\t\t#\t c.UploadFile1 = u\"첨부없음\"\n\t\t\t\n\t\t# 보기 카운트 증가 ( [TODO] 나중에 이벤트로 처리해야할 부분 )\n\t\tif row[\"CreateBy\"] != session[\"UserID\"]:\n\t\t\t# Pass 0은 Archive의 추가이벤트를 발생하지않는 단순 변경하라는 플래그\n\t\t\tnewVC = int(row[\"ViewCount\"]) + 1\n\t\t\tdSave = {\"IDX\": id2, \"ViewCount\": newVC, \"_pass_\": 0 }\n\t\t\tArchive(\"Board\").New( **dSave )\n\t\t\tc.ViewCount = newVC\n\t\t\t\n\t\treturn render(\"Board_View.html\")\n\t\n\t# [SH] Controller에서 쓰기 위한 Action이 아니다 -------------\n\tdef getArticleAttach(self, id):\n\t\t# 첨부파일 데이터 가져오기\n\t\tattach_q = Session.query(BoardAttach)\n\t\tattach = attach_q.filter_by(article_idx=id).all()\n\t\t\n\t\t# { boxLabel: 'Item 1', name:'cb-col-1' },\n\t\tattachItem = list()\n\t\t\n\t\tfor row in attach:\n\t\t\tattachItem.append({ \"boxLabel\": \"%s(%s)\" % (row.org_path, row.filesize), \"name\" : \"delete_file\", \"inputValue\" : row.id})\n\t\t\n\t\treturn json.dumps(attachItem)\n\t# --------------------------------------------\n\n\tdef view_json(self, id, id2):\n\t\t\"\"\" 수정할때 게시물을 Json으로 값을 가져온다. id=BoardID, id2=Board.IDX \"\"\"\n\t\t#[TODO] 권한 처리\n\t\toutData = Archive(\"Board\").getValues( \"Board.IDX == %s\" % id2, \"Board.IDX,Board.BoardID,Board.Status,Board.Category,Board.Content,Board.Title\")\n\t\t#[TODO] BoardID Check\n\t\t#[TODO] BoardID Access Check\n\t\treturn jsonOutput( outData, \"data\")\n\t\n\tdef write(self, id, id2=None):\n\t\t\"\"\" 게시물 작성 처리 \"\"\"\n\t\t\n\t\tdSave = {}\n\t\tbData = Archive(\"AdminBoard\").getValues(\"AdminBoard.BCode == \"+str(id), \"IDX,AccessWrite\" )\n\t\t\n\t\tif bData:\n\t\t\tif bData[\"AccessWrite\"] == \"\" or bData[\"AccessWrite\"].find(session[\"UserType\"]) > -1:\n\t\t\t\t\n\t\t\t\tCopyDict( srcDic=request.params, destDic=dSave, keyList=\"IDX,Category,Status\")#, prefix=\"Board.\")\n\t\t\t\t\n\t\t\t\tdSave[\"BoardID\"] = str(id)\n\t\t\t\tdSave[\"BoardIDX\"] = bData[\"IDX\"]\n\t\t\t\tdSave[\"Title\"] = request.params[\"Title\"]\n\t\t\t\tdSave[\"Content\"] = request.params[\"Content\"]\n\t\t\t\t\n# [SH] 이 아래줄 부터 쭈욱 \n\t\t\t\tarticle_id = Archive(\"Board\").New( **dSave )\n\t\t\t\t\n\t\t\t\t# File Upload\n\t\t\t\tobjUpload = Upload()\n\t\t\t\tobjUpload.BoardDir(dSave[\"BoardID\"])\n\t\t\t\tupload_files = request.params.getall(\"bf_file\")\n\t\t\t\t\n\t\t\t\t# 업로드한 파일 목록 가져오기(수정 모드일때)\n\t\t\t\tif dSave.has_key(\"IDX\"):\n\t\t\t\t\t\n\t\t\t\t\t# File Delete\n\t\t\t\t\tselDeletedCheck = request.params.getall(\"delete_file\")\n\t\t\t\t\t\n\t\t\t\t\tfrom sqlalchemy import and_\n\t\t\t\t\t\n\t\t\t\t\tboard_attach_q = Session.query(BoardAttach)\n\t\t\t\t\tboard_attach = board_attach_q.filter(\n\t\t\t\t\t\tand_(\n\t\t\t\t\t\t\tBoardAttach.article_idx == dSave[\"IDX\"],\n\t\t\t\t\t\t\tBoardAttach.id.in_(selDeletedCheck)\n\t\t\t\t\t\t)\n\t\t\t\t\t).all()\n\t\t\t\t\t\n\t\t\t\t\t# 파일이 있을때만 삭제하도록 변경\n\t\t\t\t\tfor attach_item in board_attach:\n\t\t\t\t\t\tif os.path.exists(attach_item.new_path):\n\t\t\t\t\t\t\tos.unlink(attach_item.new_path)\n\t\t\t\t\t\t\tSession.delete(attach_item)\n\t\t\t\t\t\n\t\t\t\t\tSession.commit()\n\t\t\t\t\n\t\t\t\tfor upload_file in upload_files:\n\t\t\t\t\tif type(upload_file) == unicode:\n\t\t\t\t\t\t# 빈 문자열이 넘어온 경우 처리를 건너뛴다.\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\t# 실제 파일 업로드를 여기에서 수행한다.\n\t\t\t\t\tnew_file = objUpload.upload_file_move(upload_file)\n\t\t\t\t\t\n\t\t\t\t\tboard_attach_row = BoardAttach()\n\t\t\t\t\tboard_attach_row.board_code = unicode(dSave[\"BoardID\"])\n\t\t\t\t\tif 
dSave.has_key(\"IDX\"):\n\t\t\t\t\t\tboard_attach_row.article_idx = dSave[\"IDX\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\tboard_attach_row.article_idx = article_id\n\t\t\t\t\tboard_attach_row.org_path = unicode(new_file[\"O_PATH\"])\n\t\t\t\t\tboard_attach_row.new_path = new_file[\"D_PATH\"]\n\t\t\t\t\tboard_attach_row.filesize = unicode(new_file[\"SIZE\"])\n\t\t\t\t\t\n\t\t\t\t\tSession.add(board_attach_row)\n\t\t\t\t\tSession.commit()\n# ------------------------------------------------------------\t\t\t\t \n\t\t\t\treturn okMessage(\"작성되었습니다.\")\n\t\t\telse:\n\t\t\t\treturn errMessage(\"작성 권한이 없습니다.\")\n\t\telse:\n\t\t\treturn errMessage(\"게시판 구분아이디가 잘못 전달되었습니다\")\n\t\n\tdef remove(self, id, id2):\n\t\t# [SH] -------------------------------\n\t\tboard_attach_q = Session.query(BoardAttach)\n\t\tboard_attach = board_attach_q.filter_by(article_idx = id2).all()\n\t\t\n\t\t# 파일이 있을때만 삭제하도록 변경\n\t\tfor attach_item in board_attach:\n\t\t\tif os.path.exists(attach_item.new_path):\n\t\t\t\tos.unlink(attach_item.new_path)\n\t\t\t\tSession.delete(attach_item)\n\t\t\n\t\tSession.commit()\n\t\t# ---------------------- [SH] \n\t\tArchive(\"Board\").Remove( id2 )\n\t\treturn okMessage(\"삭제되었습니다.\")\n\n\tdef getCategory(self, id):\n\t\t\"\"\" 콤보박스용 카테고리 리스트 데이터 \"\"\"\n\t\t\n\t\trx = Archive(\"AdminBoard\").getValue(\"AdminBoard.BCode == \" + str(id), \"CateList\")\n\t\treturn \"[['\" + rx.replace(\",\",\"'],['\") + \"']]\"\n\n\tdef getStatus(self, id):\n\t\t\"\"\" 콤보박스용 상황표시 데이터 \"\"\"\n\t\t \n\t\trx = Archive(\"AdminBoard\").getValue(\"AdminBoard.BCode == \" + str(id), \"StatList\")\n\t\treturn \"[['\" + rx.replace(\",\",\"'],['\") + \"']]\"\n\n\tdef replylist_json(self, id):\n\n\t\tclist = \"BoardReply.IDX,BoardReply.Content,BoardReply.CreateBy,BoardReply.CreateDate\"\n\t\tsort1 = \"BoardReply.CreateDate ASC\"\n\t\tquery1 = \"BoardReply.ParentIDX == \" + str(id)\n\t\t\n\t\talist = ArchiveLister( \"BoardReply\", Archive=\"BoardReply\", ViewColumns=clist, SortColumns=sort1, WhereString=query1, GroupField=\"\" )\n\t\talist.Callbacks[\"listing\"] = self.replylist_event\n\t\treturn alist.render(outMode=\"json\")\n\n\tdef replylist_event(cls, row, itself):\n\t\tif (row[\"BoardReply_CreateBy\"] == session[\"UserID\"]) or CheckAdmin():\n\t\t\trow[\"IsRemovable\"] = True\n\t\telse:\n\t\t\trow[\"IsRemovable\"] = False\n\t\treturn row\n\t\n\t\n\tdef writeReply(self, id=None, id2=None):\n\t\ttry:\n\t\t\tpidx = request.params[\"ParentIDX\"]\n\t\t\t#[NEED] 권한 확인\n\t\t\t\n\t\t\tdSave = {}\n\t\t\tdSave[\"ParentIDX\"] = pidx\n\t\t\tdSave[\"Content\"] = request.params[\"Content\"]\n\t\t\tdSave[\"CreateBy\"] = session[\"UserID\"]\n\t\t\t\n\t\t\tArchive(\"BoardReply\").New( **dSave )\n\t\t\t\n\t\t\t# Reply Recount\n\t\t\tarcv = Archive(\"BoardReply\")\n\t\t\tRDB = arcv.Search(columns=\"BoardReply.IDX\", where=\"BoardReply.ParentIDX == \" + str(pidx) )\n\t\t\tdSave = {\"IDX\": pidx, \"ReplyCount\": RDB[0], \"_pass_\": True }\n\t\t\tArchive(\"Board\").New(**dSave)\n\t\t\t\n\t\texcept Exception as err:\n\t\t\ttraceback.print_exc(file=sys.stdout)\n\t\t\treturn errMessage( str(err) )\n\t\treturn okMessage( \"작성 되었습니다\" )\n\n\tdef removeReply(self, id):\n\t\ttry:\n\t\t\t#[NEED] 권한 확인\n\t\t\n\t\t\tarcv = Archive(\"BoardReply\")\n\t\t\tpidx = arcv.getValue(id, \"ParentIDX\")\n\t\t\n\t\t\tresult = Archive(\"BoardReply\").remove( id )\n\t\t\n\t\t\t# Reply Recount\n\t\t\tRDB = arcv.Search(columns=\"BoardReply.IDX\", where=\"BoardReply.ParentIDX == \" + str(pidx) )\n\t\t\tdSave = {\"IDX\": pidx, \"ReplyCount\": RDB[0], \"_pass_\": True 
}\n\t\t\tArchive(\"Board\").New(**dSave)\n\n\t\texcept Exception as err:\n\t\t\ttraceback.print_exc(file=sys.stdout)\n\t\t\treturn errMessage( str(err) )\n\t\treturn okMessage( \"삭제 되었습니다\" )\n\t\n","sub_path":"Sparta/sparta/controllers/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":13759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"202992431","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.views import View\nfrom .forms import WeeklyscheduleForm, ScheduledTimeForm, ScheduledTimeModelForm\nfrom django.views.generic import *\nfrom datetime import datetime, time\nfrom django.db import connection\n\n\nfrom .models import *\n\n\nclass ScheduledTime_list_view(ListView):\n\ttemplate_name = \"ScheduledTime/schedule_list.html\"\n\tdef get(self,request, wid):\n\t\tqueryset = ScheduledTime.objects.raw('SELECT * FROM \"scheduled_time\" WHERE \"scheduled_time\".\"wid\"=%s', [wid])\n\t\t# queryset = ScheduledTime.objects.filter(wid = wid)\n\t\t# print(queryset.query)\n\t\t# ws = Weeklyschedule.objects.filter(id = wid)\n\t\t# print(ws.query)\n\t\tws = Weeklyschedule.objects.raw('SELECT * FROM \"weeklyschedule\" WHERE \"weeklyschedule\".\"id\"=%s', [wid])[0]\n\t\tstaff = ws.sid\n\n\t\treturn render(request,self.template_name,{'object_list' : queryset, 'wid' : wid, 'staff': staff})\n\n\nclass ScheduledTime_create_view(View):\n\ttemplate_name = \"ScheduledTime/create.html\"\n\tdef get(self, request, wid, *args, **kwargs):\n\t\tform = ScheduledTimeForm()\n\t\tcontext = {\"form\": form,\n\t\t\t\t\t\"wid\": wid}\n\t\treturn render(request,self.template_name, context)\n\n\tdef post(self,request,wid,*args,**kwargs):\n\t\tform = ScheduledTimeForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tdata = form.cleaned_data\n\t\t\tddate = data.get('date')\n\t\t\t# print(ddate)\n\t\t\t# date_ = datetime.strptime(ddate, '%Y-%m-%d').date()\n\t\t\tst = data.get('starttime')\n\t\t\tet = data.get('endtime')\n\t\t\tnt = data.get('notes')\n\t\t\t# ScheduledTime.objects.create(date = ddate, starttime = st, endtime = et, wid = schedule, notes = nt)\n\t\t\tconnection.cursor().execute('INSERT into \"scheduled_time\" values(%s,%s,%s,%s,%s)',(ddate,st,et,wid,nt))\n\t\t\t# print(ScheduledTime.objects.get(wid=10400001))\n\t\t# if form.is_valid():\n\t\t# \tform.save()\n\t\t# \t# form.save()\n\t\tcontext = {\"form\": form,\n\t\t\t\t\t\"wid\": wid}\n\t\treturn render(request,self.template_name,context)\n\n\t\n\nclass ScheduledTime_update_view(UpdateView):\n\ttemplate_name = \"ScheduledTime/update.html\"\n\tform_class = ScheduledTimeModelForm\n\tdef get_object(self):\n\t\tdate = self.kwargs.get(\"date\")\n\t\t# print(date)\n\t\tdate_ = datetime.strptime(date, '%Y-%m-%d').date()\n\t\tstarttime = self.kwargs.get(\"starttime\")\n\t\tst = datetime.strptime(starttime, '%X').time()\n\t\tendtime = self.kwargs.get(\"endtime\")\n\t\tet = datetime.strptime(endtime, '%X').time()\n\t\twid = self.kwargs.get(\"wid\")\n\t\t# ww = Weeklyschedule.objects.get(id = wid)\n\t\t# obj = get_object_or_404(ScheduledTime, wid = ww, starttime = st, endtime = et, date = date_)\n\t\tobj = ScheduledTime.objects.raw('SELECT * FROM \"scheduled_time\" WHERE \"scheduled_time\".\"date\" = %s AND \"scheduled_time\".\"starttime\" = %s AND \"scheduled_time\".\"endtime\" = %s AND \"scheduled_time\".\"wid\" = %s',(date,starttime,endtime,wid)) \n\t\treturn obj[0]\n\n\tdef get_success_url(self):\n\t\twid = self.kwargs.get(\"wid\")\n\t\tstr =\"\"\n\t\tstr = str + 
'/schedule/' + wid + '/list/'\n\t\treturn str\n\nclass ScheduledTime_delete_view(View):\n\ttemplate_name = \"ScheduledTime/delete.html\"\n\tform_class = ScheduledTimeModelForm\n\tdef get_object(self):\n\t\tdate = self.kwargs.get(\"date\")\n\t\t# print(date)\n\t\tdate_ = datetime.strptime(date, '%Y-%m-%d').date()\n\t\tstarttime = self.kwargs.get(\"starttime\")\n\t\tst = datetime.strptime(starttime, '%X').time()\n\t\tendtime = self.kwargs.get(\"endtime\")\n\t\tet = datetime.strptime(endtime, '%X').time()\n\t\twid = self.kwargs.get(\"wid\")\n\t\t# ww = Weeklyschedule.objects.get(id = wid)\n\t\t# obj = get_object_or_404(ScheduledTime, wid = ww, starttime = st, endtime = et, date = date_)\n\t\tobj = ScheduledTime.objects.raw('SELECT * FROM \"scheduled_time\" WHERE \"scheduled_time\".\"date\" = %s AND \"scheduled_time\".\"starttime\" = %s AND \"scheduled_time\".\"endtime\" = %s AND \"scheduled_time\".\"wid\" = %s',(date,starttime,endtime,wid)) \n\t\treturn obj[0]\n\n\tdef get(self, request, *args, **kwargs):\n\t\tcontext = {}\n\t\tobj = self.get_object()\n\t\tif obj is not None:\n\t\t\tcontext['obj']=obj\n\t\treturn render(request,self.template_name, context)\n\n\tdef post(self,request,*args,**kwargs):\n\t\tdate = self.kwargs.get(\"date\")\n\t\t# print(date)\n\t\tdate_ = datetime.strptime(date, '%Y-%m-%d').date()\n\t\tstarttime = self.kwargs.get(\"starttime\")\n\t\tst = datetime.strptime(starttime, '%X').time()\n\t\tendtime = self.kwargs.get(\"endtime\")\n\t\tet = datetime.strptime(endtime, '%X').time()\n\t\twid = self.kwargs.get(\"wid\")\n\t\tstr =\"\"\n\t\tstr = str + '/schedule/' + wid + '/list/'\n\t\tcontext = {}\n\t\tobj = self.get_object()\n\t\tif obj is not None:\n\t\t\tcontext['obj'] = None\n\t\t\tconnection.cursor().execute('DELETE FROM \"scheduled_time\" WHERE \"scheduled_time\".\"date\" = %s AND \"scheduled_time\".\"starttime\" = %s AND \"scheduled_time\".\"endtime\" = %s AND \"scheduled_time\".\"wid\" = %s',(date,starttime,endtime,wid))\n\t\t\treturn redirect(str)\n\t\treturn render(request,self.template_name,context)\n","sub_path":"304env/src/DASHospital/WeeklySchedule/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"75443063","text":"from functions import *\r\nfrom constants import *\r\n\r\n# reading constant keys\r\nf = open(\"keys.txt\", \"r\")\r\nn = int(f.readline())\r\ne_yours = int(f.readline())\r\nd = int(f.readline())\r\nf.close()\r\n\r\n\r\ndef encipher(message, n, e):\r\n k_object = k_constant()\r\n k = k_object.constant\r\n l_object = l_constant()\r\n l = l_object.constant\r\n\r\n indicator = k # indicator gets the number letters in a plaintext unit\r\n ciphered_message = [] # ciphered message list holds single letters, while ciphered message string will return a whole word\r\n ciphered_message_string = ' '\r\n initial_text_number = k - 1 # adding variables to be able to change number 'k' of letters on plaintext unit\r\n k = initial_text_number\r\n\r\n # dividing user's message into groups of k letters\r\n message_list = [(message[i:i + indicator]) for i in range(0, len(message), indicator)]\r\n\r\n for i in range(messageLength(message, indicator)):\r\n\r\n message = message_list[\r\n i] # enciphering each letter at the time by getting its index and calculating ciphered number\r\n enciphering_number = 0 # detailed information on how the system works can be found in RSA math description on the web\r\n\r\n for x in range(len(message)):\r\n\r\n 
index = alphabet.index(message[x])\r\n\r\n enciphering_number = enciphering_number + (index * (len(alphabet) ** k))\r\n\r\n if k == 0:\r\n k = initial_text_number # changing the exponent to its initial value if it gets to 0 in case of longer texts\r\n else:\r\n k = k - 1\r\n\r\n enciphering_function = pow(enciphering_number, e, n) # evaluating the enciphering function\r\n\r\n z = l - 1 # new variable 'z' exponent gets value of ciphered text units -1\r\n\r\n for s in range(l):\r\n q, r = divmod(enciphering_function, (len(alphabet) ** z))\r\n\r\n if q > len(\r\n alphabet): # dividing by remainder number evaluated by enciphering function, then for l letter ciphertext units\r\n q = pow(q, 1, len(alphabet))\r\n\r\n ciphered_message.append(alphabet[q])\r\n z = z - 1\r\n enciphering_function = r\r\n\r\n for x in ciphered_message:\r\n ciphered_message_string += x\r\n\r\n return ciphered_message_string\r\n","sub_path":"Flask_Backend_API/api/enciphering.py","file_name":"enciphering.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"565039845","text":"import os\nimport sys\nimport numpy as np\n\n# Set to complete to use all the data\n# Set to sub to use training/dev sets only\n# FEATURE_EXP: logmel or raw or MFCC or MFCC_concat or text\n# WHOLE_TRAIN: This setting is for mitigating the variable length of the data\n# by zero padding\nEXPERIMENT_DETAILS = {'FEATURE_EXP': 'raw',\n 'AUDIO_MODE_IS_CONCAT_NOT_SHORTEN': True,\n 'MAKE_DATASET_EQUAL': False,\n 'FEATURE_DIMENSIONS': 55000,\n 'FREQ_BINS': 1,\n 'SUB_DIR': 'exp_1a',\n 'DATASET_IS_BACKGROUND': False,\n 'CONVERT_TO_IMAGE': False,\n 'WHOLE_TRAIN': False,\n 'WINDOW_SIZE': 1024,\n 'OVERLAP': 50,\n 'SAMPLE_RATE': 16000,\n 'REMOVE_BACKGROUND': True,\n 'SECONDS_TO_SEGMENT': 30}\n# Set True to split data into genders\nGENDER = True\nWINDOW_FUNC = np.hanning(EXPERIMENT_DETAILS['WINDOW_SIZE'])\nFMIN = 0\nFMAX = EXPERIMENT_DETAILS['SAMPLE_RATE'] / 2\nHOP_SIZE = EXPERIMENT_DETAILS['WINDOW_SIZE'] -\\\n round(EXPERIMENT_DETAILS['WINDOW_SIZE'] * (EXPERIMENT_DETAILS['OVERLAP'] / 100))\n\nif EXPERIMENT_DETAILS['FEATURE_EXP'] == 'text':\n FEATURE_FOLDERS = None\nelse:\n FEATURE_FOLDERS = ['audio_data', 'logmel']\nEXP_FOLDERS = ['log', 'model', 'condor_logs']\n\nif EXPERIMENT_DETAILS['AUDIO_MODE_IS_CONCAT_NOT_SHORTEN']:\n extension = 'concat'\nelse:\n extension = 'shorten'\nif EXPERIMENT_DETAILS['MAKE_DATASET_EQUAL']:\n data_eq = '_equalSet'\nelse:\n data_eq = ''\nif EXPERIMENT_DETAILS['DATASET_IS_BACKGROUND']:\n bkgnd = '_bkgnd'\nelse:\n bkgnd = ''\n\nif EXPERIMENT_DETAILS['FEATURE_EXP'] == 'logmel' or EXPERIMENT_DETAILS[\n 'FEATURE_EXP'] == 'MFCC' or EXPERIMENT_DETAILS['FEATURE_EXP'] == \\\n 'MFCC_concat':\n if EXPERIMENT_DETAILS['DATASET_IS_BACKGROUND']:\n FOLDER_NAME = f\"BKGND_{EXPERIMENT_DETAILS['FEATURE_EXP']}\" \\\n f\"_{str(EXPERIMENT_DETAILS['FREQ_BINS'])}_exp\"\n elif not EXPERIMENT_DETAILS['DATASET_IS_BACKGROUND'] and \\\n EXPERIMENT_DETAILS['REMOVE_BACKGROUND']:\n if EXPERIMENT_DETAILS['WHOLE_TRAIN']:\n FOLDER_NAME = f\"{EXPERIMENT_DETAILS['FEATURE_EXP']}\" \\\n f\"_{str(EXPERIMENT_DETAILS['FREQ_BINS'])}_WIN_\" \\\n f\"{str(EXPERIMENT_DETAILS['WINDOW_SIZE'])}_OVERLAP_\" \\\n f\"{str(EXPERIMENT_DETAILS['OVERLAP'])}_WHOLE_expq\"\n else:\n FOLDER_NAME = f\"{EXPERIMENT_DETAILS['FEATURE_EXP']}\" \\\n f\"_{str(EXPERIMENT_DETAILS['FREQ_BINS'])}_WIN_\" \\\n f\"{str(EXPERIMENT_DETAILS['WINDOW_SIZE'])}_OVERLAP_{str(EXPERIMENT_DETAILS['OVERLAP'])}_exp\"\n elif not 
    elif not EXPERIMENT_DETAILS['DATASET_IS_BACKGROUND'] and not \\\n            EXPERIMENT_DETAILS['REMOVE_BACKGROUND']:\n        FOLDER_NAME = f\"{EXPERIMENT_DETAILS['FEATURE_EXP']}\" \\\n                      f\"_{str(EXPERIMENT_DETAILS['FREQ_BINS'])}_with_backgnd_exp\"\nelif EXPERIMENT_DETAILS['FEATURE_EXP'] == 'raw':\n    if EXPERIMENT_DETAILS['DATASET_IS_BACKGROUND']:\n        FOLDER_NAME = f\"BKGND_{EXPERIMENT_DETAILS['FEATURE_EXP']}_exp\"\n    elif not EXPERIMENT_DETAILS['DATASET_IS_BACKGROUND'] and \\\n            EXPERIMENT_DETAILS['REMOVE_BACKGROUND']:\n        FOLDER_NAME = f\"{EXPERIMENT_DETAILS['FEATURE_EXP']}_exp\"\n    elif not EXPERIMENT_DETAILS['DATASET_IS_BACKGROUND'] and not \\\n            EXPERIMENT_DETAILS['REMOVE_BACKGROUND']:\n        FOLDER_NAME = f\"{EXPERIMENT_DETAILS['FEATURE_EXP']}_with_backgnd_exp\"\nelse:\n    FOLDER_NAME = f\"{EXPERIMENT_DETAILS['FEATURE_EXP']}_exp\"\nEXP_NAME = f\"{extension}{data_eq}{bkgnd}\"\n\nif sys.platform == 'win32':\n    DATASET = os.path.join('C:', '\\\\Users', 'Kacper', 'Desktop', 'Praca_mgr', 'baza_danych')  # here is the database folder with all zip files\n    WORKSPACE = os.path.join('C:', '\\\\Users', 'Kacper', 'Desktop', 'Praca_mgr', 'Python', 'daic_woz_2')  # here is the folder to store the output of this program\n    TRAIN_SPLIT_PATH = os.path.join(DATASET, 'train_split_Depression_AVEC2017.csv')\n    DEV_SPLIT_PATH = os.path.join(DATASET, 'dev_split_Depression_AVEC2017.csv')\n    TEST_SPLIT_PATH = os.path.join(DATASET, 'test_split_Depression_AVEC2017.csv')\n    FULL_TRAIN_SPLIT_PATH = os.path.join(DATASET, 'full_train_split_Depression_AVEC2017.csv')\n    COMP_DATASET_PATH = os.path.join(DATASET, 'complete_Depression_AVEC2017.csv')\nelif sys.platform == 'linux' and not os.uname()[1] == 'andrew-ubuntu':\n    DATASET = os.path.join('/mnt/c/Users/Kacper/Desktop/Praca_mgr/baza_danych')  # here is the database folder with all zip files on the Ubuntu (WSL) side\n    # set the path of the workspace (where the code is)\n    WORKSPACE_FILES_DIR = \\\n        '/mnt/c/Users/Kacper/Desktop/Praca_mgr/Python/daic_woz_process'  # here is the folder with the PyCharm project on the Ubuntu (WSL) side\n    # set the path of the workspace (where the models/output will be stored)\n    WORKSPACE_MAIN_DIR = '/mnt/c/Users/Kacper/Desktop/Praca_mgr/Python/daic_woz_2'  # here is the folder to store the output of this program on the Ubuntu (WSL) side\n    TRAIN_SPLIT_PATH = os.path.join(DATASET, 'train_split_Depression_AVEC2017.csv')\n    DEV_SPLIT_PATH = os.path.join(DATASET, 'dev_split_Depression_AVEC2017.csv')\n    TEST_SPLIT_PATH = os.path.join(DATASET, 'test_split_Depression_AVEC2017.csv')\n    FULL_TRAIN_SPLIT_PATH = os.path.join(DATASET, 'full_train_split_Depression_AVEC2017.csv')\n    COMP_DATASET_PATH = os.path.join(DATASET, 'complete_Depression_AVEC2017.csv')\nelif os.uname()[1] == 'andrew-ubuntu':\n    DATASET = '/mnt/6663b3e6-a12f-49e8-b881-421cebf2f8c6/datasets/DAIC-WOZ'\n    WORKSPACE_MAIN_DIR = '/mnt/6663b3e6-a12f-49e8-b881-421cebf2f8c6/daic_woz_2'\n    WORKSPACE_FILES_DIR = os.path.join('/home', 'andrew', 'PycharmProjects',\n                                       'daic_woz_process')\n    TRAIN_SPLIT_PATH = os.path.join(DATASET, 'train_split_Depression_AVEC2017.csv')\n    DEV_SPLIT_PATH = os.path.join(DATASET, 'dev_split_Depression_AVEC2017.csv')\n    TEST_SPLIT_PATH = os.path.join(DATASET, 'test_split_Depression_AVEC2017.csv')\n    FULL_TRAIN_SPLIT_PATH = os.path.join(DATASET, 'full_train_split_Depression_AVEC2017.csv')\n    COMP_DATASET_PATH = os.path.join(DATASET, 'complete_Depression_AVEC2017.csv')\nelse:\n    DATASET = os.path.join('Users', 'andrewbailey', 'OneDrive', 'DAIC-WOZ')\n    WORKSPACE = os.path.join('Users', 'andrewbailey', 'OneDrive', 'Coding', 'PycharmProjects', 'daic_woz_2')\n
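    # The *_SPLIT_PATH assignments below are repeated verbatim in every platform\n    # branch; a possible refactor (sketch only; this config keeps the explicit\n    # assignments) would build them once from DATASET:\n    # SPLITS = {n: os.path.join(DATASET, f'{n}_split_Depression_AVEC2017.csv')\n    #           for n in ('train', 'dev', 'test', 'full_train')}\n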
    TRAIN_SPLIT_PATH = os.path.join('Users', 'andrewbailey', 'OneDrive', 'DAIC-WOZ', 'train_split_Depression_AVEC2017.csv')\n    DEV_SPLIT_PATH = os.path.join('Users', 'andrewbailey', 'OneDrive', 'DAIC-WOZ', 'dev_split_Depression_AVEC2017.csv')\n    TEST_SPLIT_PATH = os.path.join('Users', 'andrewbailey', 'OneDrive', 'DAIC-WOZ', 'test_split_Depression_AVEC2017.csv')\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"538953356","text":"def factorial(n):\n    if n == 0:\n        return 1\n    else:\n        return n * factorial(n-1)\n\ndef combinatorics(n, r):\n    # n choose r, computed as the falling product n*(n-1)*...*(n-r+1) divided by r!\n    p = 1\n    for i in range(n-r+1, n+1):\n        p *= i\n    return p // factorial(r)\n\ns = 0\nfor i in range(23, 101):\n    for j in range(2, i//2 + 1):\n        if combinatorics(i, j) > 1000000:\n            s += i - 2*j + 1\n            break\n\nprint(s)\n","sub_path":"prob53.py","file_name":"prob53.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"187828053","text":"####### Separated the data into three train sets (Blog, JabRef, and My Volts) using Organization ID and running different models.\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n################----------------------------###################################\r\n####MY-VOLTS########################\r\n\r\ntrain = pd.read_csv('tcdml1920-rec-click-pred--training.csv')\r\ntest = pd.read_csv(\"tcdml1920-rec-click-pred--test.csv\")\r\n\r\n# Myvolts dataset\r\ntrain_myVolts = train[train['organization_id'] == 4]\r\ntrain_jabRef = train[train['organization_id'] == 1]\r\ntrain_theBlog = train[train['organization_id'] == 8]\r\ntrain_theBlog\r\n\r\ntest_myVolts = test[test['organization_id'] == 4]\r\ntest_jabRef = test[test['organization_id'] == 1]\r\n\r\ntest_theBlog = test[test['organization_id'] == 8]\r\n\r\ntrain_myVolts[(train_myVolts['query_char_count']=='\\\\N') & (train_myVolts['set_clicked']==1)].describe()\r\n\r\ny_col = ['set_clicked']\r\n# rec_processing_time: if it is too large the user may close the page and leave. This only removes 113 records.\r\n
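# quick sanity check (illustrative sketch): the filter below should drop ~113 rows\r\n# print((train_myVolts['rec_processing_time'] >= 9.2).sum())\r\n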
train_myVolts = train_myVolts[train_myVolts['rec_processing_time']<9.2]\r\n\r\n# import pandas_profiling \r\n# train_myVolts[feature_cols].profile_report()\r\nfeature_cols_to_drop = ['app_lang','app_version','application_type','clicks',\r\n                        'ctr','document_language_provided','first_author_id','local_hour_of_request'\r\n                        ,'local_time_of_request','num_pubs_by_first_author','number_of_authors', 'organization_id'\r\n                        ,'query_document_id','rec_processing_time','recommendation_set_id','session_id'\r\n                        ,'time_recs_displayed','time_recs_recieved','time_recs_viewed','timezone_by_ip','user_id'\r\n                        ,'user_java_version','user_os','user_os_version','user_timezone','year_published'\r\n                        ,'response_delivered','number_of_recs_in_set']\r\nprint('columns',train_myVolts.columns)\r\nfeature_cols=train_myVolts.columns.drop('set_clicked').drop(feature_cols_to_drop)\r\n\r\nprint('Values with \\\\N Train',train_myVolts[feature_cols].isnull().sum())\r\nprint(len(train_myVolts[feature_cols].columns))\r\ntrain_myVolts[train_myVolts[feature_cols]=='\\\\N']= np.nan\r\nprint('Values with NANs Train',train_myVolts[feature_cols].isnull().sum())\r\nfrom sklearn.impute import SimpleImputer\r\nFillingbyMedian=SimpleImputer(strategy='median')\r\nFillingbyMean=SimpleImputer(strategy='mean')\r\n\r\ntrain_myVolts['query_detected_language']=train_myVolts['query_detected_language'].fillna('missing')\r\ntrain_myVolts['abstract_detected_language']=train_myVolts['abstract_detected_language'].fillna('missing')\r\n\r\ntrain_myVolts['query_char_count']=FillingbyMean.fit_transform(train_myVolts['query_char_count'].values.reshape(-1,1))\r\ntrain_myVolts['query_word_count'] = FillingbyMedian.fit_transform(train_myVolts['query_word_count'].values.reshape(-1,1))\r\ntrain_myVolts['abstract_word_count'] = FillingbyMedian.fit_transform(train_myVolts['abstract_word_count'].values.reshape(-1,1))\r\ntrain_myVolts['abstract_char_count'] = FillingbyMedian.fit_transform(train_myVolts['abstract_char_count'].values.reshape(-1,1))\r\n\r\nmissing_value = train_myVolts[(train_myVolts['hour_request_received']== 23.0) & (train_myVolts['number_of_recs_in_set']==7)].query_char_count.mean()\r\nprint('missing value',missing_value)\r\n# use .loc so the assignment hits the original frame instead of a silently discarded copy\r\ntrain_myVolts.loc[train_myVolts['recommendation_set_id']==311406.0, 'query_char_count'] = missing_value\r\n\r\ntest_myVolts['query_char_count']=FillingbyMean.fit_transform(test_myVolts['query_char_count'].values.reshape(-1,1))\r\n\r\nall_data_myVolts = pd.concat([train_myVolts,test_myVolts],ignore_index=True)\r\ntrain_myVolts.query_identifier.head()\r\n\r\n# IMPORTANT: predict the missing value based on other columns!\r\nall_data_myVolts['query_char_count'].describe()\r\nall_data_myVolts.groupby(\"item_type\")['query_char_count'].mean().sort_values()\r\n\r\ndef item_type_estimator(i):\r\n    \"\"\"Grouping item_type feature by query_char_count\"\"\"\r\n    a = 0\r\n    if i<59:\r\n        a = \"TVs & monitors\"\r\n    elif i>=59 and i<62:\r\n        a = \"Networking\"\r\n    elif i>=62 and i<64:\r\n        a = \"Home entertainment\"\r\n    elif i>=64 and i<66:\r\n        a = \"Photo & frames\"\r\n    elif i>=66 and i<67.3:\r\n        a = \"Music making & pedals\"\r\n    elif i>=67.3 and i<68:\r\n        a = \"Everything else\"\r\n    elif i>=68 and i<68.8:\r\n        a = 'DAB & audio'\r\n    elif i>=68.8 and i<71.5:\r\n        a = 'DVD players'\r\n    elif i>=71.5 and i<77.5:\r\n        a = 'Gaming & toys'\r\n    else:\r\n        a = \"Hard drives & NAS\"\r\n    return a\r\n\r\nall_data_myVolts.groupby(\"cbf_parser\")['query_char_count'].mean().sort_values()\r\n\r\ndef cbf_parser_estimator(i):\r\n
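    # The thresholds in this function come from the per-parser means of\r\n    # query_char_count computed by the groupby just above (roughly:\r\n    # mlt_QP below 69, edismax_QP in [69, 70), standard_QP above).\r\n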
\"\"\"Grouping item_type feature by query_char_count\"\"\"\r\n a = 0\r\n if i<69:\r\n a = \"mlt_QP\"\r\n elif i>=69 and i<70:\r\n a = \"edismax_QP\"\r\n else:\r\n a = \"standard_QP\"\r\n return a\r\n\r\n# train_myVolts['item_type']=train_myVolts['item_type'].fillna('missing')\r\n# train_myVolts['cbf_parser']=train_myVolts['cbf_parser'].fillna('missing')\r\n\r\n##applying cabin estimator function. \r\ntrain_myVolts_Null_item_type= train_myVolts[train_myVolts['item_type'].isnull()]\r\ntrain_myVolts_Not_Null_item_type= train_myVolts[train_myVolts['item_type'].notnull()]\r\ntrain_myVolts_Null_cbf_parser= train_myVolts[train_myVolts['cbf_parser'].isnull()]\r\ntrain_myVolts_Not_Null_cbf_parser= train_myVolts[train_myVolts['cbf_parser'].notnull()]\r\n\r\ntrain_myVolts_Null_item_type['item_type']=train_myVolts_Null_item_type.query_char_count.apply(lambda x: item_type_estimator(x))\r\ntrain_myVolts_Null_cbf_parser['cbf_parser']=train_myVolts_Null_cbf_parser.query_char_count.apply(lambda x: cbf_parser_estimator(x))\r\n\r\n\r\ntrain_data_1=pd.concat([train_myVolts_Null_item_type, train_myVolts_Not_Null_item_type], axis=0)\r\ntrain_data_2=pd.concat([train_myVolts_Null_cbf_parser, train_myVolts_Not_Null_cbf_parser], axis=0)\r\n\r\ntrain_myVolts['item_type']=train_data_1['item_type']\r\ntrain_myVolts['cbf_parser']=train_data_2['cbf_parser']\r\ntrain_myVolts['country_by_ip']=train_myVolts['country_by_ip'].fillna('missing')\r\nprint('Values with NANs Train',train_myVolts[feature_cols].isnull().sum())\r\n\r\ny = train_myVolts.set_clicked\r\nX = train_myVolts[feature_cols]\r\nfrom category_encoders import TargetEncoder\r\ntarget_encode = TargetEncoder()\r\ntarget_encode.fit(X, y)\r\nX = target_encode.transform(X)\r\n\r\nfrom sklearn.model_selection import train_test_split \r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,random_state=1234)\r\n\r\n##check\r\nX_train.to_csv('X_train4.csv',index=False)\r\ny_train.to_csv('y_train4.csv',index=False)\r\n\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\n#from sklearn.ensemble import RandomForestClassifier\r\nfrom xgboost import XGBClassifier\r\nlogreg1 =LogisticRegression ()\r\n# logreg1 = RandomForestClassifier(n_estimators=500)\r\n# for random forest is 0.9920 but logistic is 0.9922\r\nlogreg2 = XGBClassifier()\r\n\r\n# fit model\r\nlogreg1.fit(X_train, y_train)\r\nlogreg2.fit(X_train, y_train)\r\n\r\n# make class predictions for the testing set\r\ny_pred_class1 = logreg1.predict(X_test) #pred1\r\ny_pred_class2 = logreg2.predict(X_test) #pred2\r\n#np.sqrt(metrics.mean_squared_error(y_test, y_pred_class))\r\n\r\n# calculate accuracy\r\nfrom sklearn import metrics\r\nprint('Accuracy Score1',metrics.accuracy_score(y_test, y_pred_class1))\r\nprint('Accuracy Score2',metrics.accuracy_score(y_test, y_pred_class2))\r\n# this produces a 2x2 numpy array (matrix)\r\nprint('Confusion Matrix1',metrics.confusion_matrix(y_test, y_pred_class1))\r\nprint('Confusion Matrix2',metrics.confusion_matrix(y_test, y_pred_class2))\r\n\r\n\r\nprint('Values with \\\\N Test',test_myVolts.isnull().sum())\r\ntest_myVolts[test_myVolts=='\\\\N']= np.nan\r\nprint('Values with NANs 
\r\ntest_myVolts['query_detected_language']=test_myVolts['query_detected_language'].fillna('missing')\r\ntest_myVolts['abstract_detected_language']=test_myVolts['abstract_detected_language'].fillna('missing')\r\n\r\n#test_myVolts['item_type']=test_myVolts['item_type'].fillna('missing')\r\n#test_myVolts['cbf_parser']=test_myVolts['cbf_parser'].fillna('missing')\r\n\r\n## applying the item_type and cbf_parser estimator functions\r\ntest_myVolts_Null_item_type= test_myVolts[test_myVolts['item_type'].isnull()]\r\ntest_myVolts_Not_Null_item_type= test_myVolts[test_myVolts['item_type'].notnull()]\r\ntest_myVolts_Null_cbf_parser= test_myVolts[test_myVolts['cbf_parser'].isnull()]\r\ntest_myVolts_Not_Null_cbf_parser= test_myVolts[test_myVolts['cbf_parser'].notnull()]\r\n\r\ntest_myVolts_Null_item_type['item_type']=test_myVolts_Null_item_type.query_char_count.apply(lambda x: item_type_estimator(x))\r\ntest_myVolts_Null_cbf_parser['cbf_parser']=test_myVolts_Null_cbf_parser.query_char_count.apply(lambda x: cbf_parser_estimator(x))\r\ntest_data_1=pd.concat([test_myVolts_Null_item_type, test_myVolts_Not_Null_item_type], axis=0)\r\ntest_data_2=pd.concat([test_myVolts_Null_cbf_parser, test_myVolts_Not_Null_cbf_parser], axis=0)\r\n\r\ntest_myVolts['item_type']=test_data_1['item_type']\r\ntest_myVolts['cbf_parser']=test_data_2['cbf_parser']\r\n\r\ntest_myVolts['country_by_ip']=test_myVolts['country_by_ip'].fillna('missing')\r\n\r\ntest_myVolts['query_char_count']=FillingbyMean.fit_transform(test_myVolts['query_char_count'].values.reshape(-1,1))\r\ntest_myVolts['query_word_count'] = FillingbyMedian.fit_transform(test_myVolts['query_word_count'].values.reshape(-1,1))\r\n\r\ntest_myVolts['abstract_word_count'] = FillingbyMedian.fit_transform(test_myVolts['abstract_word_count'].values.reshape(-1,1))\r\ntest_myVolts['abstract_char_count'] = FillingbyMedian.fit_transform(test_myVolts['abstract_char_count'].values.reshape(-1,1))\r\n\r\n# predict on the held-out test data\r\n################################################\r\nE = test_myVolts[feature_cols]\r\nE = target_encode.transform(E)\r\npredicted_value1 = logreg1.predict(E)\r\npredicted_value2 = logreg2.predict(E)\r\n\r\nfinal_predValue= predicted_value1\r\n# stacked_predictions = np.column_stack((y_pred_class1,y_pred_class2))\r\n# stacked_test_predictions = np.column_stack((predicted_value1,B2))\r\n\r\n# meta_model = LogisticRegression()\r\n# meta_model.fit(stacked_predictions,y_test)\r\n# B = meta_model.predict(stacked_test_predictions) #Final_prediction\r\nE.head()\r\n\r\ndfVolts=pd.DataFrame()\r\ndfVolts['recommendation_set_id'] = test_myVolts['recommendation_set_id']\r\ndfVolts['set_clicked'] = final_predValue\r\n\r\ndfVolts.to_csv('ML_Assignment2_My_Volts_20191116.csv',index=False)\r\n\r\ndfVolts[dfVolts['set_clicked']==1].sum()\r\n\r\n###############-----------------------------###################################\r\n##################JABREF#######################################################\r\n\r\n##### Reading Train and Test Data\r\ntrain_df = pd.read_csv('tcdml1920-rec-click-pred--training.csv')\r\ntest_df = pd.read_csv(\"tcdml1920-rec-click-pred--test.csv\")\r\n\r\n### filtering JabRef\r\ntrain_df_jabRef = train_df[train_df['organization_id'] == 1]\r\ntest_df_jabRef = test_df[test_df['organization_id'] == 1]\r\n\r\n## replacing \\\\N values with NaN\r\n\r\ntrain_df_jabRef = train_df_jabRef.replace(\"\\\\N\",np.nan)\r\n\r\ntest_df_jabRef = test_df_jabRef.replace(\"\\\\N\",np.nan)\r\n##########Removing 
columns with more than 50 percent NA values\r\n\r\npct_null = train_df_jabRef.isnull().sum() / len(train_df_jabRef)\r\nmissing_features = pct_null[pct_null > 0.50].index\r\ntrain_df_jabRef.drop(missing_features, axis=1, inplace=True)\r\n\r\npct_null = test_df_jabRef.isnull().sum() / len(test_df_jabRef)\r\nmissing_features = pct_null[pct_null > 0.50].index\r\ntest_df_jabRef.drop(missing_features, axis=1, inplace=True)\r\n\r\n######Removing more columns from test data\r\ntest_df_jabRef = test_df_jabRef.drop(['time_recs_recieved','time_recs_displayed','time_recs_viewed'],axis=1)\r\ntest_df_jabRef.insert(27, \"set_clicked\",np.nan) \r\n\r\n####################\r\ntrain_df_jabRef.describe()\r\ntrain_df_jabRef.info()\r\n\r\n##Finding Correlation\r\nprint(train_df_jabRef.corr(method ='pearson'))\r\n\r\n######Removing more Train and Test Columns\r\n\r\ntrain_df_jabRef = train_df_jabRef.drop(['item_type','application_type','query_identifier','request_received','response_delivered','rec_processing_time','timezone_by_ip','local_time_of_request','number_of_recs_in_set'],axis=1)\r\ntest_df_jabRef = test_df_jabRef.drop(['item_type','application_type','query_identifier','request_received','response_delivered','rec_processing_time','timezone_by_ip','local_time_of_request','number_of_recs_in_set'],axis=1)\r\n\r\n######Fillling categorical NA values Train Data\r\ntrain_df_jabRef[\"query_detected_language\"] = train_df_jabRef[\"query_detected_language\"].fillna(method='ffill')\r\ntrain_df_jabRef[\"app_lang\"] = train_df_jabRef[\"app_lang\"].fillna(method='ffill')\r\ntrain_df_jabRef[\"country_by_ip\"] = train_df_jabRef[\"country_by_ip\"].fillna(method='ffill')\r\n\r\n######Fillling categorical NA values Test Data\r\ntest_df_jabRef[\"query_detected_language\"] = test_df_jabRef[\"query_detected_language\"].fillna(method='ffill')\r\ntest_df_jabRef[\"app_lang\"] = test_df_jabRef[\"app_lang\"].fillna(method='ffill')\r\ntest_df_jabRef[\"country_by_ip\"] = test_df_jabRef[\"country_by_ip\"].fillna(method='ffill')\r\n\r\n######Fillling numerical NA values Train Data\r\ntrain_df_jabRef[\"local_hour_of_request\"] = train_df_jabRef[\"local_hour_of_request\"].fillna(train_df_jabRef[\"local_hour_of_request\"].median())\r\ntrain_df_jabRef[\"recommendation_algorithm_id_used\"] = train_df_jabRef[\"recommendation_algorithm_id_used\"].fillna(train_df_jabRef[\"recommendation_algorithm_id_used\"].median())\r\ntrain_df_jabRef[\"app_version\"] = train_df_jabRef['app_version'].replace('*unknown*','NA')\r\ntrain_df_jabRef[\"app_version\"] = train_df_jabRef['app_version'].replace(np.nan,'NA')\r\n\r\n######Fillling numerical NA values Test Data\r\ntest_df_jabRef[\"local_hour_of_request\"] = test_df_jabRef[\"local_hour_of_request\"].fillna(test_df_jabRef[\"local_hour_of_request\"].median())\r\ntest_df_jabRef[\"recommendation_algorithm_id_used\"] = test_df_jabRef[\"recommendation_algorithm_id_used\"].fillna(test_df_jabRef[\"recommendation_algorithm_id_used\"].median())\r\ntest_df_jabRef[\"app_version\"] = test_df_jabRef['app_version'].replace('*unknown*','NA')\r\ntest_df_jabRef[\"app_version\"] = test_df_jabRef['app_version'].replace(np.nan,'NA')\r\n\r\nFillingbyMean=SimpleImputer(strategy='mean')\r\ntest_df_jabRef['query_char_count']=FillingbyMean.fit_transform(test_df_jabRef['query_char_count'].values.reshape(-1,1))\r\n\r\ntrain_df_jabRef['query_char_count']=FillingbyMean.fit_transform(train_df_jabRef['query_char_count'].values.reshape(-1,1))\r\n\r\nall_data = 
pd.concat([train_df_jabRef,test_df_jabRef],ignore_index=True)\r\n\r\npredict1 = all_data.copy()\r\npredict1 = predict1.drop(['ctr'],axis=1)\r\n\r\npredict1.groupby(\"cbf_parser\")['query_char_count'].mean().sort_values()\r\npredict1_cbfparser_null= predict1[predict1['cbf_parser'].isnull()]\r\n\r\ndef cbf_parser_estimator_jab(i):\r\n \"\"\"Grouping item_type feature by the first letter\"\"\"\r\n a = 0\r\n if i<62:\r\n a = \"mlt_QP\"\r\n elif i>=62 and i<66.5:\r\n a = \"edismax_QP\"\r\n else:\r\n a = \"standard_QP\"\r\n return a\r\n\r\npredict1_cbfparser_null['cbf_parser']=predict1_cbfparser_null.query_char_count.apply(lambda x: cbf_parser_estimator_jab(x))\r\npredict1_cbfparser_Not_null= predict1[predict1['cbf_parser'].notnull()]\r\nx = pd.concat([predict1_cbfparser_Not_null, predict1_cbfparser_null], axis=0)\r\npredict1['cbf_parser'] = x['cbf_parser']\r\n\r\n#####Categorical Encoding\r\npredict1 = predict1.replace('unknown','NA')\r\npredict1['algorithm_class'] = predict1['algorithm_class'].replace(np.nan,'NA')\r\npredict1['cbf_parser'] = predict1['cbf_parser'].replace(np.nan,'NA')\r\n\r\npredict2 = predict1.copy()\r\npredict2 = pd.get_dummies(predict2, columns=['query_detected_language','cbf_parser','algorithm_class', 'app_version', 'app_lang', 'country_by_ip'])\r\n\r\nfrom sklearn.preprocessing import LabelBinarizer\r\nlb_style = LabelBinarizer()\r\npredict2[\"search_title\"] = lb_style.fit_transform(predict2[\"search_title\"])\r\npredict2[\"search_keywords\"] = lb_style.fit_transform(predict2[\"search_keywords\"])\r\npredict2[\"search_abstract\"] = lb_style.fit_transform(predict2['search_abstract'])\r\n\r\nTest_JabRef_df = predict2[predict2['clicks'] == 'nA']\r\ntrain_jabRef_df = predict2[predict2['clicks'] != 'nA']\r\n\r\ntrain_jabRef_df = train_jabRef_df.drop(['clicks'],axis=1)\r\nTest_JabRef_df = Test_JabRef_df.drop(['clicks'],axis=1)\r\n\r\n######Model Predict\r\nX = np.array(train_jabRef_df.drop('set_clicked',axis=1))\r\ny = np.array(train_jabRef_df['set_clicked'])\r\nfrom sklearn.model_selection import KFold, train_test_split\r\nkfold = KFold(n_splits=100, shuffle=True, random_state=42)\r\nscores = []\r\nfor train_index, test_index in kfold.split(train_jabRef_df): \r\n X_train, X_test = X[train_index], X[test_index]\r\n y_train,y_test = y[train_index],y[test_index]\r\n \r\nfrom sklearn.tree import DecisionTreeClassifier\r\nclf = DecisionTreeClassifier(criterion=\"entropy\",max_depth=32)\r\nclf.fit(X_train, y_train)\r\n# make class predictions for the testing set\r\ny_pred_class_co = clf.predict(X_test)\r\n#np.sqrt(metrics.mean_squared_error(y_test, y_pred_class))\r\n# calculate accuracy\r\nfrom sklearn import metrics\r\nprint('Accuracy Score',metrics.accuracy_score(y_test, y_pred_class_co))\r\n\r\n##########Implementing Model on Test Data\r\nTest_JabRef_df.drop('set_clicked',axis=1,inplace=True)\r\ny_pred_read = clf.predict(Test_JabRef_df)\r\n\r\ndfJabref=pd.DataFrame()\r\ndfJabref['recommendation_set_id'] = Test_JabRef_df['recommendation_set_id']\r\ndfJabref['set_clicked'] = y_pred_read\r\n\r\ndfJabref.to_csv('JabRefPrediction.csv',index=False)\r\n\r\n#predict_tests = pd.read_csv(\"tcdml1920-rec-click-pred--test.csv\")\r\n#predict_tests['set_clicked'] = pd.DataFrame(y_pred_read).iloc[:,-1]\r\n#pd.DataFrame(y_pred_read).to_csv(\"JabRefPrediction.csv\", index = False)\r\n\r\n################----------------------------###################################\r\n#######BLOG######################\r\n\r\nfrom category_encoders import TargetEncoder\r\n#from sklearn.ensemble import 
RandomForestClassifier\r\nfrom xgboost import XGBClassifier\r\n\r\n#train = pd.read_csv(\"tcdml1920-rec-click-pred--training.csv\")\r\n#test = pd.read_csv(\"tcdml1920-rec-click-pred--test.csv\")\r\n\r\ntrain_theBlog = train[train['organization_id'] == 8]\r\n\r\nprint(len(train_theBlog.columns))\r\ntrain_theBlog = train_theBlog.drop([\"user_id\", \"session_id\", \"document_language_provided\",\"year_published\",\"number_of_authors\",\"first_author_id\",\"num_pubs_by_first_author\",\"app_version\",\"app_lang\",\"user_os\",\"user_os_version\",\"user_java_version\",\"user_timezone\"], axis=1)\r\ntrain_theBlog = train_theBlog.drop([\"response_delivered\", \"rec_processing_time\", \"number_of_recs_in_set\", \"time_recs_recieved\", \"time_recs_displayed\", \"time_recs_viewed\", \"clicks\",\"ctr\"], axis=1)\r\nprint('Values with \\\\N Train',train_theBlog.isnull().sum())\r\n\r\nprint(len(train_theBlog.columns))\r\n\r\n \r\ntrain_theBlog[train_theBlog=='\\\\N']= np.nan\r\n\r\nprint('Values with NANs Train',train_theBlog.isnull().sum())\r\n\r\nfrom sklearn.impute import SimpleImputer\r\nFillingbyMedian=SimpleImputer(strategy='median')\r\n\r\nFillingbyMean=SimpleImputer(strategy='mean')\r\n\r\nFillingbyMode=SimpleImputer(strategy='most_frequent')\r\n\r\n###############################################################################\r\ntrain_theBlog['query_detected_language']=train_theBlog['query_detected_language'].fillna('missing')\r\ntrain_theBlog['abstract_detected_language']=train_theBlog['abstract_detected_language'].fillna('missing')\r\n\r\ntrain_theBlog['item_type']=train_theBlog['item_type'].fillna('missing')\r\ntrain_theBlog['cbf_parser']=train_theBlog['cbf_parser'].fillna('missing')\r\n\r\n\r\ntrain_theBlog['query_char_count']=FillingbyMean.fit_transform(train_theBlog['query_char_count'].values.reshape(-1,1))\r\ntrain_theBlog['query_document_id'] = FillingbyMedian.fit_transform(train_theBlog['query_document_id'].values.reshape(-1,1))\r\ntrain_theBlog['abstract_word_count'] = FillingbyMode.fit_transform(train_theBlog['abstract_word_count'].values.reshape(-1,1))\r\ntrain_theBlog['abstract_char_count'] = FillingbyMode.fit_transform(train_theBlog['abstract_char_count'].values.reshape(-1,1))\r\ntrain_theBlog['timezone_by_ip'] = FillingbyMedian.fit_transform(train_theBlog['timezone_by_ip'].values.reshape(-1,1))\r\n\r\ny = train_theBlog.set_clicked\r\n\r\ntrain_theBlog = train_theBlog.drop('set_clicked',axis=1)\r\n# define X and y\r\nfeature_cols = train_theBlog.columns\r\n\r\n# X is a matrix, hence we use [] to access the features we want in feature_cols\r\nX = train_theBlog[feature_cols]\r\n\r\n################################################\r\ntarget_encode = TargetEncoder()\r\ntarget_encode.fit(X, y)\r\nX = target_encode.transform(X)\r\n################################################\r\nfrom sklearn.model_selection import train_test_split \r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,random_state=1234)\r\n\r\n#from sklearn.linear_model import LogisticRegression\r\n\r\n# instantiate model\r\n#logreg = LogisticRegression()\r\nlogreg = XGBClassifier()\r\n\r\n# fit model\r\nlogreg.fit(X_train, y_train)\r\n\r\n# make class predictions for the testing set\r\ny_pred_class = logreg.predict(X_test)\r\n#np.sqrt(metrics.mean_squared_error(y_test, y_pred_class))\r\n\r\n# calculate accuracy\r\nfrom sklearn import metrics\r\nprint('Accuracy Score',metrics.accuracy_score(y_test, y_pred_class))\r\n\r\n# IMPORTANT: first argument is true values, second argument is predicted 
values\r\n# this produces a 2x2 numpy array (matrix)\r\nprint('Confusion Matrix',metrics.confusion_matrix(y_test, y_pred_class))\r\n\r\n#Predicting on Testing Data\r\ntest_theBlog = test[test['organization_id'] == 8]\r\ntest_theBlog = test_theBlog.drop([\"user_id\", \"session_id\", \"document_language_provided\",\"year_published\",\"number_of_authors\",\"first_author_id\",\"num_pubs_by_first_author\",\"app_version\",\"app_lang\",\"user_os\",\"user_os_version\",\"user_java_version\",\"user_timezone\"], axis=1)\r\ntest_theBlog = test_theBlog.drop([\"response_delivered\", \"rec_processing_time\", \"number_of_recs_in_set\", \"time_recs_recieved\", \"time_recs_displayed\", \"time_recs_viewed\", \"clicks\",\"ctr\"], axis=1)\r\n\r\nprint('Values with \\\\N Test',test_theBlog.isnull().sum())\r\ntest_theBlog[test_theBlog=='\\\\N']= np.nan\r\nprint('Values with NANs Train',train_theBlog.isnull().sum())\r\n###############################################################################\r\ntest_theBlog['query_detected_language']=test_theBlog['query_detected_language'].fillna('missing')\r\ntest_theBlog['abstract_detected_language']=test_theBlog['abstract_detected_language'].fillna('missing')\r\n\r\ntest_theBlog['item_type']=test_theBlog['item_type'].fillna('missing')\r\ntest_theBlog['cbf_parser']=test_theBlog['cbf_parser'].fillna('missing')\r\n\r\n\r\ntest_theBlog['query_char_count']=FillingbyMean.fit_transform(test_theBlog['query_char_count'].values.reshape(-1,1))\r\ntest_theBlog['query_document_id'] = FillingbyMedian.fit_transform(test_theBlog['query_document_id'].values.reshape(-1,1))\r\ntest_theBlog['abstract_word_count'] = FillingbyMode.fit_transform(test_theBlog['abstract_word_count'].values.reshape(-1,1))\r\ntest_theBlog['abstract_char_count'] = FillingbyMode.fit_transform(test_theBlog['abstract_char_count'].values.reshape(-1,1))\r\ntest_theBlog['timezone_by_ip'] = FillingbyMedian.fit_transform(test_theBlog['timezone_by_ip'].values.reshape(-1,1))\r\n\r\ntest_theBlog = test_theBlog.drop('set_clicked',axis=1)\r\ntest_theBlog_backup = test_theBlog\r\n# define X and y\r\nfeature_cols = test_theBlog.columns\r\nprint(\"feature_cols\",feature_cols)\r\n\r\n\r\n\r\n# X is a matrix, hence we use [] to access the features we want in feature_cols\r\ntest_theBlog = test_theBlog[feature_cols]\r\n\r\n# split X and y into training and testing sets\r\n################################################\r\nE = test_theBlog\r\nE = target_encode.transform(E)\r\n\r\npredicted_value = logreg.predict(E)\r\n\r\ndfBlog=pd.DataFrame()\r\ndfBlog['recommendation_set_id'] = test_theBlog_backup['recommendation_set_id']\r\ndfBlog['set_clicked'] = predicted_value\r\n\r\ndfBlog.to_csv('ML_Assignment2_Recommender_SystemXGboost.csv',index=False)\r\n\r\n################----------------------------###################################\r\n#Code to combine the all three content providers\r\npred_myVolts = pd.read_csv('ML_Assignment2_My_Volts_20191116.csv')\r\npred_jabRef = pd.read_csv('JabRefPrediction.csv')\r\npred_theBlog = pd.read_csv('ML_Assignment2_Recommender_SystemXGboost.csv')\r\n\r\npred = pd.concat([pred_myVolts,pred_jabRef, pred_theBlog],ignore_index=True)\r\npred=pred.sort_values(by=['recommendation_set_id'])\r\npred.to_csv(\"tcdml1920-rec-click-pred--submission file.csv\",index=False,float_format='%.f')\r\n'done'","sub_path":"Team29_RecommendorSystems_Machine Learning_Classification Model.py","file_name":"Team29_RecommendorSystems_Machine Learning_Classification 
Model.py","file_ext":"py","file_size_in_byte":24274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"652462883","text":"import wx\nfrom korail7 import Korail\nimport time as t\nfrom datetime import datetime\nimport requests\nimport time as t\nimport threading\nimport os\n\n\nnum_rep = {}\nbase_num = [str(i) for i in range(0,100)]\nnum_rep_dict = {i:'0'+i if int(i) <10 else i for i in base_num}\n\ntrain_dict = {'ktx':100,'itx':101,'무궁화':102}\nuser_list = [['김다혜','1261067799','dngkgkzlzl7*'],\n ['이교운','010-2857-7771','2rydnstm@@']]\nuser_dict = {'김다혜':{'id':'1261067799','pwd':'dngkgkzlzl7*'},\n '이교운':{'id':'010-2857-7771','pwd':'2rydnstm@@'}}\ntime_interval = 1.0\n\n\ndef get_elapsed_time(start,curr):\n gap = curr - start\n hh = int(gap/3600)\n mm = int((gap-3600*int(gap/3600))/60)\n ss = int(gap - 3600*int(gap/3600) -60*int((gap-3600*int(gap/3600))/60))\n return num_rep_dict[str(hh)]+\":\"+num_rep_dict[str(mm)]+\":\"+num_rep_dict[str(ss)]\n\nclass ExamplePanel(wx.Panel):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent)\n self.run = True\n self.InitUI()\n \n def InitUI(self,):\n # create some sizers\n mainSizer = wx.BoxSizer(wx.VERTICAL)\n grid = wx.GridBagSizer(hgap=5, vgap=5)\n hSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n # A Button\n self.button =wx.Button(self, label=\"확인\")\n self.Bind(wx.EVT_BUTTON, self.OnClick,self.button)\n\n # A multiline TextCtrl - This is here to show how the events work in this program, don't pay too much attention to it\n self.logger = wx.TextCtrl(self, size=(500,300), style=wx.TE_MULTILINE | wx.TE_READONLY)\n\n self.lbl_user = wx.StaticText(self, label=\"사용자\")\n grid.Add(self.lbl_user, pos=(0,0))\n self.edit_user = wx.ComboBox(self,-1,'김다혜', size=(95, -1), choices=[i[0] for i in user_list], style=wx.CB_DROPDOWN)\n grid.Add(self.edit_user, pos=(0,1))\n\n\n # Radio Boxes\n train_list = train_dict.keys()\n self.edit_train_type = wx.RadioBox(self, label=\"What color would you like ?\", pos=(20, 210), choices=train_list, majorDimension=3,\n style=wx.RA_SPECIFY_COLS)\n grid.Add(self.edit_train_type, pos=(1,0), span=(1,2))\n \n self.dep_list = ['서울','대전','울산']\n self.lbl_dep = wx.StaticText(self, label=\"출발지\")\n grid.Add(self.lbl_dep, pos=(2,0))\n self.edit_dep = wx.ComboBox(self,-1,'서울', size=(95, -1), choices=self.dep_list, style=wx.CB_DROPDOWN)\n grid.Add(self.edit_dep, pos=(2,1))\n \n self.arr_list = ['서울','대전','울산']\n self.lbl_arr = wx.StaticText(self, label=\"도착지\")\n grid.Add(self.lbl_arr, pos=(3,0))\n self.edit_arr = wx.ComboBox(self,-1,'대전', size=(95, -1), choices=self.arr_list, style=wx.CB_DROPDOWN)\n grid.Add(self.edit_arr, pos=(3,1))\n \n # the combobox Control int(datetime.now().strftime('%Y'))\n self.year_list = [str(i) for i in range(int(datetime.now().strftime('%Y')),int(datetime.now().strftime('%Y'))+2)]\n self.lbl_year = wx.StaticText(self, label=\"출발(년/year)\")\n grid.Add(self.lbl_year, pos=(4,0))\n self.edit_year = wx.ComboBox(self,-1,str(int(datetime.now().strftime('%Y'))), size=(95, -1), choices=self.year_list, style=wx.CB_DROPDOWN)\n grid.Add(self.edit_year, pos=(4,1))\n \n self.month_list = [str(i) for i in range(1,13)]\n self.lbl_month = wx.StaticText(self, label=\"출발(월/month)\")\n grid.Add(self.lbl_month, pos=(5,0))\n self.edit_month = wx.ComboBox(self,-1,str(int(datetime.now().strftime('%m'))), size=(95, -1), choices=self.month_list, style=wx.CB_DROPDOWN)\n grid.Add(self.edit_month, pos=(5,1))\n \n self.day_list = [str(i) for i in range(1,32)]\n self.lbl_day = 
wx.StaticText(self, label=\"출발(일/day)\")\n grid.Add(self.lbl_day, pos=(6,0))\n self.edit_day = wx.ComboBox(self,-1,str(int(datetime.now().strftime('%d'))), size=(95, -1), choices=self.day_list, style=wx.CB_DROPDOWN)\n grid.Add(self.edit_day, pos=(6,1))\n \n self.hour_list = [str(i) for i in range(1,25)]\n self.lbl_hour = wx.StaticText(self, label=\"출발(시간)\")\n grid.Add(self.lbl_hour, pos=(7,0))\n self.edit_hour = wx.ComboBox(self,-1,str(int(datetime.now().strftime('%H'))),size=(95, -1), choices=self.hour_list, style=wx.CB_DROPDOWN)\n grid.Add(self.edit_hour, pos=(7,1))\n \n self.min_list =[str(i) for i in range(1,61)]\n self.lbl_min = wx.StaticText(self, label=\"출발(분)\")\n grid.Add(self.lbl_min, pos=(8,0))\n self.edit_min = wx.ComboBox(self,-1,str(int(datetime.now().strftime('%M'))), size=(95, -1), choices=self.min_list, style=wx.CB_DROPDOWN)\n grid.Add(self.edit_min, pos=(8,1))\n \n self.lbl_count = wx.StaticText(self, label=\"출발시간으로 부터 예약하길 원하는 기차 수:\")\n grid.Add(self.lbl_count, pos=(9,0))\n self.edit_count = wx.TextCtrl(self, value=\"1\", size=(140,-1))\n grid.Add(self.edit_count, pos=(9,1))\n self.lbl_count_ex1 = wx.StaticText(self, label=\"(ex. 1로 설정시 출발시간으로 부터 가장 가까운 열차만 예약시도\")\n grid.Add(self.lbl_count_ex1, pos=(10,0))\n self.lbl_count_ex2 = wx.StaticText(self, label=\"(ex. 2로 설정시 출발시간으로 부터 가장 가까운 2개 열차 예약시도\")\n grid.Add(self.lbl_count_ex2, pos=(11,0))\n \n hSizer.Add(grid, 0, wx.ALL, 5)\n hSizer.Add(self.logger)\n mainSizer.Add(hSizer, 0, wx.ALL, 5)\n mainSizer.Add(self.button, 0, wx.CENTER)\n self.SetSizerAndFit(mainSizer)\n\n\n def OnClick(self,event):\n self.logger.AppendText(\"열차: \")\n self.logger.AppendText(\"%s\\n\" %self.edit_train_type.GetStringSelection())\n self.logger.AppendText(\"출발지: \")\n self.logger.AppendText(self.edit_dep.GetValue()+\"\\n\")\n self.logger.AppendText(\"도착지: \")\n self.logger.AppendText(self.edit_arr.GetValue()+\"\\n\")\n self.logger.AppendText(\"출발시간: \")\n self.logger.AppendText(self.edit_year.GetValue()+\"/\"+self.edit_month.GetValue()+\"/\"+self.edit_day.GetValue() \\\n +\" \"+num_rep_dict[self.edit_hour.GetValue()]+\":\"+num_rep_dict[self.edit_min.GetValue()]+\"\\n\")\n self.logger.AppendText(\"예약 가능 기차 수: \")\n self.logger.AppendText(self.edit_count.GetValue()+\"\\n\")\n dep = self.edit_dep.GetValue().encode('utf-8','ignore')\n arr = self.edit_arr.GetValue().encode('utf-8','ignore')\n date = self.edit_year.GetValue()+num_rep_dict[self.edit_month.GetValue()]+num_rep_dict[self.edit_day.GetValue()]\n dep_time = num_rep_dict[self.edit_hour.GetValue()]+num_rep_dict[self.edit_min.GetValue()]+'00'\n train_type = self.edit_train_type.GetStringSelection().encode('utf-8','ignore')\n train_count = range(int(self.edit_count.GetValue()))\n user_id = user_dict[self.edit_user.GetValue().encode('utf-8','ignore')]['id']\n user_pwd = user_dict[self.edit_user.GetValue().encode('utf-8','ignore')]['pwd']\n self.korail = Korail(user_id,user_pwd)\n self.logger.AppendText(\"로그인 성공!\\n\")\n self.logger.AppendText(\"자동예약 시작.\\n\")\n \n thread = threading.Thread(target=self.auto_reserve,args=(dep,arr,date,dep_time,train_count,train_type,user_id,user_pwd,))\n thread.start()\n\n def train_reserve(self,idx,start_time,dep,arr,date,dep_time,train_count,train_type,user_id,user_pwd):\n self.logger.AppendText(str(idx))\n self.logger.AppendText(\"회 시도, \")\n trains = self.korail.search_train(dep, arr, date, dep_time,train_dict[train_type],include_no_seats = True)\n self.logger.AppendText(\"소요시간: \")\n self.logger.AppendText(get_elapsed_time(start_time,t.time())+\"\\n\")\n for 
j in train_count:\n if trains[j].has_general_seat() == False:\n self.logger.AppendText(\"예약시도\")\n self.logger.AppendText(\"{\"+str(j+1)+\"} ==> \"+str(trains[j])+\"\\n\")\n if idx % 2000 == 0:\n self.korail = Korail(user_id,user_pwd)\n else:\n try:\n self.logger.AppendText(\"기회포착\")\n self.logger.AppendText(\" ==> \")\n seat = self.korail.reserve(trains[j])\n self.logger.AppendText(\"예약성공\")\n self.logger.AppendText(\"\\n\"+str(seat))\n self.run = False\n except Exception:\n self.logger.AppendText(\"실패\")\n self.logger.AppendText(\"\\n\")\n pass\n\n def auto_reserve(self,dep,arr,date,dep_time,train_count,train_type,user_id,user_pwd):\n t.sleep(2.0)\n self.run = True\n start_time = t.time()\n idx = 0\n while self.run:\n t.sleep(time_interval)\n wx.CallAfter(self.train_reserve,idx,start_time,dep,arr,date,dep_time,train_count,train_type,user_id,user_pwd)\n idx+=1\n\n \napp = wx.App(False)\nframe = wx.Frame(None)\npanel = ExamplePanel(frame)\nframe.Fit()\nframe.Show()\napp.MainLoop()","sub_path":"ktx.py","file_name":"ktx.py","file_ext":"py","file_size_in_byte":9205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"318083502","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 PAL Robotics SL.\n# Released under the BSD License.\n#\n# Authors:\n# * Siegfried-A. Gevatter\n\nimport curses\nimport math\n\nimport rospy\nfrom geometry_msgs.msg import Twist\n\n\nclass Velocity(object):\n\n def __init__(self, min_velocity, max_velocity, num_steps):\n assert min_velocity > 0 and max_velocity > 0 and num_steps > 0\n self._min = min_velocity\n self._max = max_velocity\n self._num_steps = num_steps\n if self._num_steps > 1:\n self._step_incr = (max_velocity - min_velocity) / (self._num_steps - 1)\n else:\n # If num_steps is one, we always use the minimum velocity.\n self._step_incr = 0\n\n def __call__(self, value, step):\n \"\"\"\n Takes a value in the range [0, 1] and the step and returns the\n velocity (usually m/s or rad/s).\n \"\"\"\n if step == 0:\n return 0\n\n assert step > 0 and step <= self._num_steps\n max_value = self._min + self._step_incr * (step - 1)\n return value * max_value\n\n\nclass TextWindow():\n\n _screen = None\n _window = None\n _num_lines = None\n\n def __init__(self, stdscr, lines=10):\n self._screen = stdscr\n self._screen.nodelay(True)\n curses.curs_set(0)\n\n self._num_lines = lines\n\n def read_key(self):\n keycode = self._screen.getch()\n return keycode if keycode != -1 else None\n\n def clear(self):\n self._screen.clear()\n\n def write_line(self, lineno, message):\n if lineno < 0 or lineno >= self._num_lines:\n raise ValueError('lineno out of bounds')\n height, width = self._screen.getmaxyx()\n y = (height / self._num_lines) * lineno\n x = 10\n for text in message.split('\\n'):\n text = text.ljust(width)\n self._screen.addstr(y, x, text)\n y += 1\n\n def refresh(self):\n self._screen.refresh()\n\n def beep(self):\n curses.flash()\n\n\nclass KeyTeleop():\n\n _interface = None\n\n _linear = None\n _angular = None\n\n def __init__(self, interface):\n self._interface = interface\n self._pub_cmd = rospy.Publisher('cmd_vel', Twist)\n\n self._hz = rospy.get_param('~hz', 10)\n\n self._num_steps = rospy.get_param('~turbo/steps', 4)\n\n forward_min = rospy.get_param('~turbo/linear_forward_min', 0.05)\n forward_max = rospy.get_param('~turbo/linear_forward_max', 0.3)\n self._forward = Velocity(forward_min, forward_max, self._num_steps)\n\n backward_min = rospy.get_param('~turbo/linear_backward_min', 
0.03)\n backward_max = rospy.get_param('~turbo/linear_backward_max', 0.1)\n self._backward = Velocity(backward_min, backward_max, self._num_steps)\n\n angular_min = rospy.get_param('~turbo/angular_min', 0.3)\n angular_max = rospy.get_param('~turbo/angular_max', 0.7)\n self._rotation = Velocity(angular_min, angular_max, self._num_steps)\n\n def run(self):\n self._linear = 0\n self._angular = 0\n\n rate = rospy.Rate(self._hz)\n while True:\n keycode = self._interface.read_key()\n if keycode:\n if self._key_pressed(keycode):\n self._publish()\n else:\n self._publish()\n rate.sleep()\n\n def _get_twist(self, linear, angular):\n twist = Twist()\n if linear >= 0:\n twist.linear.x = self._forward(1.0, linear)\n else:\n twist.linear.x = self._backward(-1.0, -linear)\n twist.angular.z = self._rotation(math.copysign(1, angular), abs(angular))\n return twist\n\n def _key_pressed(self, keycode):\n movement_bindings = {\n curses.KEY_UP: (1, 0),\n curses.KEY_DOWN: (-1, 0),\n curses.KEY_LEFT: (0, 1),\n curses.KEY_RIGHT: (0, -1),\n }\n speed_bindings = {\n ord(' '): (0, 0),\n }\n if keycode in movement_bindings:\n acc = movement_bindings[keycode]\n ok = False\n if acc[0]:\n linear = self._linear + acc[0]\n if abs(linear) <= self._num_steps:\n self._linear = linear\n ok = True\n if acc[1]:\n angular = self._angular + acc[1]\n if abs(angular) <= self._num_steps:\n self._angular = angular\n ok = True\n if not ok:\n self._interface.beep()\n elif keycode in speed_bindings:\n acc = speed_bindings[keycode]\n # Note: bounds aren't enforced here!\n if acc[0] is not None:\n self._linear = acc[0]\n if acc[1] is not None:\n self._angular = acc[1]\n\n elif keycode == ord('q'):\n rospy.signal_shutdown('Bye')\n else:\n return False\n\n return True\n\n def _publish(self):\n self._interface.clear()\n self._interface.write_line(2, 'Linear: %d, Angular: %d' % (self._linear, self._angular))\n self._interface.write_line(5, 'Use arrow keys to move, space to stop, q to exit.')\n self._interface.refresh()\n\n twist = self._get_twist(self._linear, self._angular)\n self._pub_cmd.publish(twist)\n\n\nclass SimpleKeyTeleop():\n def __init__(self, interface):\n self._interface = interface\n self._pub_cmd = rospy.Publisher('cmd_vel', Twist)\n\n self._hz = rospy.get_param('~hz', 10)\n\n self._forward_rate = rospy.get_param('~forward_rate', 0.15)\n self._backward_rate = rospy.get_param('~backward_rate', 0.15)\n self._rotation_rate = rospy.get_param('~rotation_rate', 0.8)\n self._last_pressed = {}\n self._angular = 0\n self._linear = 0\n\n movement_bindings = {\n curses.KEY_UP: (1, 0),\n curses.KEY_DOWN: (-1, 0),\n curses.KEY_LEFT: (0, 1),\n curses.KEY_RIGHT: (0, -1),\n }\n\n def run(self):\n rate = rospy.Rate(self._hz)\n self._running = True\n while self._running:\n while True:\n keycode = self._interface.read_key()\n if keycode is None:\n break\n self._key_pressed(keycode)\n self._set_velocity()\n self._publish()\n rate.sleep()\n\n def _get_twist(self, linear, angular):\n twist = Twist()\n twist.linear.x = linear\n twist.angular.z = angular\n return twist\n\n def _set_velocity(self):\n now = rospy.get_time()\n keys = []\n for a in self._last_pressed:\n if now - self._last_pressed[a] < 0.4:\n keys.append(a)\n linear = 0.0\n angular = 0.0\n for k in keys:\n l, a = self.movement_bindings[k]\n linear += l\n angular += a\n if linear > 0:\n linear = linear * self._forward_rate\n else:\n linear = linear * self._backward_rate\n angular = angular * self._rotation_rate\n self._angular = angular\n self._linear = linear\n\n def _key_pressed(self, 
keycode):\n if keycode == ord('q'):\n self._running = False\n rospy.signal_shutdown('Bye')\n elif keycode in self.movement_bindings:\n self._last_pressed[keycode] = rospy.get_time()\n\n def _publish(self):\n self._interface.clear()\n self._interface.write_line(2, 'Linear: %f, Angular: %f' % (self._linear, self._angular))\n self._interface.write_line(5, 'Use arrow keys to move, q to exit.')\n self._interface.refresh()\n\n twist = self._get_twist(self._linear, self._angular)\n self._pub_cmd.publish(twist)\n\n\ndef main(stdscr):\n rospy.init_node('key_teleop')\n app = SimpleKeyTeleop(TextWindow(stdscr))\n app.run()\n\nif __name__ == '__main__':\n try:\n curses.wrapper(main)\n except rospy.ROSInterruptException:\n pass\n","sub_path":"ca_tools/scripts/key_teleop.py","file_name":"key_teleop.py","file_ext":"py","file_size_in_byte":7980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"546920406","text":"import json\nimport os\nfrom datetime import datetime, timedelta\nfrom random import shuffle\nfrom ripe.atlas.cousteau import (\n Ping,\n Traceroute,\n AtlasSource,\n AtlasCreateRequest\n)\n\n# Modify the following parameters\nATLAS_API_KEY = \" \"\nmax_nb_servers = 3\nnb_requested_probes = 3\nping_interval = 10800\ntraceroute_interval = 10800\ntag_list = [\"test-code-esib\"] \n\nwith open('destinationNetworks.json', 'r') as f:\n networks = json.load(f)\n\nwith open('countries.json', 'r') as f:\n countries = json.load(f)\n\nfor source_country, source_country_code in countries.items():\n measurements = dict()\n for destination_network, destination_servers in networks.items():\n measurements[destination_network] = list()\n # Select randomly max_nb_servers servers from each network\n shuffle(destination_servers)\n for index, server in enumerate(destination_servers):\n if index == max_nb_servers:\n break\n ping = Ping(af=4, target=server['host'],\n description=\"From {} to {}\".format(source_country_code, destination_network),\n interval=ping_interval, tags=tag_list)\n traceroute = Traceroute(\n af=4,\n target=server['host'],\n description=\"From {} to {}\".format(source_country_code, destination_network),\n protocol=\"ICMP\",\n interval=traceroute_interval,\n tags=tag_list\n )\n # Request 3 probes\n source = AtlasSource(type=\"country\", value=source_country_code, requested=nb_requested_probes)\n atlas_request = AtlasCreateRequest(\n start_time=datetime.utcnow() + timedelta(seconds=60),\n stop_time=datetime.utcnow() + timedelta(days=2),\n key=ATLAS_API_KEY,\n measurements=[ping, traceroute],\n sources=[source],\n is_oneoff=False\n )\n (is_success, response) = atlas_request.create()\n if is_success:\n measurements[destination_network].append(\n {\"host\": server['host'], \"is_success\": is_success,\n \"measurement_id\": response['measurements']})\n else:\n measurements[destination_network].append(\n {\"host\": server['host'], \"is_success\": is_success, \"reason\": response})\n\n filename = \"measurements/{}.json\".format(source_country_code)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, \"w\") as f:\n json.dump(measurements, f, indent=4, sort_keys=True)\n","sub_path":"createAllMeasurements.py","file_name":"createAllMeasurements.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"116883811","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) 
\n# [GCC 8.4.0]\n# Embedded file name: /home/christian/PycharmProjects/Yeti/yeti/config_manger.py\n# Compiled at: 2015-10-06 10:01:46\n# Size of source mod 2**32: 4197 bytes\nimport logging\nfrom .module_loader import ModuleLoader\nlogger = logging.getLogger('yeti.ConfigManager')\n\nclass ConfigurationError(Exception):\n pass\n\n\nclass ConfigManager(object):\n __doc__ = '\\n Uses instances of :class:`ModuleLoader` to load modules from reference lists in a configuration file.\\n '\n _STARTUP_MOD_SECTION = 'StartupMods'\n _config_path = ''\n\n def __init__(self):\n self.config_structure = None\n self.module_loaders = dict()\n\n def load_startup_mods(self, context):\n \"\"\"\n Find all modules in the \"StartupMods\" section of the config file, and load them with instances of :class:`ModuleLoader`\n into the specified context.\n\n :param context: The context to load the modules into.\n \"\"\"\n if self.config_structure is None:\n raise ConfigurationError('No config file loaded.')\n for module_name in self.config_structure[self._STARTUP_MOD_SECTION]:\n self.load_module(module_name, context)\n\n def load_module(self, name, context):\n \"\"\"\n This uses a loaded config file to generate a fallback list and use a :class:`ModuleLoader` to load the module.\n\n :param name: The name reference of the module to load.\n :param context: The context to load the module into.\n\n :returns: The created :class:`ModuleLoader`\n \"\"\"\n context.config_manager = self\n if self.config_structure is None:\n fallback_list = [\n name]\n fallback_index = 0\n else:\n if name in self.config_structure:\n fallback_list = self.config_structure[name]\n fallback_index = 0\n else:\n for subsystem_config in self.config_structure:\n if subsystem_config != self._STARTUP_MOD_SECTION and name in self.config_structure[subsystem_config]:\n fallback_list = self.config_structure[subsystem_config]\n fallback_index = fallback_list.index(name)\n break\n else:\n fallback_list = [\n name]\n fallback_index = 0\n\n module_loader = ModuleLoader()\n module_loader.set_context(context)\n module_loader.fallback_list = fallback_list\n module_loader.fallback_index = fallback_index\n module_loader.load()\n self.module_loaders[name] = module_loader\n return module_loader\n\n def parse_config(self, path):\n \"\"\"\n Parse the config file.\n\n :param path: The file path of the config file to parse.\n\n :returns: The dictionary of the parsed config file.\n \"\"\"\n if path == '':\n path = self._config_path\n self._config_path = path\n f = open(path)\n section = None\n parsed_config = dict()\n for line in f:\n line = line.rstrip('\\r\\n')\n if '#' in line:\n line, comment = line.split('#', 1)\n line = line.strip()\n if '[' in line:\n section = line.split('[', 1)[1].split(']', 1)[0]\n parsed_config[section] = list()\n elif line is not '':\n parsed_config[section].append(line)\n\n logger.info('Finished parsing ' + path)\n self.config_structure = parsed_config\n return parsed_config","sub_path":"pycfiles/yeti-2016.0.3.tar/config_manger.cpython-35.py","file_name":"config_manger.cpython-35.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"212585905","text":"from Iterador import Iterator\n\n\ndef main():\n string = [\"A\", \"B\", \"C\", \"D\"]\n iterator = Iterator(len(string))\n\n print(\"Posibles combinaciones de los valores \" + str(string))\n\n while iterator.hasNext():\n counter = iterator.getNext()\n combination = \"\"\n for i in range(len(counter)):\n 
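# counter is a 0/1 vector from the Iterator; treating it as a binary mask,\n            # position i selects string[i], so sweeping every mask enumerates every\n            # combination, e.g. [1, 0, 1, 0] -> \"AC\" (assuming the Iterator yields all 2**n masks).\n            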
if counter[i] == 1:\n combination += string[i]\n print(combination)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"332997890","text":"from telegram.ext import Updater, MessageHandler, Filters\nimport logging\nimport handler\n\n\nlogging.basicConfig(format='[%(asctime)s] %(name)s: %(message)s')\n\n\ndef error_callback(bot, update, error):\n logging.error(error)\n\n\nmessage_handler = MessageHandler(Filters.status_update,\n handler.callback)\n\nupdater = Updater(TOKEN)\nupdater.dispatcher.add_error_handler(error_callback)\nupdater.dispatcher.add_handler(message_handler)\n\nupdater.start_polling()\nupdater.idle()\n","sub_path":"rage-quit/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"136174107","text":"from Db import *\nfrom player_inf import *\nimport random\nimport datetime\n\ndef rank_promote(player): #判断是否升级\n temp=list()\n temp=search_exp()\n if player.exp>=temp[player.rank-1]:\n player.rank=player.rank+1\n update_player_rank(player.rank,str(player.id))\n\ndef draw_card_if(player):\n if player.money>=500:\n return True\n else:\n return False\n\ndef draw_card(player): #抽卡\n player.money = player.money - 500\n update_player_money(player.money, player.id)\n flag = 0\n card_frep = []\n card_frep = search_freq()\n\n len1 = len(card_frep)\n i = 0\n j = 0\n total = 0\n rand = random.randint(1, 1000)\n while i < len1:\n if i == 0:\n if rand <= card_frep[i][0] * 1000:\n for j in player.tt.keys():\n if j == 1:\n flag = 1\n update_prof(card_frep[1][1], player.id)\n return card_frep[1][1]\n if flag == 0:\n update_cuisne(card_frep[1][1], player.id)\n return card_frep[1][1]\n if i == len1-1:\n if rand >= total + 1:\n print(111112222)\n for j in player.tt.keys():\n if j == card_frep[i][1]:\n flag = 1\n update_prof(card_frep[len1-1][1], player.id)\n return card_frep[len1-1][1]\n if flag == 0:\n print(card_frep)\n update_cuisne(card_frep[len1-1][1], player.id)\n return card_frep[len1-1][1]\n if rand <= card_frep[i][0] * 1000 + total and rand >= total + 1:\n for j in player.tt.keys():\n if j == i + 1:\n flag = 1\n update_prof(card_frep[i][1], player.id)\n return card_frep[i][1]\n if flag == 0:\n update_cuisne(card_frep[i][1], player.id)\n return card_frep[i][1]\n total = total + card_frep[i][0] * 1000\n i = i + 1\n return card_frep[i][1]\n\n\ndef do_damage(player, type, meum):\n temp=list(player.tt.values())\n a=temp[type]\n damage= a * (meum.ratio1 * player.qual1 + meum.ratio2 * player.qual2\n + meum.ratio3 * player.qual3)+meum.base_damage\n return int(damage)\n\n\n\ndef pvp_fight(id_1,id_2): #id_1为挑战方,id_2为防守方\n p1=search_player_inf(id_1)\n p2=search_player_inf(id_2)\n winner=[]\n update_player_vit(p1.vit-1,id_1)\n blood_1=p1.rank*1000\n blood_2=p2.rank*1000\n p_do=[]\n while blood_1>=0 and blood_2>=0:\n len_1 = len(p1.tt)\n len_2 = len(p2.tt)\n if len_1 == 0 and len_2 == 0: # 平局\n p1.exp = p1.exp + 3\n update_player_goy(p1.exp,p1.id)\n p1.money = p1.money + 50\n update_player_money(p1.money,p1.id)\n rank_promote(p1)\n winner=\"平局\"\n p_do.append(p1.id+\"平局\")\n p_do.append(p1.id+\"获得3点经验值\")\n p_do.append(p1.id+\"获得50点金币\")\n insert_fight_rz(id_1,id_2,2)\n return p_do\n if len_1>0:\n temp1 = list(p1.tt.keys())\n rand1 = random.randint(0, len_1 - 1)\n meum_kind_1 = meum_inf_find(temp1[rand1 - 
1])\n blood_2 = blood_2 - do_damage(p1, rand1, meum_kind_1)\n damage1=do_damage(p1, rand1, meum_kind_1)\n #print(do_damage(p1,rand1,meum_kind_1))\n p_do.append(p1.id+\"使用\"+str(meum_kind_1.name)+\"对\"+p2.id+\"造成\"+str(damage1)+\"伤害,\"+\n \"对方血量剩余\"+str(blood_2))\n if blood_2 <= 0:\n win_player = 1\n p1.exp = p1.exp + 5\n update_player_goy(p1.exp,p1.id)\n p1.money = p1.money + 100\n update_player_money(p1.money,p1.id)\n rank_promote(p1)\n winner=p1.id\n insert_fight_rz(id_1, id_2, 0)\n p_do.append(p1.id+\"获胜\")\n p_do.append(p1.id+\"获得5点经验值\")\n p_do.append(p1.id+\"获得100点金币\")\n return p_do\n if len_2>0:\n rand2 = random.randint(0, len_2 - 1)\n temp2 = list(p2.tt.keys())\n meum_kind_2 = meum_inf_find(temp2[rand2 - 1])\n blood_1 = blood_1 - do_damage(p2, rand2, meum_kind_2)\n damage2=do_damage(p2, rand2, meum_kind_2)\n p_do.append(p2.id+\"使用\"+str(meum_kind_2.name)+\"对\"+p1.id+\"造成\"+str(damage2)+\"伤害,\"+\n \"对方血量剩余\"+str(blood_1))\n if blood_1 <= 0:\n win_player = 2\n p1.exp = p1.exp + 1\n update_player_goy(p1.exp,p1.id)\n p1.money = p1.money + 20\n update_player_money(p1.money,p1.id)\n rank_promote(p1)\n winner=p2.id\n insert_fight_rz(id_1, id_2, 1)\n p_do.append(p1.id+\"失败\")\n p_do.append(p1.id+\"获得1点经验值\")\n p_do.append(p1.id+\"获得20点金币\")\n return p_do\n if len_1>0:\n del p1.tt[temp1[rand1 - 1]]\n if len_2>0:\n del p2.tt[temp2[rand2 - 1]]\n\n # file = open(\"record.txt\", \"a+\")\n # file.write(\"用户:\" + p1.id+\"and \"+p2.id+\"进行对战\"+\"\\n\"+\"获胜的是:\"+winner + \"\\n\" + \"\\n\")\n # file.close()\n return p_do\n\ndef search_vit_if(player):\n if player.vit > 0:\n return True\n else:\n return False\ndef money_enough_xiulian(money):\n if money >= 500:\n return True\n else:\n return False\n\ndef xiulian_knife(player):\n update_player_qual1(player.qual1+1,player.id)\n update_player_money(player.money-500,player.id)\n\ndef xiulian_spoon(player):\n update_player_qual2(player.qual2+1, player.id)\n update_player_money(player.money - 500, player.id)\n\ndef xiulian_flour(player):\n update_player_qual3(player.qual3+1, player.id)\n update_player_money(player.money - 500, player.id)\n\ndef pve_fight(id,npc_id):\n p1=search_player_inf(id) #获取对战双方的信息\n npc=search_player_inf(npc_id)\n blood_1=p1.rank*1000\n blood_2=npc.rank*1000\n winner=[]\n win_player=0 #挑战者赢为1,防御者赢为2\n p_do=[] #记录P1的动作 按照 伤害,使用菜品类型写入\n while (blood_1>0 and blood_2>0):\n len_1=len(p1.tt)\n len_2=len(npc.tt)\n if len_1==0 and len_2==0: #平局\n p1.money = p1.money + 10\n update_player_money(p1.money,p1.id)\n winner=\"平局\"\n insert_fight_rz(id, npc_id, 2)\n p_do.append(p1.id+\"平局\")\n p_do.append(p1.id+\"获得0点经验值\")\n p_do.append(p1.id+\"获得10点金币\")\n return p_do\n if len_1>0:\n temp1 = list(p1.tt.keys())\n rand1=random.randint(0,len_1-1)\n meum_kind_1 = meum_inf_find(temp1[rand1])\n blood_2=blood_2-do_damage(p1,rand1,meum_kind_1)\n damage1=do_damage(p1,rand1,meum_kind_1)\n p_do.append(p1.id + \"使用\" + str(meum_kind_1.name) + \"对\" + npc.id + \"造成\" + str(damage1) + \"伤害,\" +\n \"对方血量剩余\" + str(blood_2))\n if blood_2<=0:\n win_player = 1\n p1.exp = p1.exp + 1\n update_player_goy(p1.exp,p1.id)\n p1.money = p1.money + 20\n update_player_money(p1.money,p1.id)\n rank_promote(p1)\n winner=p1.id\n insert_fight_rz(id, npc_id, 0)\n p_do.append(p1.id+\"获胜\")\n p_do.append(p1.id+\"获得1点经验值\")\n p_do.append(p1.id+\"获得20点金币\")\n return p_do\n if len_2>0:\n temp2 = list(npc.tt.keys())\n rand2=random.randint(0,len_2-1)\n meum_kind_2 = meum_inf_find(temp2[rand2])\n blood_1=blood_1-do_damage(npc,rand2,meum_kind_2)\n damage2=do_damage(npc, rand2, 
meum_kind_2)\n p_do.append(npc.id + \"使用\" + str(meum_kind_2.name) + \"对\" + p1.id + \"造成\" + str(damage2) + \"伤害,\" +\n \"对方血量剩余\" + str(blood_1))\n if blood_1 <= 0:\n win_player = 2\n p1.money = p1.money + 5\n update_player_money(p1.money,p1.id)\n winner=\"npc\"\n insert_fight_rz(id, npc_id, 1)\n p_do.append(p1.id+\"失败\")\n p_do.append(p1.id+\"获得0点经验值\")\n p_do.append(p1.id+\"获得10点金币\")\n return p_do\n if len_1>0:\n del p1.tt[temp1[rand1 - 1]]\n if len_2>0:\n del npc.tt[temp2[rand2 - 1]]\n\n file = open(\"record.txt\", \"a+\")\n file.write(\"用户:\" + p1.id+\"and npc\"+npc.id+\"进行对战\"+\"\\n\"+\"获胜者是:\" + \"\\n\" + \"\\n\")\n file.close()\n\n return p_do\n\ndef search_exp_to_next(player):\n temp=[]\n temp=search_exp()\n a=temp[player.rank-1]-player.exp\n return a\n\ndef update_frep():\n freq=search_freq()\n len1=len(freq)\n total_freq=0\n each_freq=0\n end_freq=0\n i=0\n while i target:\n \treturn 'error'\n for x in range(nums_len):\n if nums[x] <= target:\n \t arr.append(nums[x]) \n for y in arr:\n \tif y == target:\n \t return y\n \telse :\n \t\t for i in range(len(arr)):\n \t\t\t for j in range(1,len(arr)):\n \t\t\t\t if (arr[i]+arr[j]) == target:\n \t\t\t\t\t answer.append(i)\n \t\t\t\t\t answer.append(j)\n \t\t\t\t\t return answer\n\n\nsol = Solution()\nanswer = sol.twoSum([2, 7, 11, 15], 9)\nprint('answer is: ', answer)\n\n","sub_path":"twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"343181929","text":"# Copyright 2020 VentorTech OU\n# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl-3.0).\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef pre_init_hook(cr):\n \"\"\"\n The objective of this hook is to speed up the installation\n of the module on an existing Odoo instance.\n Without this script, big databases can take a long time to install this\n module.\n \"\"\"\n set_stock_location_priority_default(cr)\n set_stock_quant_location_priority_default(cr)\n\n\ndef set_stock_location_priority_default(cr):\n cr.execute(\n \"\"\"SELECT column_name\n FROM information_schema.columns\n WHERE table_name='stock_location' AND\n column_name='removal_prio'\"\"\"\n )\n if not cr.fetchone():\n logger.info(\"Creating field removal_prio on stock_location\")\n cr.execute(\n \"\"\"\n ALTER TABLE stock_location\n ADD COLUMN removal_prio integer\n DEFAULT 0;\n \"\"\"\n )\n\n\ndef set_stock_quant_location_priority_default(cr):\n cr.execute(\n \"\"\"SELECT column_name\n FROM information_schema.columns\n WHERE table_name='stock_quant' AND\n column_name='removal_prio'\"\"\"\n )\n if not cr.fetchone():\n logger.info(\"Creating field removal_prio on stock_quant\")\n cr.execute(\n \"\"\"\n ALTER TABLE stock_quant\n ADD COLUMN removal_prio integer\n DEFAULT 0;\n \"\"\"\n )\n","sub_path":"outgoing_routing/init_hook.py","file_name":"init_hook.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"556841784","text":"import cv2\nimport numpy as np\nimport face_recognition\nimport os\nfrom datetime import datetime\n\npath = 'Images'\nimages = []\nclassNames = []\nmyList = os.listdir(path)\nprint(myList)\n\nfor name in myList:\n currImg = cv2.imread(f'{path}/{name}')\n images.append(currImg)\n classNames.append(os.path.splitext(name)[0])\nprint(classNames)\n\ndef findEncodings(images):\n encodeList = []\n for img in images:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n 
encode = face_recognition.face_encodings(img)[0]\n encodeList.append(encode)\n return encodeList\n\ndef markAttendance(name):\n with open('Attendance.csv','r+') as f:\n myDataList = f.readlines()\n nameList = []\n for line in myDataList:\n entry = line.split(',')\n nameList.append(entry[0])\n if name not in nameList:\n now = datetime.now()\n dtString = now.strftime('%H:%M:%S')\n f.writelines(f'\\n{name},{dtString}')\n\nencodedList = findEncodings(images)\nprint('Encoding Complete')\ncap = cv2.VideoCapture(0)\n\nwhile True:\n success, img = cap.read()\n imgS = cv2.resize(img,(0,0),None,0.5,0.5)\n imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\n facesCurrFrame = face_recognition.face_locations(imgS)\n encodesCurrFrame = face_recognition.face_encodings(imgS,facesCurrFrame)\n for encodeFace,faceLoc in zip(encodesCurrFrame,facesCurrFrame):\n matches = face_recognition.compare_faces(encodedList,encodeFace)\n faceDis = face_recognition.face_distance(encodedList,encodeFace)\n matchIndex = np.argmin(faceDis)\n y1,x2,y2,x1 = faceLoc\n y1, x2, y2, x1 = y1*2,x2*2,y2*2,x1*2\n if faceDis[matchIndex]< 0.50:\n name = classNames[matchIndex].upper()\n cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)\n cv2.putText(img,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)\n\n cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),2)\n markAttendance(name)\n\n cv2.imshow('Webcam',img)\n cv2.waitKey(1)\n","sub_path":"Attendance_prgm.py","file_name":"Attendance_prgm.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"343892058","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport falcon\nimport pendulum\nfrom pony import orm\n\nfrom models.entity2proto import Entity2Proto\nfrom mvp.presenter_group import PresenterGroup\nfrom utils.const import ACTIVITY_EVENT, NOTIFICATION_EVENT\nfrom utils.time_climber import TimeClimber\nfrom utils.time_flier import TimeFlier\nfrom verifier.verifier import Verifier\nfrom verifier.diary import verify_diary\nfrom models import Diary, User, Attention\nfrom utils import const\n\n\n@PresenterGroup.route(\"/diary\")\n@falcon.before(Verifier.login)\n@falcon.before(verify_diary)\nclass DiaryPresenter(object):\n\n def on_put(self, req, resp, diary_req, diary_resp, current_user_identity, diary_date):\n now = int(pendulum.utcnow().float_timestamp * 1000)\n with orm.db_session:\n diary = Diary(\n type=const.ARTICLE_TYPE.DIARY,\n author=User.select(lambda u: u.identity == current_user_identity).get(),\n date=now,\n content=diary_req.diary.content,\n title=diary_req.diary.title,\n language=diary_req.diary.language,\n diary_date=diary_date\n )\n Attention(\n user_id=current_user_identity,\n diary_id=diary.identity,\n date=now\n )\n orm.commit()\n TimeClimber.anchor(\n event=ACTIVITY_EVENT.PUBLISH_DIARY,\n article_id=diary.identity,\n user_id=current_user_identity\n )\n TimeFlier.notify(\n event=NOTIFICATION_EVENT.PUBLISH_DIARY,\n who=current_user_identity,\n which=diary.identity\n )\n proto_diary = Entity2Proto.build_diary(diary, current_user_identity)\n diary_resp.success = True\n diary_resp.diary.CopyFrom(proto_diary)\n resp.body = diary_resp.SerializeToString()\n\n def on_delete(self, req, resp, diary_req, diary_resp, current_user_identity, diary_identity):\n with orm.db_session:\n diary = Diary.select(lambda d: d.identity == diary_identity\n and d.author.identity == current_user_identity).for_update().get()\n diary.soft_delete = True\n Attention.select(lambda a: a.user_id == 
current_user_identity\n and a.diary_id == diary_identity).delete(bulk=True)\n orm.commit()\n TimeClimber.descender(\n event=ACTIVITY_EVENT.PUBLISH_DIARY,\n article_id=diary_identity,\n user_id=current_user_identity\n )\n TimeFlier.erase(\n event=None,\n who=diary_identity,\n which=None,\n receiver=None\n )\n TimeFlier.erase(\n event=None,\n who=None,\n which=diary_identity,\n receiver=None\n )\n diary_resp.success = True\n resp.body = diary_resp.SerializeToString()\n","sub_path":"presenters/diary.py","file_name":"diary.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"68730312","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport unicodedata\nimport re\n\nUNICODE_A = ord('A')\nUNICODE_Z = ord('Z')\nSIZE_ALPHABET = 26\nPATH_DECRYPTED_LOG = \"tmp/decrypted.log\"\nREAD = \"r\"\nWRITE = \"w\"\n\ndef normalizeKey(message, key):\n keyNormalized = \"\"\n for char in message:\n if(char == \" \"):\n keyNormalized += \" \"\n elif (char == \",\"):\n keyNormalized += \",\"\n else:\n keyNormalized += key[0]\n key = key[1:] + key[0]\n return keyNormalized\n\ndef normalizeText(text, codif='utf-8'):\n return unicodedata.normalize('NFKD', text.decode(codif)).encode('ASCII','ignore').upper()\n\ndef encrypt(message, key):\n result = \"\"\n for index in range(len(message)):\n if ord(message[index]) >= UNICODE_A and ord(message[index]) <= UNICODE_Z: \n result += chr((ord(message[index]) + ord(key[index]) - (UNICODE_A * 2)) % SIZE_ALPHABET + UNICODE_A)\n else:\n result += message[index]\n return result\n\ndef decrypt(message, key):\n result = \"\"\n for index in range(len(message)):\n if ord(message[index]) >= UNICODE_A and ord(message[index]) <= UNICODE_Z: \n result += chr(((ord(message[index]) - ord(key[index])) + SIZE_ALPHABET) % SIZE_ALPHABET + UNICODE_A)\n else:\n result += message[index]\n return result\n\ndef main():\n params = sys.argv[1:]\n message = open(params[0], READ).readlines()\n keys = open(params[1], READ)\n result = open(PATH_DECRYPTED_LOG, WRITE)\n for l in keys:\n result.write(l + decrypt(normalizeText(message[0]), normalizeKey(normalizeText(message[0]), normalizeText(l.replace('\\n', '').replace('-','')))))\n result.write(\"\\n\\n\")\nif __name__ == '__main__':\n main()","sub_path":"02-cifra-vigenere/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"302017825","text":"#!/usr/bin/env python\r\n#\r\n# logger.py\r\n#\r\n# Python logger for the SdrScript SDR application\r\n# \r\n# Copyright (C) 2014 by G3UKB Bob Cowdery\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\r\n# \r\n# The author can be reached by email at: \r\n# bob@bobcowdery.plus.com\r\n#\r\n\r\n# Python imports\r\nimport sys, os\r\nimport logging\r\nfrom logging import handlers\r\nfrom time import sleep\r\n\r\n# Application imports\r\nfrom common.defs import *\r\n\r\n\"\"\"\r\nThe logger logs messages to a cyclic file.\r\n\"\"\"\r\nclass Logger():\r\n\t\r\n\tdef __init__(self):\r\n\t\t\"\"\" Constructor \"\"\"\r\n\t\t\r\n\t\t# Set up the local logging environment\r\n\t\tif not os.path.exists(os.path.join('..', '..', 'logs')):\r\n\t\t\tos.mkdir(os.path.join('..', '..', 'logs'))\r\n\t\tlogger = logging.getLogger('sdr_script')\r\n\t\tlogger.setLevel(logging.INFO)\r\n\t\tformat = logging.Formatter(\"%(asctime)s - %(name)s - %(thread)5d - %(levelname)-5s - %(message)s\")\r\n\t\t# Log to a rotating file\r\n\t\th = logging.handlers.RotatingFileHandler(os.path.join('..', '..', 'logs', 'sdr_script.log'), maxBytes=100000, backupCount=5)\r\n\t\th.setLevel(logging.INFO)\r\n\t\th.setFormatter(format)\r\n\t\tlogger.addHandler(h)\t\t\r\n\t\tlogger.log(logging.INFO, 'Logging initialised ===================================')\r\n\r\n\r\n","sub_path":"SdrScript/python/transcript/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"239704186","text":"# Import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom workspace_utils import active_session\nfrom PIL import Image\n\n\n# Function\ndef CreateClassifier(input_size, output_size, hidden_layers, dropout):\n '''\n Create Classifier section to attach to the pre-trained CNN\n '''\n # Create list of input-ouput neurons in each layers\n hidden_layers.pop() # remove last item since it's the same as the output_size\n layerlist = []\n\n # Create a list of layers\n n_in = input_size\n for i, n in enumerate(hidden_layers):\n layerlist.append( nn.Linear(n_in,n) )\n layerlist.append( nn.ReLU() )\n if len(dropout) != 0:\n layerlist.append( nn.Dropout(p=dropout[i]) )\n n_in = n\n \n # Append the output layer\n layerlist.append( nn.Linear(hidden_layers[-1],output_size) )\n layerlist.append( nn.LogSoftmax(dim=1) )\n classifier = nn.Sequential(*layerlist)\n \n return classifier\n\n\n# Function\ndef load_checkpoint(filepath):\n '''\n Function attached the output classifer to the pre-trained model and assigned weights and biases\n model: pre-trained model\n filepath: file path\n '''\n # Load file\n checkpoint = torch.load(filepath)\n \n # Create model\n model = models.vgg19(pretrained=True)\n \n # class index\n model.class_to_idx = checkpoint['class_to_idx']\n \n # Create network\n model.classifier = CreateClassifier(checkpoint['input_size'],\n checkpoint['output_size'],\n checkpoint['hidden_layers'],\n checkpoint['dropout'])\n \n # Disable all gradient and set model to evaluation mode\n for p in model.parameters():\n p.requires_grad = False\n model.eval()\n \n # Load weights and biases\n model.classifier.load_state_dict(checkpoint['state_dict'])\n \n return 
model","sub_path":"CreateModel.py","file_name":"CreateModel.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"301312913","text":"# model settings\ninput_size = 512\nmodel = dict(\n type='SingleStageDetector',\n pretrained=None,\n backbone=dict(\n type='SSDMobilenetV2',\n input_size=input_size,\n scales=6,\n ),\n neck=None,\n bbox_head=dict(\n type='SSDHead',\n input_size=input_size,\n in_channels=(576, 1280, 512),\n num_classes=4,\n anchor_strides=(16, 32, 64),\n anchor_widths=([17.137, 38.165, 70.69, 9.584, 17.634, 23.744, 6.507, 12.245, 14.749],\n [81.753, 153.183, 169.567, 32.148, 41.048, 52.198, 32.391, 22.397, 33.216],\n [110.651, 237.237, 348.269, 65.598, 82.729, 110.538, 53.24, 68.246, 105.444],\n ),\n anchor_heights=([20.733, 45.464, 78.592, 29.393, 55.398, 84.88, 17.006, 28.673, 44.11],\n [157.379, 104.698, 210.545, 118.319, 157.328, 203.363, 36.256, 64.451, 101.718],\n [344.064, 243.971, 337.749, 256.941, 327.187, 428.114, 68.919, 155.867, 270.048],\n ),\n target_means=(.0, .0, .0, .0),\n target_stds=(0.1, 0.1, 0.2, 0.2),\n depthwise_heads=True,\n depthwise_heads_activations='relu',\n loss_balancing=False))\n# training and testing settings\ncudnn_benchmark = True\ntrain_cfg = dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.,\n ignore_iof_thr=-1,\n gt_max_assign_all=False),\n smoothl1_beta=1.,\n use_giou=False,\n use_focal=False,\n allowed_border=-1,\n pos_weight=-1,\n neg_pos_ratio=3,\n debug=False)\ntest_cfg = dict(\n nms=dict(type='nms', iou_thr=0.45),\n min_bbox_size=0,\n score_thr=0.02,\n max_per_img=200)\n# dataset settings\ndataset_type = 'CustomCocoDataset'\ndata_root = '../../data/airport'\nimg_norm_cfg = dict(\n mean=[0, 0, 0], std=[255, 255, 255], to_rgb=False)\ntrain_pipeline = [\n dict(type='LoadImageFromFile', to_float32=True),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='PhotoMetricDistortion',\n brightness_delta=32,\n contrast_range=(0.5, 1.5),\n saturation_range=(0.5, 1.5),\n hue_delta=18),\n dict(\n type='MinIoURandomCrop',\n min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),\n min_crop_size=0.3),\n dict(type='Resize', img_scale=(input_size, input_size), keep_ratio=False),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(input_size, input_size),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=False),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndata = dict(\n imgs_per_gpu=4,\n workers_per_gpu=0,\n train=dict(\n type=dataset_type,\n classes=('vehicle', 'person', 'non-vehicle'),\n ann_file=data_root+'/annotation_example_train.json',\n img_prefix=data_root + '/train',\n pipeline=train_pipeline\n ),\n val=dict(\n type=dataset_type,\n classes=('vehicle', 'person', 'non-vehicle'),\n ann_file=data_root+'/annotation_example_val.json',\n img_prefix=data_root + '/val',\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n classes=('vehicle', 'person', 'non-vehicle'),\n ann_file=data_root+'/annotation_example_val.json',\n img_prefix=data_root + '/val',\n pipeline=test_pipeline))\n# optimizer\noptimizer = dict(type='SGD', lr=0.0001, momentum=0.9, 
weight_decay=0.0001)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=5,\n warmup_ratio=1.0 / 3,\n step=[50, 75])\ncheckpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n interval=1,\n hooks=[\n dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook'),\n ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 5\n# device_ids = range(8)\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = 'outputs/person-vehicle-bike-detection-crossroad-1016'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n","sub_path":"pytorch_toolkit/object_detection/configs/person-vehicle-bike-detection-crossroad-1016.py","file_name":"person-vehicle-bike-detection-crossroad-1016.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"232386799","text":"#!/usr/bin/env python\n#coding:utf-8\n#Author:shenqiang\nimport pymysql\n\ndef connectMysql():\n try:\n '''connect to the database'''\n connect = pymysql.connect(\n host='127.0.0.1',\n user='root',\n password='shen6409175',\n db='students'\n )\n except Exception as e:\n return e.args\n else:\n '''create a cursor'''\n cur = connect.cursor()\n '''keep the SQL statement and its parameters separate'''\n # sql = 'select * from student where id = %s'\n # params = (1,)\n # '''execute the parameterized query'''\n # cur.execute(sql,params)\n # '''fetch a single row'''\n # data = cur.fetchone()\n # return datas\n sql = 'select * from student'\n '''execute the query'''\n cur.execute(sql)\n '''fetch all rows'''\n datas = cur.fetchall()\n '''method 1: plain iteration'''\n # for data in datas:\n # print(data)\n '''method 2: list comprehension'''\n db = [data for data in datas]\n print(db)\n\n finally:\n # close the cursor and the connection, but only if they were created\n if 'cur' in locals():\n cur.close()\n if 'connect' in locals():\n connect.close()\n\nconnectMysql()\n\n\n\n\n\n\n","sub_path":"tryApiTest/apiRequestTests/apiTests/tryMySQL.py","file_name":"tryMySQL.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"602796392","text":"# -*- coding: cp1252 -*-\nimport tweepy, time, sys\n\nconsumer_key = '7fLoKrPeYOxj5WQjCMdLnwODD'\nconsumer_secret = 'D5rJeGRfjKH0qcaW0rL9yIGjrWtRfYg13bQo8NUTRZur0GNWtG'\naccess_token = '817145018666336257-cyC9GJKBLypNclTmUce0TKumHrIcYzE'\naccess_token_secret = 'ncLOPZx3zWF325kZFxSNKKgs2j16n4dVEc0NfbBQe6gI8'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\n#File the bot will tweet from\nfilename=open('chavez.txt','r')\nf=filename.readlines()\nfilename.close()\n\nfor line in f:\n api.update_status(line + \"HS\")\n time.sleep(600)#Tweet every 10 minutes\n","sub_path":"favsimequieres.py","file_name":"favsimequieres.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"127771324","text":"import sqlalchemy\n\n\"\"\"\n This module contains all the object wrapper classes for the json data returned by the league-focused requests.\n\"\"\"\n\n\nclass Serie:\n\n def __init__(self, progress=None, losses=None, target=None, wins=None):\n self.progress = progress\n self.losses = losses\n self.target = target\n self.wins = wins\n\n\nclass SummonerLeagueInfo:\n\n def __init__(self, summoner_name=None, hot_streak=None, mini_series=Serie(), wins=None, veteran=None, losses=None,\n fresh_blood=None, inactive=None, rank=None, summoner_id=None, league_points=None):\n self.summonerName = summoner_name\n 
self.hotStreak = hot_streak\n self.miniSeries = mini_series\n self.wins = wins\n self.veteran = veteran\n self.losses = losses\n self.freshBlood = fresh_blood\n self.inactive = inactive\n self.rank = rank\n self.summonerId = summoner_id\n self.leaguePoints = league_points\n\n\nclass League:\n\n def __init__(self, league_id=None, tier=None, entries=SummonerLeagueInfo(), queue=None, name=None):\n self.leagueId = league_id\n self.tier = tier\n self.entries = entries\n self.queue = queue\n self.name = name","sub_path":"ezrealbatch/core/beans/league.py","file_name":"league.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"248736045","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pylab as plt\nimport matplotlib as mpl\nfrom matplotlib import rc\nfrom scipy import optimize, interpolate, misc, constants\nimport os\nimport sys\nimport re\nimport scipy.optimize as optimization\nimport math\nimport matplotlib.cm as cm\n\ndef sortArrayUpways(arr):\n\tn = len(arr)\n\tfor i in range(0,n):\n\t\tfor j in range(0,n):\n\t\t\tif(arr[i]90 grad\ns_theo2 = h2**2 + k2**2 + l2**2\n#2 = fcc\nprint(\"se1(bcc): \", s_exp1)\nprint(\"se2(fcc): \", s_exp2)\nprint(\"st1: \", s_theo1)\nprint(\"st2: \", s_theo2)\n\n#####Gitterkonstante\nd1 = lamda/(2*np.sin(theta1))\nd2 = lamda/(2*np.sin(theta2))\na1 = d1 * np.sqrt(s_theo1)\na2 = d2 * np.sqrt(s_theo2) \n\nprint(\"a1-mean: \", np.mean(a1))\nprint(\"a2-mean: \", np.mean(a2))\ncosSquare1 = np.cos(theta1)**2\ncosSquare2 = np.cos(theta2)**2\na1 = a1*10**7\na2 = a2*10**7\n\nslope1, intercept1, r_value1, p_value1, std_err1 = stats.linregress(cosSquare1,a1)\nslope2, intercept2, r_value2, p_value2, std_err2 = stats.linregress(cosSquare2,a2)\nprint(\"m1 = \"+str(slope1), \"b1 = \"+str(intercept1), \"MgS: 520 (falsche Farbe), BaO: 554, SrO: 516, LiBr: 550, KF: 534\", r_value1, p_value1, std_err1)\nprint(\"m2 = \"+str(slope2), \"b2 = \"+str(intercept2), \"Yb: 548, Sn: 583 (centerced tetragonal!), Sr: 608, Ge: 565\" , r_value2, p_value2, std_err2)\nx = np.linspace(0,1,100)\nfit1 = slope1*x+intercept1\nfit2 = slope2*x+intercept2\n\nfig = plt.figure()\nax = plt.gca()\n\n####output\nf = open('workfile','w')\nf.write(\"r1\\ttheta\\tsexp\\tstheo\\tmiller\\ta\\n\")\nfor i in range(0,len(a1)):\n\tf.write(str(round(r1_mess[i],2))+ \"\\t\" + str( round(theta1[i],2)) + \"\\t\" + str( round(s_exp1[i],2)) + \"\\t\" + str(round(s_theo1[i],2)) + \"\\t\" + str(h1[i])+str(k1[i])+str(l1[i]) +str(\"\\t\") + str(round(a1[i],2)))\n\tf.write(\"\\n\")\n\nf.write(\"\\n\\n\\n\")\nf.write(\"r2\\ttheta\\tsexp\\tstheo\\tmiller\\ta\\n\")\nfor i in range(0,len(a2)):\n\tf.write(str(round(r2_mess[i],2))+ \"\\t\" + str( round(theta2[i],2)) + \"\\t\" + str( round(s_exp2[i],2)) + \"\\t\" + str(round(s_theo2[i],2)) + \"\\t\" + str(h2[i])+str(k2[i])+str(l2[i]) +str(\"\\t\")+ str(round(a2[i],2)))\n\tf.write(\"\\n\")\n\nf.close()\n\nplt.plot(x,fit1)\nplt.xlim((0,1))\nplt.title(\"Korrektur zum Gitterparameter der ersten Probe\")\nplt.xlabel(r\"cos$^2 (\\theta)$\")\nplt.ylabel(\"Gitterparameter $a$\")\n#plt.legend(loc=2)\n#plt.plot(x,fit2)\nax.scatter(cosSquare1, a1)\n#plt.savefig(\"a1\",dpi=100)\n\n#ax.scatter(cosSquare2, 
a2)\n#plt.savefig(\"a2\",dpi=100)\nplt.show()\n\n","sub_path":"02Debye_Scherrer/auswertung/debye2.py","file_name":"debye2.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"167411300","text":"'''Framework to simplify implementation of command-line applications.'''\r\n\r\n__revision__ = '$Id: cmdlapp.py 61515 2017-08-04 03:23:52Z schroe03 $'\r\n__docformat__ = 'ReStructuredText'\r\n__author__ = 'Andreas Schroeder '\r\n\r\n\r\nfrom argparse import ArgumentParser\r\nimport logging\r\nimport sys\r\nimport yaml\r\n\r\n\r\nclass ConfigWrapper():\r\n '''Load and provide access to a configuration file.'''\r\n\r\n def __init__(self, filename):\r\n '''Initialize the ConfigWrapper object by loading the config file.\r\n\r\n :param string filename: The filename of the YAML config file to load.'''\r\n\r\n self._load_config(filename)\r\n\r\n\r\n def _load_config(self, filename):\r\n '''Load a YAML configuration file with the specified name.\r\n\r\n :param string filename: The filename of the YAML config file to load.'''\r\n\r\n with open(filename, 'r') as f:\r\n self.cfg = yaml.load(f)\r\n\r\n self.filename = filename\r\n\r\n\r\n def get(self, key, default=None, raise_ex=False):\r\n '''Retrieve a configuration value by path.\r\n\r\n If an element in a dictionary does not exist, the default value is\r\n returned.\r\n\r\n :param string key: The path to a config item, separated by '/' like\r\n ``section/subsection/key``.\r\n :param default: The default value returned if the key is not available\r\n in the config file.\r\n :param bool raise_ex: If set to true, an exception is raised if the\r\n specified key is not available.\r\n\r\n :returns: The config value read from the config file or the default.\r\n '''\r\n\r\n pathlist = key.split('/')\r\n\r\n d = self.cfg\r\n\r\n for p in pathlist:\r\n if p in d.keys():\r\n d = d[p]\r\n else:\r\n if raise_ex == True:\r\n msg = \"Config key '{key}' is not available in file '{file}'.\"\r\n raise KeyError(msg.format(\r\n key=key,\r\n file=self.filename))\r\n else:\r\n return default\r\n\r\n return d\r\n\r\n\r\nclass CmdlApp():\r\n '''Base class for implementing command-line applications.'''\r\n\r\n def __init__(self):\r\n self.use_cfgfile = False\r\n self.main_fct = None\r\n self.tool_name = 'unknown_tool'\r\n self.tool_version = '0.0-dev'\r\n\r\n self._cfg_keys = [\r\n 'main_fct',\r\n 'use_cfgfile',\r\n 'tool_name',\r\n 'tool_version',\r\n 'loglevel']\r\n\r\n\r\n def configure(self, **args):\r\n '''Configure the behavior of the command-line application.\r\n\r\n :param function-pointer main_fct: The function to run when the tool is\r\n started.\r\n :param bool use_cfgfile: If set to True, a '--config' parameter is added\r\n to the command-line options and the given file (YAML format) expected\r\n is loaded and provided as the cfg member variable.\\\r\n '''\r\n\r\n for key in args.keys():\r\n if key not in self._cfg_keys:\r\n msg = \"Unknown config parameter '{0}'.\"\r\n raise ValueError(msg.format(key))\r\n\r\n setattr(self, key, args[key])\r\n\r\n\r\n def setup_arg_parser(self):\r\n '''Set up the command line argument parser.\r\n\r\n Override this function to add additional command-line parameters by\r\n calling add_argument() on the self.parser member variable.\r\n '''\r\n\r\n self.parser = ArgumentParser()\r\n\r\n self.parser.add_argument('-v', '--verbose',\r\n help='Verbose output.',\r\n action=\"store_true\",\r\n default=False)\r\n\r\n if self.use_cfgfile:\r\n 
self.parser.add_argument('-c', '--config',\r\n type=str,\r\n help='Configuration file',\r\n default='cfg.yaml',\r\n dest='config_file')\r\n\r\n\r\n def _parse_cmdline(self, args):\r\n '''Configure command-line parser and parse command-line.'''\r\n\r\n self.args = self.parser.parse_args(args)\r\n\r\n\r\n def _setup_logging(self):\r\n '''Configure the logging.\r\n\r\n The logging is used for all output of the tool, so everything is written\r\n to stdout.'''\r\n\r\n if self.args.verbose:\r\n level = logging.DEBUG\r\n else:\r\n level = logging.INFO\r\n\r\n logfmt = '%(asctime)s %(levelname)s: {toolname}: %(message)s'.format(\r\n toolname=self.tool_name)\r\n\r\n logging.basicConfig(\r\n level=level,\r\n format=logfmt,\r\n stream=sys.stdout)\r\n\r\n logging.info('{0} version {1}'.format(\r\n self.tool_name,\r\n self.tool_version))\r\n\r\n\r\n def load_config(self):\r\n '''Load the configuration file.'''\r\n\r\n try:\r\n self.cfg = ConfigWrapper(self.args.config_file)\r\n except FileNotFoundError:\r\n msg = \"Cannot open config file '{0}'.\"\r\n logging.error(msg.format(self.args.config_file))\r\n sys.exit(1)\r\n\r\n\r\n def run(self, args=None):\r\n '''Initializes the tool and then starts the automation to execute all\r\n TESSY tests of the given project.'''\r\n\r\n self.setup_arg_parser()\r\n self._parse_cmdline(args=sys.argv[1:])\r\n\r\n self._setup_logging()\r\n\r\n if self.use_cfgfile:\r\n self.load_config()\r\n\r\n # Start the main function\r\n if self.main_fct != None:\r\n self.main_fct()\r\n else:\r\n msg = \"The 'main_fct' config is unset. Cannot run application.\"\r\n raise ValueError(msg)\r\n","sub_path":"bme280/plot/cmdlapp.py","file_name":"cmdlapp.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"27244165","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport random\nfrom collections import deque \nimport torch.optim as optim\nimport os\nfrom sklearn.preprocessing import OneHotEncoder\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nimport sklearn.decomposition\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--sequence_size', help=\"length of the horizon\", type=int, default=4)\nparser.add_argument('--pca_length', help=\"length of the PCA feature vector\", type=int, default=8)\nparser.add_argument('--lr', help=\"learning rate\", type=float, default=1e-2)\nparser.add_argument('--task', help=\"task problem\", default='dectiger')\nargs = parser.parse_args()\n\nSEQUENCE_SIZE = args.sequence_size\nPCA_LENGTH = args.pca_length\nlr = args.lr\ntask = args.task\nBATCH_SIZE = 400\nMEMORY_SIZE = 4000\nHIDDEN_SIZE = 6\nTOTAL_EPISODES = 40000\nTEST_TOTAL_EPISODES = 2000\nTARGET_UPDATE_FREQ = 100 * SEQUENCE_SIZE\nBACK_PROP_FREQ = 1 * SEQUENCE_SIZE\nINITIAL_EPSILON = 0.9\nFINAL_EPSILON = 0.0\n\n\n\nif task == 'boxpushing':\n from env.boxpushing import Environment\nelif task == 'grid3x3':\n from env.grid3x3 import Environment\nelse:\n from env.dectiger import Environment\n\n\nclass Memory():\n def __init__(self, memory_size):\n self.memory_size = memory_size\n self.memory = deque(maxlen=self.memory_size)\n \n def add_episode(self, epsiode):\n self.memory.append(epsiode)\n \n def get_batch(self, batch_size):\n batch = random.sample(self.memory, batch_size)\n return batch\n\n\n\nclass Model(nn.Module):\n 
def __init__(self, input_size, hidden_size, output_size):\n super(Model, self).__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n \n self.fc1 = nn.Linear(input_size, hidden_size)\n self.fc2 = nn.Linear(hidden_size, output_size)\n self.relu = nn.ReLU()\n\n\n def forward(self, input, batch_size):\n input = torch.FloatTensor(input).view(batch_size, -1)\n output = self.fc1(input)\n output = self.relu(output)\n output = self.fc2(output)\n \n return output\n\n\ndef one_hot_encoding(xs, n):\n # xs[batch_size]\n tmp = [[i] for i in range(n)]\n enc = OneHotEncoder(handle_unknown='ignore', categories=[[i for i in range(n)]])\n enc.fit(tmp)\n xs = np.expand_dims(np.array(xs), axis=1)\n result = enc.transform(xs).toarray()\n result = torch.tensor(result).float()\n # result[batch_size][action_size]\n return result\n\n\ndef fit_pca():\n X = []\n current_sample = [0 for _ in range((action_size + observation_size) * (SEQUENCE_SIZE))]\n X.append(current_sample)\n sample_size = 300000\n\n for _ in range(sample_size):\n current_sample = [0 for _ in range((action_size + observation_size) * (SEQUENCE_SIZE))]\n length = random.randint(1, SEQUENCE_SIZE)\n for i in range(length):\n a = random.randint(0, action_size - 1)\n o = random.randint(0, observation_size - 1)\n current_sample[(SEQUENCE_SIZE - length + i) * (action_size + observation_size) + a] = 1\n current_sample[(SEQUENCE_SIZE - length + i) * (action_size + observation_size) + action_size + o] = 1\n X.append(current_sample)\n\n pca = sklearn.decomposition.PCA(n_components=PCA_LENGTH)\n pca.fit(np.array(X))\n return pca\n\n\ndef train():\n # Initialize experience memory\n for episode in range(0, MEMORY_SIZE // SEQUENCE_SIZE // 2 + 1):\n env.init_environment(batch_size=1)\n embedding = [deque([0.0 for i in range((SEQUENCE_SIZE) * (action_size + observation_size))], maxlen=(SEQUENCE_SIZE) * (action_size + observation_size)) for i in range(2)]\n \n for _ in range(SEQUENCE_SIZE):\n actions = [[random.randint(0, action_size - 1)] for i in range(2)]\n observations[0], observations[1], rewards = env.step(actions[0], actions[1])\n\n for agent_i in range(agent_num): \n one_hot_actions[agent_i] = one_hot_encoding(actions[agent_i], action_size)\n one_hot_observations[agent_i] = one_hot_encoding(observations[agent_i], observation_size)\n # TODO here\n\n current_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n current_state = torch.cat((torch.FloatTensor([agent_i]), current_state), 0)\n\n embedding[agent_i].extend(torch.cat((one_hot_actions[agent_i][0], one_hot_observations[agent_i][0]), 0).tolist())\n new_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n new_state = torch.cat((torch.FloatTensor([agent_i]), new_state), 0)\n memory.add_episode((current_state, actions[agent_i][0], rewards[0], new_state))\n\n\n epsilon = INITIAL_EPSILON\n reward_stat = []\n total_steps = 0\n total_reward = 0\n total_loss = 0\n\n\n # Start training\n for episode in range(TOTAL_EPISODES):\n env.init_environment(batch_size=1)\n embedding = [deque([0.0 for i in range((SEQUENCE_SIZE) * (action_size + observation_size))], maxlen=(SEQUENCE_SIZE) * (action_size + observation_size)) for i in range(2)]\n\n for _ in range(SEQUENCE_SIZE):\n total_steps += 1\n current_state = [None for i in range(2)]\n for agent_i in range(2):\n current_state[agent_i] = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n current_state[agent_i] = 
torch.cat((torch.FloatTensor([agent_i]), current_state[agent_i]), 0)\n if np.random.rand(1) < epsilon:\n actions = [[random.randint(0, action_size - 1)] for i in range(2)]\n else:\n for agent_i in range(2):\n q_values = main_model(current_state[agent_i], batch_size=1)[0]\n actions[agent_i] = [int(torch.argmax(q_values))]\n\n observations[0], observations[1], rewards = env.step(actions[0], actions[1])\n total_reward += rewards[0]\n \n for agent_i in range(agent_num): \n one_hot_actions[agent_i] = one_hot_encoding(actions[agent_i], action_size)\n one_hot_observations[agent_i] = one_hot_encoding(observations[agent_i], observation_size)\n\n current_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n current_state = torch.cat((torch.FloatTensor([agent_i]), current_state), 0)\n\n embedding[agent_i].extend(torch.cat((one_hot_actions[agent_i][0], one_hot_observations[agent_i][0]), 0).tolist())\n new_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n new_state = torch.cat((torch.FloatTensor([agent_i]), new_state), 0)\n memory.add_episode((current_state, actions[agent_i][0], rewards[0], new_state))\n\n\n if (total_steps % TARGET_UPDATE_FREQ) == 0:\n target_model.load_state_dict(main_model.state_dict())\n \n if (total_steps % BACK_PROP_FREQ) == 0: \n\n batch = memory.get_batch(batch_size=BATCH_SIZE)\n \n current_states = []\n local_actions = []\n local_rewards = []\n next_states = []\n\n for sample in batch:\n current_states.append(sample[0])\n local_actions.append(sample[1])\n local_rewards.append(sample[2])\n next_states.append(sample[3])\n \n current_states = torch.cat(current_states, dim=0) # [batch_size][embedding_size]\n local_actions = torch.LongTensor(local_actions)\n local_rewards = torch.FloatTensor(local_rewards) # [batch_size]\n next_states = torch.cat(next_states, dim=0)\n\n next_q_values = target_model(next_states, batch_size=BATCH_SIZE) # [batch_size][action_size]\n next_q_max_value, _ = next_q_values.detach().max(dim=1) # [batch_size]\n target_values = local_rewards + 0.75 * next_q_max_value # There should be a gamma factor\n \n q_values = main_model(current_states, batch_size=BATCH_SIZE) # [batch_size][action_size]\n current_values = torch.gather(q_values, dim=1, index=local_actions.unsqueeze(dim=1)).squeeze(dim=1)\n \n loss = criterion(current_values, target_values)\n total_loss += loss\n \n if episode <= TOTAL_EPISODES - 2000:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\n reward_stat.append(total_reward)\n if episode % 100 == 99:\n print(episode, total_reward / 100, total_loss.item() / 100)\n writer.add_scalar(task + '/reward', total_reward / 100, episode)\n writer.add_scalar(task + '/loss', total_loss.item() / 100, episode)\n total_reward = 0\n total_loss = 0\n\n if epsilon > FINAL_EPSILON:\n epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / (TOTAL_EPISODES - 4000)\n \n \ndef test():\n report = 0.0\n result = 0\n total_reward = 0\n for length in [SEQUENCE_SIZE]:\n TEST_SEQUENCE_SIZE = length\n\n for episode in range(TEST_TOTAL_EPISODES):\n env.init_environment(batch_size=1)\n embedding = [deque([0.0 for i in range((SEQUENCE_SIZE) * (action_size + observation_size))], maxlen=(SEQUENCE_SIZE) * (action_size + observation_size)) for i in range(2)]\n current_discount = 1.0\n\n for _ in range(TEST_SEQUENCE_SIZE):\n current_state = [None for i in range(2)]\n actions = [None for i in range(2)]\n for agent_i in range(2):\n current_state[agent_i] = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n 
current_state[agent_i] = torch.cat((torch.FloatTensor([agent_i]), current_state[agent_i]), 0)\n for agent_i in range(2):\n q_values = main_model(current_state[agent_i], batch_size=1)[0]\n actions[agent_i] = [int(torch.argmax(q_values))]\n\n observations[0], observations[1], rewards = env.step(actions[0], actions[1])\n total_reward += rewards[0] * current_discount\n result += rewards[0] * current_discount\n for agent_i in range(agent_num): \n one_hot_actions[agent_i] = one_hot_encoding(actions[agent_i], action_size)\n one_hot_observations[agent_i] = one_hot_encoding(observations[agent_i], observation_size)\n\n current_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n current_state = torch.cat((torch.FloatTensor([agent_i]), current_state), 0)\n\n embedding[agent_i].extend(torch.cat((one_hot_actions[agent_i][0], one_hot_observations[agent_i][0]), 0).tolist())\n new_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n new_state = torch.cat((torch.FloatTensor([agent_i]), new_state), 0)\n\n\n if episode % 100 == 99:\n print(episode, total_reward / 100)\n total_reward = 0\n\n print(result / TEST_TOTAL_EPISODES)\n file = open('PCA-E_' + task + '.txt', 'a')\n file.write(str(TEST_SEQUENCE_SIZE) + '\\t' + str(result / TEST_TOTAL_EPISODES) + '\\n')\n file.close()\n report += result / TEST_TOTAL_EPISODES\n result = 0\n\n\nif __name__ == \"__main__\":\n\n env = Environment(task + '.txt')\n\n agent_num = env.agent_num\n state_size = env.state_size\n action_size = env.action_size[0]\n observation_size = env.observation_size[0]\n input_size = observation_size + action_size\n\n writer = SummaryWriter()\n\n memory = Memory(memory_size=MEMORY_SIZE)\n main_model = Model(input_size=PCA_LENGTH + 1, hidden_size=HIDDEN_SIZE, output_size=action_size).float()\n target_model = Model(input_size=PCA_LENGTH + 1, hidden_size=HIDDEN_SIZE, output_size=action_size).float()\n\n target_model.load_state_dict(main_model.state_dict())\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(main_model.parameters(), lr=lr)\n\n one_hot_actions = [None for i in range(2)]\n observations = [None for i in range(2)]\n one_hot_observations = [None for i in range(2)]\n\n pca = fit_pca()\n\n train()\n\n test()","sub_path":"PCA-E.py","file_name":"PCA-E.py","file_ext":"py","file_size_in_byte":12746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"334070074","text":"#!/usr/bin/env python3\n\nfrom threading import Timer\nfrom random import randint\n\nfrom plugin import BasicPlugin\n\nclass Plugin(BasicPlugin):\n\t\"\"\"Oven Plugin\"\"\"\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\t\tself.name = \"oven\"\n\t\tself.priority = 0\n\t\tself.load_priority = 10\n\n\tdef finish(self):\n\t\tpass\n\n\tdef hook(self):\n\t\tself.bot.config.set_safe(\"plugins.\"+self.name, False, \"Oven all the things\")\n\t\tself.bot.config.set_safe(\"plugins.\"+self.name+\".time_min\", 7, _help=\"(int) Minimum time to wait\")\n\t\tself.bot.config.set_safe(\"plugins.\"+self.name+\".time_max\", 11, _help=\"(int) Maximum time to wait\")\n\t\treturn self.bot.config.get(\"plugins.\"+self.name)\n\n\tdef call(self, message):\n\t\tif message.command != \"PRIVMSG\":\n\t\t\treturn None\n\n\t\torigin = message.params[0] if message.params[0] != self.bot.ircsock.getnick() else message.origin()[1:]\n\n\t\tif message.params[1] == self.bot.config.get(\"command_trigger\")+\"oven\" and len(message.params) > 2:\n\t\t\tself.bot.ircsock.action(origin, \"prepares his 
ovens\")\n\t\t\twait_time = randint(self.bot.config.get(\"plugins.\"+self.name+\".time_min\")\n\t\t\t\t,self.bot.config.get(\"plugins.\"+self.name+\".time_max\")\n\t\t\t)\n\t\t\tt = Timer(wait_time, self.bot.ircsock.action, (origin, \"ovens {0}\".format(\" \".join(message.params[2:]))))\n\t\t\tt.daemon = True\n\t\t\tt.start()\n","sub_path":"Plugins/Oven/Oven.py","file_name":"Oven.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"40175449","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author : Lily\n@Date : 2020/6/30 13:28\n@Desc :\n\"\"\"\nfrom src.utils.logfactory import LogFactory\nfrom src.db import odps\nfrom src.conf import Config\nimport pandas as pd\nimport numpy as np\nimport traceback\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom statsmodels.tsa.arima_model import ARMA\n\nclass salemodel2:\n\n def __init__(self, date, days):\n self.logging = LogFactory()\n self.odps = odps()\n self.config = Config().config_data\n self.date = date\n self.days = days\n\n def get_dsitrib_data(self,data):\n \"\"\"\n 预测-多进程\n :return: list, list的元素为DataFrame\n \"\"\"\n try:\n data['sal_amt_clr_log'] = data['sal_amt_clr'].apply(lambda x: np.log(x) if x > 1 else 1e-10)\n # 得到具体分区的时间序列\n pool = ThreadPool(4)\n yhat = pool.starmap(self.algo_arma, zip(data.groupby(by=['distrib_code','season_code','new_line_code'])))\n pool.close()\n pool.join()\n except:\n self.logging.error(traceback.format_exc())\n return []\n return yhat\n\n def algo_arma(self, grouper):\n \"\"\"\n 时间序列算法\n :param grouper:\n :return: DataFrame\n \"\"\"\n try:\n data = grouper[1].sal_amt_clr_log.sort_index()\n model = ARMA(data,(2,1)).fit()\n yhat = model.predict(start=len(data), end=len(data) + self.days - 1)\n yhat = pd.DataFrame(yhat, columns=['yhat'])\n yhat['distrib_code'] = grouper[0][0]\n yhat['season_code'] = grouper[0][1]\n yhat['new_line_code'] = grouper[0][2]\n yhat['stat_date'] = yhat.index.to_timestamp()\n except:\n self.logging.error(\"distrib_code={0},season_code={1},new_line_code={2},arma sale predict fail!\".foramt(grouper[0][0],grouper[0][1],grouper[0][2]))\n self.logging.error(traceback.format_exc())\n return pd.DataFrame()\n self.logging.info(\"algo_time_series success!\")\n return yhat\n\n def salepredict(self, data,fest_weight,week_weight):\n \"\"\"\n 输出最终预测结果\n :return: DataFrame\n \"\"\"\n yhat = self.get_dsitrib_data(data)\n num = len(yhat)\n if num == 0:\n self.logging.error(\"salepredict fail!\")\n return pd.DataFrame(columns=['distrib_code', 'season_code', 'new_line_code', 'stat_date', 'sal_pred_arma'])\n else:\n try:\n yhat_full = pd.DataFrame(columns=['yhat','distrib_code','season_code','new_line_code','stat_date'])\n for i in range(num):\n yhat_full = pd.concat([yhat_full, yhat[i]],ignore_index=True)\n yhat_full['day_of_week'] = yhat_full['stat_date'].dt.dayofweek.values\n yhat_full['stat_date'] = yhat_full['stat_date'].dt.strftime('%Y%m%d').values\n yhat_full['yhat'] = yhat_full['yhat'].apply(lambda x: np.exp(x))\n # 乘以权重\n yhat_full = pd.merge(yhat_full, week_weight, on=['distrib_code','day_of_week'], how = 'left')\n yhat_full = pd.merge(yhat_full, fest_weight, on=['distrib_code','stat_date'], how = 'left')\n yhat_full['weight'] = yhat_full.apply(func=lambda x: x.fest_sal_weight if x.fest_sal_weight > 0 else x.sal_week_wgt,\n axis=1)\n yhat_full['sal_pred_arma'] = yhat_full['yhat'] * yhat_full['weight']\n except:\n self.logging.error(traceback.format_exc())\n return pd.DataFrame()\n 
self.logging.info(\"salepredict success!\")\n return yhat_full\n","sub_path":"src/sale/salemodel2.py","file_name":"salemodel2.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"640250500","text":"# -*- coding: utf-8 -*-\n\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB, ComplementNB, BernoulliNB\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nclass NaiveBayesian:\n def __init__(self, x, y, type='GaussianNB', a=1.0, bina=0.0, preprocess=True):\n\n self.x = x\n self.y = y\n self.type = type\n self.a = a\n self.bina = bina\n self.preprocess = preprocess\n\n\n def construct_nbc_model(self):\n\n if (self.type=='GaussianNB'):\n self.nbc = GaussianNB()\n elif (self.type=='MultinomialNB'):\n self.nbc = MultinomialNB(alpha=self.a)\n elif (self.type=='ComplementNB'):\n self.nbc = ComplementNB(alpha=self.a)\n else:\n self.nbc = BernoulliNB(alpha=self.a, binarize=self.bina)\n if self.preprocess:\n self.Xscaler = preprocessing.StandardScaler().fit(self.x)\n self.x = self.Xscaler.transform(self.x)\n\n self.nbc.fit(self.x, self.y)\n\n def extract_nbc_samples(self, x_test):\n\n if (self.type=='BernoulliNB'):\n if self.preprocess:\n x_test = self.Xscaler.transform(x_test)\n\n return self.nbc.predict(x_test)\n\n def clfReport(self):\n x_train, x_test, y_train, y_test = train_test_split(self.x, self.y, test_size=0.3)\n self.nbc.fit(x_train, y_train)\n pre = self.nbc.predict(x_test)\n\n return metrics.classification_report(y_test, pre) # 1d array-like\n\n\n","sub_path":"all/classification_models/naive_bayesian_model.py","file_name":"naive_bayesian_model.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"385403493","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tc', '0006_remove_testsession_state'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TestStep',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('applies_to_standard', models.BooleanField(default=True, db_index=True)),\n ('applies_to_flat', models.BooleanField(default=True, db_index=True)),\n ('applies_to_small', models.BooleanField(default=True, db_index=True)),\n ('applies_to_elongated', models.BooleanField(default=True, db_index=True)),\n ('min_weight', models.FloatField(default=0.0)),\n ('max_weight', models.FloatField(default=999999.9)),\n ('procedure', models.CharField(default=b'', max_length=20)),\n ('step', models.CharField(default=b'', max_length=20)),\n ('more_info', models.CharField(default=b'', max_length=150)),\n ('step_text', models.CharField(default=b'', max_length=200)),\n ('page_template', models.CharField(default=b'404.html', max_length=50)),\n ('skip', models.CharField(default=b'', max_length=20)),\n ('comment_required', models.BooleanField(default=False)),\n ],\n ),\n ]\n","sub_path":"tc/migrations/0007_teststep.py","file_name":"0007_teststep.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"77792914","text":"#-*- coding:utf-8 -*-\n\nfrom keras.models import load_model\nimport cv2\nimport os, sys\nimport numpy as np\nimport base64\n\n\ndef 
predict_face():\n\n\n\n os.system(\"autocrop -w 100 -H 100\")\n\n img = cv2.imread('./input_test.jpg')\n\n name_list = list(['김수현', '임창정', '성동일', '류승범', '오달수', '차태현', '엄태웅', '김건모', '변희봉', '문재인', '이정재', '유노윤호',\n '김윤석', '강호동', '서강준', '정우성', '박지성', '박명수', '지창욱', 'mc몽', '현빈', '허경환', '김장훈', '김무성',\n '이덕화', '안성기', '이한위', '신하균', '유승호', '조진웅', '권지용', '김수로', '이혁재', '김현중', '하하', '유해진',\n '김병만', '차인표', '서인국', '김광규', '유세윤', '송중기', '공유', '장혁', '지코', '장근석', '전현무', '최일화',\n '전인권', '안정환', '황정민', '유상무', '최민식', '윤계상', '손흥민', '마동석', '임재범', '설경구', '지성', '조세호',\n '조권', '이승윤', '이병헌', '안철수', '김흥국', '조승우', '박철민', '장동건', '박재범', '신현준', '박진영', '김범수',\n '이승기', '곽한구', '싸이', '양세형', '송강호', '홍준표', '한석규', '유아인', '김종민', '민경훈', '장동민', '유재석',\n '류승룡', '고수', '정준영', '이민호', '장기하', '은지원', '김희철', '전진', '권상우', '조현우', '강동원', '이진욱',\n '서장훈', '노홍철', '하정우', '이수근', '장병규', '원빈', '차승원', '박영규', '이명박', '진구', '유희열', '정준하',\n '박근형', '박중훈', '이적', '윤종신', '김준현', '이순재'])\n\n print(len(name_list))\n\n\n model = load_model('model_keras.h5')\n\n\n\n if img is not None:\n img = cv2.resize(img, (100,100), interpolation=cv2.INTER_CUBIC)\n img = np.reshape(img,[1,100,100,3])\n\n classes = model.predict_classes(img)\n\n print(classes)\n\n print(name_list[classes[0]])\n\n return name_list[classes[0]]\n","sub_path":"image_and_dcinside/web/mysite/face_search.py","file_name":"face_search.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"465247938","text":"\"\"\"\nFrom: https://gist.github.com/fpom/92a690a8cf89cebd7d4a\n\"\"\"\nimport os\nimport collections\nimport gevent\nfrom gevent import pool\nfrom gevent import queue\n\n\nevent = collections.namedtuple(\"event\", [\"name\", \"path\", \"isdir\"])\n\n\nclass GeventObserver(object):\n def __init__(self, root):\n self.root = os.path.abspath(root)\n self.pool = pool.Pool()\n self.q = queue.Queue()\n self.w = {}\n self.add(root, \"crawl\")\n\n def get(self):\n return self.q.get()\n\n def add(self, path, evt=\"created\"):\n if os.path.isdir(path):\n for name in os.listdir(path):\n self.add(os.path.join(path, name), evt)\n self.pool.spawn(self.watch, path)\n self.q.put(event(evt, path, os.path.isdir(path)))\n\n def watch(self, path):\n hub = gevent.get_hub()\n watcher = hub.loop.stat(path, 1)\n self.w[path] = watcher\n isdir = os.path.isdir(path)\n if isdir:\n old = set(os.listdir(path))\n while path in self.w:\n try:\n with gevent.Timeout(2):\n hub.wait(watcher)\n except: # noqa: E722\n continue\n if os.path.isdir(path):\n new = set(os.listdir(path))\n for name in new - old:\n self.add(os.path.join(path, name))\n old = new\n elif os.path.exists(path):\n self.q.put(event(\"modified\", path, isdir))\n else:\n break\n if isdir:\n for name in old:\n self.w.pop(os.path.join(path, name), None)\n self.w.pop(path, None)\n self.q.put(event(\"delete\", path, isdir))\n\n\nclass DirWatcher(object):\n _thread = None\n\n def __init__(self, path_to_watch, callback, observer_class=None):\n self.observer = GeventObserver(path_to_watch)\n self.callback = callback\n\n def _run(self):\n while True:\n event = self.observer.get()\n self.callback(event.path, event.name)\n gevent.sleep(0)\n\n def start(self):\n if self._thread is not None:\n raise ValueError(\"Cannot start if already started\")\n self._thread = gevent.spawn(self._run)\n\n def stop(self):\n 
self._thread.kill()\n","sub_path":"populus/utils/observers/observers_gevent.py","file_name":"observers_gevent.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"397871298","text":"from os.path import basename\n\ndef error(function_name, msg):\n \"\"\"\n :param function_name: String name of the function the error occurred in\n :param msg: String that explains the error\n :return String: Information regarding a fatal Exception\n \"\"\"\n\n return \"[ERROR] {0} - {1}: {2}\".format(basename(__file__), function_name, msg)\n\n\ndef warning(function_name, msg):\n \"\"\"\n :param function_name: String name of the function the problem occurred in\n :param msg: String that explains the warning\n :return String: Information regarding a raised Exception\n \"\"\"\n\n return \"[WARNING] {0} - {1}: {2}\".format(basename(__file__), function_name, msg)\n\n\nclass SinglyLinkedList:\n def __init__(self):\n self._root_node = None\n self._head_node = None\n self._node_count = 0\n\n self._current = None\n\n class SinglyLinkedListNode:\n _data = None\n _key = None\n _next = None\n\n def __init__(self, data, key, head):\n self._data = data\n self._key = key\n self._next = head\n\n def __lt__(self, other):\n return self._data < other.get_data()\n\n def get_data(self):\n return self._data\n\n def set_data(self, _data):\n self._data = _data\n\n def get_key(self):\n return self._key\n\n def set_key(self, _key):\n self._key = _key\n\n def get_next(self):\n return self._next\n\n def set_next(self, _next):\n self._next = _next\n\n class SinglyLinkedListIterator:\n def __init__(self, node):\n self.node = node\n\n def __iter__(self):\n return self\n\n def next(self):\n if self.node is None:\n raise StopIteration\n\n else:\n current_node = self.node\n self.node = self.node.get_next()\n\n return current_node\n\n def __del__(self):\n del self\n\n def __hash__(self):\n return hash(repr(self))\n\n def __iter__(self):\n return self.SinglyLinkedListIterator(self._root_node)\n\n def __len__(self):\n return self._node_count\n\n def __new_node(self, data, key, child):\n # Update node counter\n self._node_count += 1\n return self.SinglyLinkedListNode(data, key, child)\n\n def __remove_node(self, remove):\n # Removal of first node\n if remove == self._root_node:\n self._root_node = remove.get_next()\n\n else:\n # Iterate up to node between start and end\n node = None\n for node in self:\n if node.get_next() == remove:\n break\n\n # Midway removal\n if node != self._head_node:\n node.set_next(remove.get_next())\n\n # End removal\n else:\n self._head_node = node\n\n self._node_count -= 1\n return 0\n\n def size(self):\n \"\"\"\n :return int: The amount of nodes with data and keys\n \"\"\"\n return self._node_count\n\n def push(self, data, key):\n \"\"\"\n Pushes data and a key to the end of the Linked List\n\n :param data: Data to be inserted\n :param key: Key to be inserted\n :return None:\n\n :except\n \"\"\"\n if data is None:\n raise ValueError(\n error(self.push.__name__, \"Data passed is None\")\n )\n\n # Allocate memory for the new node\n new_node = self.__new_node(data, key, None)\n\n # If we already have a node inserted, insert the new one in front of it\n if self._head_node is not None:\n self._head_node._next = new_node\n\n # Point to the new head\n self._head_node = new_node\n\n # First allocation, set the root node of this list\n if self._root_node is None:\n self._root_node = self._head_node\n self._current = 
self._root_node\n\n def push_start(self, data, key):\n \"\"\"\n Pushes data and a key to the front of the Linked List\n\n :param data: Data to be inserted\n :param key: Key to be inserted\n :return None:\n\n :exception ValueError: Invalid data (None)\n \"\"\"\n # Unusuable data\n if data is None:\n raise ValueError(\n error(self.push_start.__name__, \"Data passed is None\")\n )\n\n # First allocation\n if self.size() == 0:\n self.push(data, key)\n\n else:\n old_start = self._root_node\n new_node = self.__new_node(data, key, old_start)\n\n self._root_node = new_node\n self._current = self._root_node\n\n def push_at(self, data, key, offset):\n \"\"\"\n Pushes data and a key to a given index of the Linked list\n\n :param data: Data to be inserted\n :param key: Key to be inserted\n :param offset: The index of the node the other parameters are to be inserted at\n :return None:\n\n :exception ValueError: Invalid data (None)\n :exception IndexError: Invalid index (either smaller than 0 or larger than the List's size)\n \"\"\"\n # Unusuable data\n if data is None:\n raise ValueError(\n error(self.push_at.__name__, \"Data passed is None\"))\n\n # Out of bounds\n if offset < 1:\n raise IndexError(\n error(self.push_at.__name__, \"The given offset {0} is smaller than 1\".format(offset))\n )\n\n # Same as insert at start\n elif offset == 1:\n return self.push_start(data, key)\n\n # Same as simple push\n elif offset == self._node_count + 1:\n return self.push(data, key)\n\n # Out of bounds\n elif offset > self._node_count:\n raise IndexError(\n error(self.push_at.__name__,\n \"The given offset {0} is greater than the Singly Linked List's size {1}\"\n .format(offset, self.size()))\n )\n\n # Insertion in the middle of the list\n node = self._root_node\n parent = None\n for _ in xrange(1, offset):\n parent = node\n node = node.get_next()\n\n new_node = self.__new_node(data, key, node)\n parent._next = new_node\n\n return 0\n\n def set(self, data, key, offset):\n \"\"\"\n Modifies a node's data and key, at the given offset\n\n :param data: Data to be inserted\n :param key: Key to be inserted\n :param offset: The index of the node whose data and key are to be modified\n :return None:\n\n :exception ValueError: Invalid data (None)\n :exception RuntimeError:The List is empty i.e. 
there is nothing to modify\n :exception IndexError: Invalid index (either smaller than 0 or larger than the List's size)\n \"\"\"\n # Unusuable data\n if data is None:\n raise ValueError(\n error(self.set.__name__, \"Data passed is None\")\n )\n\n # Empty List\n if self.size() == 0:\n raise RuntimeError(\n error(self.set.__name__, \"Attempted to modify data in an empty Singly Linked List\")\n )\n\n # Out of bounds\n if offset > self._node_count:\n raise IndexError(\n error(self.set.__name__, \n \"The given offset {0} is greater than the Singly Linked List's size {1}\"\n .format(offset, self._node_count))\n )\n\n # Out of bounds\n if offset < 1:\n raise IndexError(\n error(self.push_at.__name__, \"The given offset {0} is smaller than 1\"\n .format(offset))\n )\n\n node = self._root_node\n for _ in xrange(1, offset):\n node = node.get_next()\n\n if node.get_data() != data:\n node.set_data(data)\n\n if node.get_key() != key:\n node.set_key(key)\n\n def lookup_wkey(self, key):\n \"\"\"\n Searches for a node with the given key\n\n :param key: The key whose node is to be found\n :return None: The node wasn't found\n :return node: The first occurrence of the node with the given key\n\n :exception ValueError: Generic key (None) cannot be used for searching\n :exception RuntimeError:The List is empty i.e. there is nothing to lookup\n \"\"\"\n # None is a generic key\n if key is None:\n raise ValueError(\n error(self.lookup_wkey.__name__, \"Cannot use a generic key (None) to search for data\")\n )\n\n # Empty Linked List\n if self.size() == 0:\n raise RuntimeError(\n error(self.lookup_wkey.__name__, \"Cannot search for data in an empty Singly Linked List\")\n )\n\n # Search for node\n for node in self:\n if node.get_key() == key:\n return node\n\n return None\n\n def lookup_wdata(self, data):\n \"\"\"\n Searches for a node with the given data\n\n :param data: The data whose node is to be found\n :return None: The node wasn't found\n :return node: The first occurrence of the node with the given data\n\n :exception ValueError: Invalid data (None) does not exist within the List\n :exception RuntimeError:The List is empty i.e. 
there is nothing to lookup\n \"\"\"\n if data is None:\n raise ValueError(\n error(self.lookup_wdata.__name__, \n \"Cannot use a invalid data (None) to search for data\")\n )\n\n # Empty Linked List\n if self.size() == 0:\n raise RuntimeError(\n error(self.lookup_wkey.__name__, \n \"Cannot search for data in an empty Singly Linked List\")\n )\n\n # Search for node\n for node in self:\n if node.get_data() == data:\n return node\n\n return None\n\n def remove_wkey(self, key):\n \"\"\"\n Removes a node with the given key\n\n :param key: The key whose node is to be removed\n :return None: The given key was not found, nothing has changed\n :return data: The data of the node that was removed\n \"\"\"\n remove = self.lookup_wkey(key)\n if remove is None:\n print(warning(self.remove_wkey.__name__,\n \"The given key is not present in the Singly Linked List\"))\n return None\n\n data = remove.get_data()\n self.__remove_node(remove)\n\n return data\n\n def remove_wdata(self, data):\n \"\"\"\n Removes a node with the given data\n\n :param data: The data whose node is to be removed\n :return None: The given data was not found, nothing has changed\n :return data: The data of the node that was removed\n \"\"\"\n remove = self.lookup_wdata(data)\n if remove is None:\n print(warning(self.remove_wdata.__name__,\n \"The given data is not present in the Singly Linked List\"))\n return None\n\n self.__remove_node(remove)\n\n return data\n\n def collect_data(self):\n \"\"\"\n Collects all the data in the Linked List\n\n :return List: Contains the data of every node in the Linked List\n \"\"\"\n return [node.get_data() for node in self]\n\n def collect_keys(self):\n \"\"\"\n Collects all the key in the Linked List\n\n :return List: Contains the key of every node in the Linked List\n \"\"\"\n return [node.get_key() for node in self]\n\n def collect_all(self):\n \"\"\"\n Collects all the data and keys in the Linked List\n\n :return List: Contains the data and key of every node in the Linked List\n \"\"\"\n return [(node.get_data(), node.get_key()) for node in self]\n\n def sort(self):\n raise NotImplementedError\n\n def inorder(self):\n \"\"\"\n Prints the contents of the Linked List\n\n :return None:\n \"\"\"\n print(\" ->\"),\n for node in self:\n print(\"{0}, {1} ->\".format(node.get_data(), node.get_key())),\n\n print(\"\")\n","sub_path":"Python/Linked-Lists/Singly-Linked-List/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":12537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"518623110","text":"from django.conf.urls import url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom . 
import views\nurlpatterns = [\n # Examples:\n # url(r'^$', 'ywzy.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url (r'^$', views.index,name='index'),\n url (r'^search_candidate/?$',views.searchCandidate,name='search_candidate'),\n url (r'^search_job/?$',views.searchJob,name='search_job'),\n\n url(r'^post_job/$',views.postJob,name='post_job'),\n url(r'^modify_job/(?P[0-9]+)/?$',views.modifyJob,name='modify_job'),\n url(r'^posted_job/?$',views.postedJob,name='posted_job'),\n url(r'^job_details/(?P[0-9]+)/?$',views.jobDetails,name='job_details'),\n url(r'^jobseeker/(?P[0-9]+)/?$',views.jobseeker,name='jobseeker'),\n url(r'^employer/(?P[0-9]+)/?$',views.employer,name='employer'),\n url(r'^applied_job/?$',views.appliedJob,name='applied_job'),\n url(r'^billboard/?$',views.billboard,name='billboard'),\n url(r'^403/?',views.permission_required,name='403'),\n url(r'^a/$',views.a,name=\"a\")\n]","sub_path":"jobsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"629149791","text":"# 🚨 Don't change the code below 👇\nheight = input(\"enter your height in m: \")\nweight = input(\"enter your weight in kg: \")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\n\nheight = float(height.replace(',', '.'))\nweight = float(weight.replace(',', '.'))\n\nbmi = weight / height**2\n\nprint(int(round(bmi)))\n","sub_path":"day02/bmiCalculator.py","file_name":"bmiCalculator.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"229601976","text":"\nfrom Tkinter import *\n\n#create window\nroot = Tk()\n\n#scale the window\nroot.title(\"Weather App\")\nroot.geometry(\"500x300\")\n\napp = Frame(root)\napp.grid()\nlabel = Label(app, text = \"this is a test\")\nlabel.grid()\n\nbutton1 = Button(app, text = \"This is a Button\")\nbutton1.grid(row=3, column=2)\n\nbutton2 = Button(app)\nbutton2.grid()\nbutton2.configure(text =\"this will show text\")\n\nbutton3 = Button(app)\nbutton3.grid()\n\nbutton3[\"text\"] = \"This will show up as well.\"\n\n#kick off the event loop\nroot.mainloop()\n","sub_path":"Python/GUI/GUI Tkinter example.py","file_name":"GUI Tkinter example.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"650188353","text":"# -*- coding:utf-8 -*-\n\"\"\"\ncreated by server on 14-8-12下午2:17.\n\"\"\"\nfrom app.proto_file.feast_pb2 import EatFeastResponse, GetEatTimeResponse\nfrom gfirefly.server.globalobject import remoteserviceHandle\nfrom shared.db_opear.configs_data import game_configs\nimport time\nfrom shared.tlog import tlog_action\n\n\n@remoteserviceHandle('gate')\ndef feast_820(pro_data, player):\n \"\"\"美味酒席\n \"\"\"\n response = EatFeastResponse()\n res = eat_feast(player)\n response.res = res\n return response.SerializeToString()\n\n\n@remoteserviceHandle('gate')\ndef get_eat_time_821(pro_data, player):\n \"\"\"获取上次吃的时间\n \"\"\"\n response = GetEatTimeResponse()\n last_eat_time = player.feast.last_eat_time\n response.res.result = True\n response.eat_time = last_eat_time\n return response.SerializeToString()\n\n\ndef eat_feast(player):\n \"\"\" 吃 \"\"\"\n \"\"\"\n # (tm_year=2014, tm_mon=9, tm_mday=1, tm_hour=18, tm_min=38, tm_sec=1, tm_wday=0, tm_yday=244, tm_isdst=0)\n last_eat_time = 
time.localtime(player.feast.last_eat_time).tm_hour*60*60 + \\\n time.localtime(player.feast.last_eat_time).tm_min*60 + time.localtime(player.feast.last_eat_time).tm_sec\n eat_times = game_configs.base_config.get(u'time_vigor_activity')\n now = time.localtime().tm_hour*60*60 + time.localtime().tm_min*60 + time.localtime().tm_sec\n for eat_time in eat_times:\n t1 = eat_time[0].split(':')\n time1 = int(t1[0])*60*60 + int(t1[1])*60\n t2 = eat_time[1].split(':')\n time2 = int(t2[0])*60*60 + int(t2[1])*60\n if time2 >= now >= time1:\n if time2 >= last_eat_time >= time1:\n # 已经吃过\n return 1\n # 吃\n player.stamina.stamina += game_configs.base_config.get(u'num_vigor_activity')\n player.stamina.save_data()\n player.feast.last_eat_time = int(time.time())\n player.feast.save_data()\n return 2\n # 没到时间\n return 3\n \"\"\"\n\n last_eat_time = player.feast.last_eat_time\n now = int(time.time())\n eat_times = game_configs.base_config.get(u'time_vigor_activity')\n t = time.localtime(now)\n for eat_time in eat_times:\n time1 = time.mktime(time.strptime(time.strftime('%Y-%m-%d '+eat_time[0]+':00', t), '%Y-%m-%d %H:%M:%S'))\n time2 = time.mktime(time.strptime(time.strftime('%Y-%m-%d '+eat_time[1]+':00', t), '%Y-%m-%d %H:%M:%S'))\n if time2 >= now >= time1:\n if time2 >= last_eat_time >= time1:\n # 已经吃过\n return 1\n # 吃\n player.stamina.stamina += game_configs.base_config.get(u'num_vigor_activity')\n player.stamina.save_data()\n player.feast.last_eat_time = int(time.time())\n player.feast.save_data()\n tlog_action.log('Feast', player, player.stamina.stamina)\n return 2\n # 没到时间\n return 3\n","sub_path":"app/game/action/node/feast.py","file_name":"feast.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"334380123","text":"# -*- coding: utf8 -*-\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import LatentDirichletAllocation\nimport numpy as np\nimport glob\nimport re\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nimport random\nimport gensim\nimport logging\nfrom pymystem3 import Mystem\n\n\ndef get_pos_for_semvector(mystem_pos):\n mystemtag_pos_dict = {'S,': '_NOUN', 'S=': '_NOUN',\n 'A,': '_ADJ', 'A=': '_ADJ',\n 'ADV,': '_ADV', 'ADV=': '_ADV',\n 'V,': '_VERB', 'V=': '_VERB'}\n for tag in mystemtag_pos_dict:\n if mystem_pos.startswith(tag):\n pos = mystemtag_pos_dict[tag]\n return pos\n\nm = Mystem()\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nncrl_model = 'ruscorpora_upos_skipgram_300_5_2018.vec'\nncrl_model = gensim.models.KeyedVectors.load_word2vec_format(ncrl_model, binary=False)\nncrl_model.init_sims(replace=True)\n\n\ndef display_topics(model, feature_names, no_top_words, n_topics):\n \"\"\"Displays all topics' top-words and semdensity per topic\"\"\"\n all_topics_topwords_similarity = list()\n no_top_words_for_semantics = 10\n for topic_idx, topic in enumerate(model.components_):\n print(\"Topic {}:\".format(topic_idx))\n print(\", \".join([feature_names[i]\n for i in topic.argsort()[:-no_top_words - 1:-1]]))\n topwords = [feature_names[i] for i in topic.argsort()[:-no_top_words_for_semantics - 1:-1]]\n topwords_similarity = list()\n not_in_model = 0\n for word1 in topwords:\n pos1 = m.analyze(word1)[0]['analysis'][0]['gr']\n pos1 = get_pos_for_semvector(pos1)\n word1 = word1 + pos1\n for word2 in topwords:\n pos2 = m.analyze(word2)[0]['analysis'][0]['gr']\n pos2 = get_pos_for_semvector(pos2)\n 
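                # descriptive note: the ruscorpora word2vec model indexes tokens as
                # 'word_UPOS' (e.g. 'word_NOUN'), so the tag is appended before lookup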
word2 = word2 + pos2\n if word1 in ncrl_model and word2 in ncrl_model and word1 != word2:\n word1_word2_similarity = ncrl_model.similarity(word1, word2)\n else:\n word1_word2_similarity = 0\n not_in_model += 1\n topwords_similarity.append(word1_word2_similarity)\n topwords_similarity = sum(topwords_similarity)/((no_top_words_for_semantics-1)**2 - not_in_model)\n print(topwords_similarity)\n all_topics_topwords_similarity.append(topwords_similarity)\n print('\\nMean topics semantic similarity for {0} topics is {1}'.\n format(n_topics, np.mean(all_topics_topwords_similarity)))\n return np.mean(all_topics_topwords_similarity)\n\n\ndef grey_color_func(word, font_size, position, orientation, random_state=None,\n **kwargs):\n \"\"\"Establishes colour range for word-clouds\"\"\"\n return \"hsl(0, 0%%, %d%%)\" % random.randint(0, 30)\n\n\ndef display_wordclouds(model, feature_names, no_top_words, n_topics):\n \"\"\"Displays word-clouds for n topics' top-words\"\"\"\n top_words_weight_dicts = list()\n for topic_idx, topic in enumerate(model.components_):\n top_words_weight_dict = dict()\n for i in topic.argsort()[:-no_top_words - 1:-1]:\n top_words_weight_dict[feature_names[i]] = model.components_[topic_idx][i]\n top_words_weight_dicts.append(top_words_weight_dict)\n for t in range(n_topics):\n plt.figure()\n plt.imshow(WordCloud(background_color='white', color_func=grey_color_func).fit_words(top_words_weight_dicts[t]))\n plt.axis(\"off\")\n plt.title(\"Topic #\" + str(t))\n plt.show()\n\n# Opening a stop-words list for Russian\nstopwords_ru = open('./stopwords_and_others/stop_ru.txt', 'r', encoding='utf-8').read().split('\\n')\n\n# Determining train texts path (txt-files)\ntrain_texts_path = '/Users/IrinaPavlova/Desktop/Uni/Бакалавриат/2015-2016/' \\\n 'Programming/github desktop/RusDraCor/Ira_Scripts/' \\\n 'TopicModelling/rusdracor_topic_modeling/corpora/' \\\n 'speech_corpus_no_prop_char_names_ONLY_NOUNS/byplay/byplay/'\n\ntrain_documents = list()\ntrain_documents_titles = list()\nall_train_texts = glob.glob(train_texts_path+'*.txt')\n\n# Splitting train texts into word-chunks\nn = 0\nk = 0\nchunk_size = 500\nmin_chunk_size = 100\nfor doc in all_train_texts:\n train_documents_titles.append(doc.split('/')[-1].split('.txt')[0])\n doc_text = re.sub('[\\.,!\\?\\(\\)\\-:;—…́«»–]', '', open(doc, 'r', encoding='utf-8').read()).split()\n for i in range(0, len(doc_text), chunk_size):\n one_chunk = ' '.join(doc_text[i:i + chunk_size])\n if len(one_chunk.split()) > min_chunk_size:\n train_documents.append(one_chunk)\n if min_chunk_size < len(one_chunk.split()) < chunk_size:\n k += 1\n if len(one_chunk.split()) < min_chunk_size:\n n += 1\nprint('Taking chunks of length {0} WORDS'.format(chunk_size))\nprint('Chunks with length less than {0} (did not take):'.format(min_chunk_size), n)\nprint('Chunks with length more than {0} and less than {1} (took):'.format(min_chunk_size, chunk_size), k)\n\n\n# Reporting statistics on the model\nprint('\\nTopic modeling train text collection size: ', len(train_documents))\nprint('Median length of train collection\\'s documents: ', np.median([len(d.split()) for d in train_documents]))\nprint('Mean length of train collection\\'s documents: ', np.mean([len(d.split()) for d in train_documents]))\nprint('Minimum length of train collection\\'s documents: ', np.min([len(d.split()) for d in train_documents]))\nprint('Maximum length of train collection\\'s documents: ', np.max([len(d.split()) for d in train_documents]))\n\nwrite_semdensity = 
open('/Users/IrinaPavlova/Desktop/Uni/Бакалавриат/2015-2016/'\n 'Programming/github desktop/RusDraCor/Ira_Scripts/'\n 'TopicModelling/rusdracor_topic_modeling/graphs_6_topics/semdensity_2/Only Nouns 500-100.csv', 'w',\n encoding='utf-8')\nwrite_semdensity.write('numtopics;average_topic_semdensity_for_10_topwords;model\\n')\n\n\ndef run_TM(n_topics, doprint):\n \"\"\"Performs Topic Modeling, present topics and return/print/write in a file model's application results\"\"\"\n n_topics = n_topics\n no_top_words = 40\n\n tf_vectorizer = CountVectorizer(max_df=0.7,\n min_df=0.2,\n stop_words=stopwords_ru,\n max_features=500)\n tf = tf_vectorizer.fit_transform(train_documents)\n tf_feature_names = tf_vectorizer.get_feature_names()\n\n lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=100, random_state=42)\n lda_doc_topic = lda.fit_transform(tf)\n\n # Printing topics' 40 top-words, printing topics', semdensity oer topic,\n # displaying word-clouds for 100 topics' top-words if needed\n if doprint:\n print('LDA doc-topic shape:', lda_doc_topic.shape)\n print('\\nTOPICS\\nLDA top terms:')\n display_topics(lda, tf_feature_names, no_top_words, n_topics)\n print('\\n\\n')\n # display_wordclouds(lda, tf_feature_names, 100, n_topics)\n model = 'Only Nouns 500-100'\n mean_semdensity = display_topics(lda, tf_feature_names, no_top_words, n_topics)\n write_semdensity.write(str(n_topics)+';'+str(mean_semdensity)+';'+model+'\\n')\n\n print('The TM is finished, the model is applied to the data, '\n 'the semdensity per topic is calculated.')\n\n# Running topic modeling task to build a model with 5 topics\nfor t in range(4, 11):\n run_TM(t, 1)","sub_path":"semantic_vectors.py","file_name":"semantic_vectors.py","file_ext":"py","file_size_in_byte":7620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"534440090","text":"__all__ = (\n 'PureUtils',\n)\n\nimport re\nfrom typing import Any, Tuple\n\n\nclass PureUtils:\n \"\"\"与疫情上报网站无关的工具函数。可以将同一个本类的实例注入到多个类中。\"\"\"\n\n @staticmethod\n def is_number_data_in_range(data: Any, range: Tuple[int, int]) -> bool:\n \"\"\"\n 检查 data 所表示的十进制数字是否在 range 所表示��前闭后开区间内。\n :param data: 数字,或可转换为数字的类型\n :param range: 前闭后开区间\n :return: True/False 表示是/否在区间内\n \"\"\"\n if isinstance(data, int):\n int_data = data\n else:\n try:\n int_data = int(data)\n except:\n return False\n\n return range[0] <= int_data < range[1]\n\n @staticmethod\n def match_re_group1(re_str: str, text: str) -> str:\n \"\"\"\n 在 text 中匹配正则表达式 re_str,返回第 1 个捕获组(即首个用括号包住的捕获组)\n :param re_str: 正则表达式(字符串)\n :param text: 要被匹配的文本\n :return: 第 1 个捕获组捕获到的内容(字符串)\n \"\"\"\n match = re.search(re_str, text)\n if match is None:\n raise ValueError(f'在文本中匹配 {re_str} 失败,没找到任何东西。\\n请阅读脚本文档中的「使用前提」部分。')\n\n return match.group(1)\n\n @staticmethod\n def looks_falsy(x: Any) -> bool:\n \"\"\"\n 判断传入的参数是否看起来像是 False。\n 参考:https://dorey.github.io/JavaScript-Equality-Table/\n\n 使用例:\n >>> PureUtils.looks_falsy('false')\n True\n >>> PureUtils.looks_falsy('1')\n False\n >>> PureUtils.looks_falsy(dict())\n True\n >>> PureUtils.looks_falsy([0])\n True\n\n :param x: 几乎任意参数\n :return: 如果看起来像 False,则返回 True\n \"\"\"\n FALSY_OBJECTS = (\n '0', 'false', 'False', [0],\n )\n\n if isinstance(x, str):\n x = x.lower().strip()\n\n if bool(x) == False:\n return True\n for i in FALSY_OBJECTS:\n if x == i:\n return True\n\n return False\n\n @staticmethod\n def looks_truthy(x: Any) -> bool:\n \"\"\"\n 判断传入的参数是否看起来像是 False。\n 参考:https://dorey.github.io/JavaScript-Equality-Table/\n\n 使用例:\n >>> 
PureUtils.looks_truthy('false')\n False\n >>> PureUtils.looks_truthy('1')\n True\n >>> PureUtils.looks_truthy(dict())\n False\n >>> PureUtils.looks_truthy([0])\n False\n\n :param x: 几乎任意参数\n :return: 如果看起来像 True,则返回 True\n \"\"\"\n return not PureUtils.looks_falsy(x)\n","sub_path":"bupt_ncov_report/pure_utils/pure_utils.py","file_name":"pure_utils.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"415373159","text":"import argparse\nimport datetime\n\n# import glob\nimport os\nimport random\nimport warnings\n\n# from copy import deepcopy\nfrom functools import partial\n\nimport colorednoise as cn\nimport librosa\nimport numpy as np\nimport pandas as pd\nimport pytorch_lightning as pl\nimport scipy as sp\nimport soundfile as sf\nimport timm\nimport torch\nimport torch.optim as optim\nfrom pytorch_lightning import LightningDataModule, callbacks\n\n# from pytorch_lightning.utilities import rank_zero_info\nfrom sklearn.metrics import f1_score\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchaudio.transforms import AmplitudeToDB, MelSpectrogram\n\nwarnings.simplefilter(\"ignore\")\n\n\ntrain_df = pd.read_csv(\"../../input/birdclef-2021/train_metadata_new.csv\")\ntarget_columns = [\n \"acafly\",\n \"acowoo\",\n \"aldfly\",\n \"ameavo\",\n \"amecro\",\n \"amegfi\",\n \"amekes\",\n \"amepip\",\n \"amered\",\n \"amerob\",\n \"amewig\",\n \"amtspa\",\n \"andsol1\",\n \"annhum\",\n \"astfly\",\n \"azaspi1\",\n \"babwar\",\n \"baleag\",\n \"balori\",\n \"banana\",\n \"banswa\",\n \"banwre1\",\n \"barant1\",\n \"barswa\",\n \"batpig1\",\n \"bawswa1\",\n \"bawwar\",\n \"baywre1\",\n \"bbwduc\",\n \"bcnher\",\n \"belkin1\",\n \"belvir\",\n \"bewwre\",\n \"bkbmag1\",\n \"bkbplo\",\n \"bkbwar\",\n \"bkcchi\",\n \"bkhgro\",\n \"bkmtou1\",\n \"bknsti\",\n \"blbgra1\",\n \"blbthr1\",\n \"blcjay1\",\n \"blctan1\",\n \"blhpar1\",\n \"blkpho\",\n \"blsspa1\",\n \"blugrb1\",\n \"blujay\",\n \"bncfly\",\n \"bnhcow\",\n \"bobfly1\",\n \"bongul\",\n \"botgra\",\n \"brbmot1\",\n \"brbsol1\",\n \"brcvir1\",\n \"brebla\",\n \"brncre\",\n \"brnjay\",\n \"brnthr\",\n \"brratt1\",\n \"brwhaw\",\n \"brwpar1\",\n \"btbwar\",\n \"btnwar\",\n \"btywar\",\n \"bucmot2\",\n \"buggna\",\n \"bugtan\",\n \"buhvir\",\n \"bulori\",\n \"burwar1\",\n \"bushti\",\n \"butsal1\",\n \"buwtea\",\n \"cacgoo1\",\n \"cacwre\",\n \"calqua\",\n \"caltow\",\n \"cangoo\",\n \"canwar\",\n \"carchi\",\n \"carwre\",\n \"casfin\",\n \"caskin\",\n \"caster1\",\n \"casvir\",\n \"categr\",\n \"ccbfin\",\n \"cedwax\",\n \"chbant1\",\n \"chbchi\",\n \"chbwre1\",\n \"chcant2\",\n \"chispa\",\n \"chswar\",\n \"cinfly2\",\n \"clanut\",\n \"clcrob\",\n \"cliswa\",\n \"cobtan1\",\n \"cocwoo1\",\n \"cogdov\",\n \"colcha1\",\n \"coltro1\",\n \"comgol\",\n \"comgra\",\n \"comloo\",\n \"commer\",\n \"compau\",\n \"compot1\",\n \"comrav\",\n \"comyel\",\n \"coohaw\",\n \"cotfly1\",\n \"cowscj1\",\n \"cregua1\",\n \"creoro1\",\n \"crfpar\",\n \"cubthr\",\n \"daejun\",\n \"dowwoo\",\n \"ducfly\",\n \"dusfly\",\n \"easblu\",\n \"easkin\",\n \"easmea\",\n \"easpho\",\n \"eastow\",\n \"eawpew\",\n \"eletro\",\n \"eucdov\",\n \"eursta\",\n \"fepowl\",\n \"fiespa\",\n \"flrtan1\",\n \"foxspa\",\n \"gadwal\",\n \"gamqua\",\n \"gartro1\",\n \"gbbgul\",\n \"gbwwre1\",\n \"gcrwar\",\n \"gilwoo\",\n \"gnttow\",\n \"gnwtea\",\n \"gocfly1\",\n \"gockin\",\n 
\"gocspa\",\n \"goftyr1\",\n \"gohque1\",\n \"goowoo1\",\n \"grasal1\",\n \"grbani\",\n \"grbher3\",\n \"grcfly\",\n \"greegr\",\n \"grekis\",\n \"grepew\",\n \"grethr1\",\n \"gretin1\",\n \"greyel\",\n \"grhcha1\",\n \"grhowl\",\n \"grnher\",\n \"grnjay\",\n \"grtgra\",\n \"grycat\",\n \"gryhaw2\",\n \"gwfgoo\",\n \"haiwoo\",\n \"heptan\",\n \"hergul\",\n \"herthr\",\n \"herwar\",\n \"higmot1\",\n \"hofwoo1\",\n \"houfin\",\n \"houspa\",\n \"houwre\",\n \"hutvir\",\n \"incdov\",\n \"indbun\",\n \"kebtou1\",\n \"killde\",\n \"labwoo\",\n \"larspa\",\n \"laufal1\",\n \"laugul\",\n \"lazbun\",\n \"leafly\",\n \"leasan\",\n \"lesgol\",\n \"lesgre1\",\n \"lesvio1\",\n \"linspa\",\n \"linwoo1\",\n \"littin1\",\n \"lobdow\",\n \"lobgna5\",\n \"logshr\",\n \"lotduc\",\n \"lotman1\",\n \"lucwar\",\n \"macwar\",\n \"magwar\",\n \"mallar3\",\n \"marwre\",\n \"mastro1\",\n \"meapar\",\n \"melbla1\",\n \"monoro1\",\n \"mouchi\",\n \"moudov\",\n \"mouela1\",\n \"mouqua\",\n \"mouwar\",\n \"mutswa\",\n \"naswar\",\n \"norcar\",\n \"norfli\",\n \"normoc\",\n \"norpar\",\n \"norsho\",\n \"norwat\",\n \"nrwswa\",\n \"nutwoo\",\n \"oaktit\",\n \"obnthr1\",\n \"ocbfly1\",\n \"oliwoo1\",\n \"olsfly\",\n \"orbeup1\",\n \"orbspa1\",\n \"orcpar\",\n \"orcwar\",\n \"orfpar\",\n \"osprey\",\n \"ovenbi1\",\n \"pabspi1\",\n \"paltan1\",\n \"palwar\",\n \"pasfly\",\n \"pavpig2\",\n \"phivir\",\n \"pibgre\",\n \"pilwoo\",\n \"pinsis\",\n \"pirfly1\",\n \"plawre1\",\n \"plaxen1\",\n \"plsvir\",\n \"plupig2\",\n \"prowar\",\n \"purfin\",\n \"purgal2\",\n \"putfru1\",\n \"pygnut\",\n \"rawwre1\",\n \"rcatan1\",\n \"rebnut\",\n \"rebsap\",\n \"rebwoo\",\n \"redcro\",\n \"reevir1\",\n \"rehbar1\",\n \"relpar\",\n \"reshaw\",\n \"rethaw\",\n \"rewbla\",\n \"ribgul\",\n \"rinkin1\",\n \"roahaw\",\n \"robgro\",\n \"rocpig\",\n \"rotbec\",\n \"royter1\",\n \"rthhum\",\n \"rtlhum\",\n \"ruboro1\",\n \"rubpep1\",\n \"rubrob\",\n \"rubwre1\",\n \"ruckin\",\n \"rucspa1\",\n \"rucwar\",\n \"rucwar1\",\n \"rudpig\",\n \"rudtur\",\n \"rufhum\",\n \"rugdov\",\n \"rumfly1\",\n \"runwre1\",\n \"rutjac1\",\n \"saffin\",\n \"sancra\",\n \"sander\",\n \"savspa\",\n \"saypho\",\n \"scamac1\",\n \"scatan\",\n \"scbwre1\",\n \"scptyr1\",\n \"scrtan1\",\n \"semplo\",\n \"shicow\",\n \"sibtan2\",\n \"sinwre1\",\n \"sltred\",\n \"smbani\",\n \"snogoo\",\n \"sobtyr1\",\n \"socfly1\",\n \"solsan\",\n \"sonspa\",\n \"soulap1\",\n \"sposan\",\n \"spotow\",\n \"spvear1\",\n \"squcuc1\",\n \"stbori\",\n \"stejay\",\n \"sthant1\",\n \"sthwoo1\",\n \"strcuc1\",\n \"strfly1\",\n \"strsal1\",\n \"stvhum2\",\n \"subfly\",\n \"sumtan\",\n \"swaspa\",\n \"swathr\",\n \"tenwar\",\n \"thbeup1\",\n \"thbkin\",\n \"thswar1\",\n \"towsol\",\n \"treswa\",\n \"trogna1\",\n \"trokin\",\n \"tromoc\",\n \"tropar\",\n \"tropew1\",\n \"tuftit\",\n \"tunswa\",\n \"veery\",\n \"verdin\",\n \"vigswa\",\n \"warvir\",\n \"wbwwre1\",\n \"webwoo1\",\n \"wegspa1\",\n \"wesant1\",\n \"wesblu\",\n \"weskin\",\n \"wesmea\",\n \"westan\",\n \"wewpew\",\n \"whbman1\",\n \"whbnut\",\n \"whcpar\",\n \"whcsee1\",\n \"whcspa\",\n \"whevir\",\n \"whfpar1\",\n \"whimbr\",\n \"whiwre1\",\n \"whtdov\",\n \"whtspa\",\n \"whwbec1\",\n \"whwdov\",\n \"wilfly\",\n \"willet1\",\n \"wilsni1\",\n \"wiltur\",\n \"wlswar\",\n \"wooduc\",\n \"woothr\",\n \"wrenti\",\n \"y00475\",\n \"yebcha\",\n \"yebela1\",\n \"yebfly\",\n \"yebori1\",\n \"yebsap\",\n \"yebsee1\",\n \"yefgra1\",\n \"yegvir\",\n \"yehbla\",\n \"yehcar1\",\n \"yelgro\",\n \"yelwar\",\n \"yeofly1\",\n \"yerwar\",\n \"yeteup1\",\n 
\"yetvir\",\n]\nbird2id = {b: i for i, b in enumerate(target_columns)}\nid2bird = {i: b for i, b in enumerate(target_columns)}\n\n\nclass Compose:\n def __init__(self, transforms: list):\n self.transforms = transforms\n\n def __call__(self, y: np.ndarray, sr):\n for trns in self.transforms:\n y = trns(y, sr)\n return y\n\n\nclass AudioTransform:\n def __init__(self, always_apply=False, p=0.5):\n self.always_apply = always_apply\n self.p = p\n\n def __call__(self, y: np.ndarray, sr):\n if self.always_apply:\n return self.apply(y, sr=sr)\n else:\n if np.random.rand() < self.p:\n return self.apply(y, sr=sr)\n else:\n return y\n\n def apply(self, y: np.ndarray, **params):\n raise NotImplementedError\n\n\nclass OneOf(Compose):\n # https://github.com/albumentations-team/albumentations/blob/master/albumentations/core/composition.py\n def __init__(self, transforms, p=0.5):\n super().__init__(transforms)\n self.p = p\n transforms_ps = [t.p for t in transforms]\n s = sum(transforms_ps)\n self.transforms_ps = [t / s for t in transforms_ps]\n\n def __call__(self, y: np.ndarray, sr):\n data = y\n if self.transforms_ps and (random.random() < self.p):\n random_state = np.random.RandomState(random.randint(0, 2 ** 32 - 1))\n t = random_state.choice(self.transforms, p=self.transforms_ps)\n data = t(y, sr)\n return data\n\n\nclass Normalize(AudioTransform):\n def __init__(self, always_apply=False, p=1):\n super().__init__(always_apply, p)\n\n def apply(self, y: np.ndarray, **params):\n max_vol = np.abs(y).max()\n y_vol = y * 1 / max_vol\n return np.asfortranarray(y_vol)\n\n\nclass NewNormalize(AudioTransform):\n def __init__(self, always_apply=False, p=1):\n super().__init__(always_apply, p)\n\n def apply(self, y: np.ndarray, **params):\n y_mm = y - y.mean()\n return y_mm / y_mm.abs().max()\n\n\nclass NoiseInjection(AudioTransform):\n def __init__(self, always_apply=False, p=0.5, max_noise_level=0.5):\n super().__init__(always_apply, p)\n\n self.noise_level = (0.0, max_noise_level)\n\n def apply(self, y: np.ndarray, **params):\n noise_level = np.random.uniform(*self.noise_level)\n noise = np.random.randn(len(y))\n augmented = (y + noise * noise_level).astype(y.dtype)\n return augmented\n\n\nclass GaussianNoise(AudioTransform):\n def __init__(self, always_apply=False, p=0.5, min_snr=5, max_snr=20):\n super().__init__(always_apply, p)\n\n self.min_snr = min_snr\n self.max_snr = max_snr\n\n def apply(self, y: np.ndarray, **params):\n snr = np.random.uniform(self.min_snr, self.max_snr)\n a_signal = np.sqrt(y ** 2).max()\n a_noise = a_signal / (10 ** (snr / 20))\n\n white_noise = np.random.randn(len(y))\n a_white = np.sqrt(white_noise ** 2).max()\n augmented = (y + white_noise * 1 / a_white * a_noise).astype(y.dtype)\n return augmented\n\n\nclass PinkNoise(AudioTransform):\n def __init__(self, always_apply=False, p=0.5, min_snr=5, max_snr=20):\n super().__init__(always_apply, p)\n\n self.min_snr = min_snr\n self.max_snr = max_snr\n\n def apply(self, y: np.ndarray, **params):\n snr = np.random.uniform(self.min_snr, self.max_snr)\n a_signal = np.sqrt(y ** 2).max()\n a_noise = a_signal / (10 ** (snr / 20))\n\n pink_noise = cn.powerlaw_psd_gaussian(1, len(y))\n a_pink = np.sqrt(pink_noise ** 2).max()\n augmented = (y + pink_noise * 1 / a_pink * a_noise).astype(y.dtype)\n return augmented\n\n\nclass PitchShift(AudioTransform):\n def __init__(self, always_apply=False, p=0.5, max_range=5):\n super().__init__(always_apply, p)\n self.max_range = max_range\n\n def apply(self, y: np.ndarray, sr, **params):\n n_steps = 
np.random.randint(-self.max_range, self.max_range)\n augmented = librosa.effects.pitch_shift(y, sr, n_steps)\n return augmented\n\n\nclass TimeStretch(AudioTransform):\n def __init__(self, always_apply=False, p=0.5, max_rate=1):\n super().__init__(always_apply, p)\n self.max_rate = max_rate\n\n def apply(self, y: np.ndarray, **params):\n rate = np.random.uniform(0, self.max_rate)\n augmented = librosa.effects.time_stretch(y, rate)\n return augmented\n\n\ndef _db2float(db: float, amplitude=True):\n if amplitude:\n return 10 ** (db / 20)\n else:\n return 10 ** (db / 10)\n\n\ndef volume_down(y: np.ndarray, db: float):\n \"\"\"\n Low level API for decreasing the volume\n Parameters\n ----------\n y: numpy.ndarray\n stereo / monaural input audio\n db: float\n how much decibel to decrease\n Returns\n -------\n applied: numpy.ndarray\n audio with decreased volume\n \"\"\"\n applied = y * _db2float(-db)\n return applied\n\n\ndef volume_up(y: np.ndarray, db: float):\n \"\"\"\n Low level API for increasing the volume\n Parameters\n ----------\n y: numpy.ndarray\n stereo / monaural input audio\n db: float\n how much decibel to increase\n Returns\n -------\n applied: numpy.ndarray\n audio with increased volume\n \"\"\"\n applied = y * _db2float(db)\n return applied\n\n\nclass RandomVolume(AudioTransform):\n def __init__(self, always_apply=False, p=0.5, limit=10):\n super().__init__(always_apply, p)\n self.limit = limit\n\n def apply(self, y: np.ndarray, **params):\n db = np.random.uniform(-self.limit, self.limit)\n if db >= 0:\n return volume_up(y, db)\n else:\n return volume_down(y, db)\n\n\nclass CosineVolume(AudioTransform):\n def __init__(self, always_apply=False, p=0.5, limit=10):\n super().__init__(always_apply, p)\n self.limit = limit\n\n def apply(self, y: np.ndarray, **params):\n db = np.random.uniform(-self.limit, self.limit)\n cosine = np.cos(np.arange(len(y)) / len(y) * np.pi * 2)\n dbs = _db2float(cosine * db)\n return y * dbs\n\n\ndef drop_stripes(image: np.ndarray, dim: int, drop_width: int, stripes_num: int):\n total_width = image.shape[dim]\n lowest_value = image.min()\n for _ in range(stripes_num):\n distance = np.random.randint(low=0, high=drop_width, size=(1,))[0]\n begin = np.random.randint(low=0, high=total_width - distance, size=(1,))[0]\n\n if dim == 0:\n image[begin : begin + distance] = lowest_value\n elif dim == 1:\n image[:, begin + distance] = lowest_value\n elif dim == 2:\n image[:, :, begin + distance] = lowest_value\n return image\n\n\ndef load_wave_and_crop(filename, period, start=None):\n waveform_orig, sample_rate = sf.read(filename)\n wave_len = len(waveform_orig)\n waveform = np.concatenate([waveform_orig, waveform_orig, waveform_orig])\n while len(waveform) < (period * sample_rate * 3):\n waveform = np.concatenate([waveform, waveform_orig])\n if start is not None:\n start = start - (period - 5) / 2 * sample_rate\n while start < 0:\n start += wave_len\n start = int(start)\n # start = int(start * sample_rate) + wave_len\n else:\n start = np.random.randint(wave_len)\n waveform_seg = waveform[start : start + int(period * sample_rate)]\n return waveform_orig, waveform_seg, sample_rate, start\n\n\nclass BirdClef2021Dataset(Dataset):\n def __init__(\n self,\n data_path: str = \"../../input/birdclef-2021/train_short_audio\",\n pseudo_label_path: list = [\n \"../../input/birdclef-2021/pseudo_label_stage1_repvgg_b0\",\n \"../../input/birdclef-2021/pseudo_label_stage1_resnet34\",\n ],\n period: float = 15.0,\n secondary_coef: float = 1.0,\n smooth_label: float = 0.0,\n df: 
pd.DataFrame = train_df,\n train: bool = True,\n ):\n\n self.df = df\n self.data_path = data_path\n self.pseudo_label_path = pseudo_label_path\n self.duration = df[\"duration\"]\n self.filenames = df[\"filename\"]\n self.primary_label = df[\"primary_label\"]\n\n self.secondary_labels = (\n df[\"secondary_labels\"]\n .map(\n lambda s: s.replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\",\", \"\")\n .replace(\"'\", \"\")\n .split(\" \")\n )\n .values\n )\n self.secondary_coef = secondary_coef\n self.type = df[\"type\"]\n self.period = period\n self.smooth_label = smooth_label + 1e-6\n if train:\n self.wave_transforms = Compose(\n [\n OneOf(\n [\n NoiseInjection(p=1, max_noise_level=0.04),\n GaussianNoise(p=1, min_snr=5, max_snr=20),\n PinkNoise(p=1, min_snr=5, max_snr=20),\n ],\n p=0.2,\n ),\n RandomVolume(p=0.2, limit=4),\n Normalize(p=1),\n ]\n )\n else:\n self.wave_transforms = Compose(\n [\n Normalize(p=1),\n ]\n )\n self.train = train\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n filename = os.path.join(\n self.data_path, self.primary_label[idx], self.filenames[idx]\n )\n if self.train:\n waveform, waveform_seg, sample_rate, start = load_wave_and_crop(\n filename, self.period\n )\n else:\n waveform, waveform_seg, sample_rate, start = load_wave_and_crop(\n filename, self.period, 0\n )\n\n waveform_seg = self.wave_transforms(waveform_seg, sr=sample_rate)\n\n waveform_seg = torch.Tensor(np.nan_to_num(waveform_seg))\n\n target = np.zeros(397, dtype=np.float32)\n primary_label = bird2id[self.primary_label[idx]]\n target[primary_label] = 1.0\n\n for s in self.secondary_labels[idx]:\n if s == \"rocpig1\":\n s = \"rocpig\"\n if s != \"\" and s in bird2id.keys():\n target[bird2id[s]] = self.secondary_coef\n\n pl_filename_att = os.path.join(\n self.pseudo_label_path[0],\n self.primary_label[idx],\n self.filenames[idx].split(\".\")[0] + \"_att.npy\",\n )\n pseudo_label1_att = np.load(pl_filename_att)\n frame_length1 = int(\n pseudo_label1_att.shape[1] / len(waveform) * len(waveform_seg)\n )\n start1 = int(pseudo_label1_att.shape[1] / len(waveform) * start)\n pseudo_label1_att = (\n np.concatenate(\n [\n pseudo_label1_att\n for _ in range(int(len(waveform_seg) / len(waveform) + 2))\n ],\n -1,\n )\n if len(waveform) > len(waveform_seg)\n else np.concatenate([pseudo_label1_att, pseudo_label1_att], -1)\n )\n pseudo_label1_att = pseudo_label1_att[:, start1 : start1 + frame_length1]\n pl_filename_frame = os.path.join(\n self.pseudo_label_path[0],\n self.primary_label[idx],\n self.filenames[idx].split(\".\")[0] + \"_framewise.npy\",\n )\n pseudo_label1_frame = np.load(pl_filename_frame)\n pseudo_label1_frame = (\n np.concatenate(\n [\n pseudo_label1_frame\n for _ in range(int(len(waveform_seg) / len(waveform) + 2))\n ],\n -1,\n )\n if len(waveform) > len(waveform_seg)\n else np.concatenate([pseudo_label1_frame, pseudo_label1_frame], -1)\n )\n pseudo_label1_frame = pseudo_label1_frame[:, start1 : start1 + frame_length1]\n pseudo_label1 = (\n pseudo_label1_frame * sp.special.softmax(pseudo_label1_att, -1)\n ).sum(-1)\n\n pl_filename_att = os.path.join(\n self.pseudo_label_path[1],\n self.primary_label[idx],\n self.filenames[idx].split(\".\")[0] + \"_att.npy\",\n )\n pseudo_label2_att = np.load(pl_filename_att)\n frame_length2 = int(\n pseudo_label2_att.shape[1] / len(waveform) * len(waveform_seg)\n )\n start2 = int(pseudo_label2_att.shape[1] / len(waveform) * start)\n pseudo_label2_att = (\n np.concatenate(\n [\n pseudo_label2_att\n for _ in 
range(int(len(waveform_seg) / len(waveform) + 2))\n ],\n -1,\n )\n if len(waveform) > len(waveform_seg)\n else np.concatenate([pseudo_label2_att, pseudo_label2_att], -1)\n )\n pseudo_label2_att = pseudo_label2_att[:, start2 : start2 + frame_length2]\n\n pl_filename_frame = os.path.join(\n self.pseudo_label_path[1],\n self.primary_label[idx],\n self.filenames[idx].split(\".\")[0] + \"_framewise.npy\",\n )\n pseudo_label2_frame = np.load(pl_filename_frame)\n pseudo_label2_frame = (\n np.concatenate(\n [\n pseudo_label2_frame\n for _ in range(int(len(waveform_seg) / len(waveform) + 2))\n ],\n -1,\n )\n if len(waveform) > len(waveform_seg)\n else np.concatenate([pseudo_label2_frame, pseudo_label2_frame], -1)\n )\n pseudo_label2_frame = pseudo_label2_frame[:, start2 : start2 + frame_length2]\n pseudo_label2 = (\n pseudo_label2_frame * sp.special.softmax(pseudo_label2_att, -1)\n ).sum(-1)\n\n pseudo_label = (pseudo_label1 + pseudo_label2) / 2\n if (pseudo_label > 0.05).max() == 1:\n pseudo_label = (pseudo_label > 0.2).astype(np.float)\n if self.secondary_labels[idx][0] != \"\":\n target = np.maximum(target, pseudo_label)\n else:\n if torch.rand(1)[0] < 0.1:\n target = target * 0.1\n\n target = torch.Tensor(target)\n return {\n \"wave\": waveform_seg,\n \"target\": (target > 0.01).float(),\n \"loss_target\": target * (1 - self.smooth_label)\n + self.smooth_label / target.size(-1),\n }\n\n\nclass BirdClef2021DataModule(LightningDataModule):\n def __init__(\n self,\n num_workers: int = 0,\n batch_size: int = 8,\n period: float = 15.0,\n secondary_coef: float = 1.0,\n train_df: pd.DataFrame = train_df,\n valid_df: pd.DataFrame = train_df,\n ):\n super().__init__()\n\n self._num_workers = num_workers\n self._batch_size = batch_size\n self.period = period\n self.secondary_coef = secondary_coef\n self.train_df = train_df\n self.valid_df = valid_df\n\n def create_dataset(self, train=True):\n return (\n BirdClef2021Dataset(\n period=self.period,\n secondary_coef=self.secondary_coef,\n train=True,\n df=self.train_df,\n )\n if train\n else BirdClef2021Dataset(\n period=self.period,\n secondary_coef=self.secondary_coef,\n train=False,\n df=self.valid_df,\n )\n )\n\n def __dataloader(self, train: bool):\n \"\"\"Train/validation loaders.\"\"\"\n dataset = self.create_dataset(train)\n return DataLoader(\n dataset=dataset,\n batch_size=self._batch_size,\n num_workers=self._num_workers,\n shuffle=train,\n drop_last=train,\n worker_init_fn=lambda x: np.random.seed(np.random.get_state()[1][0] + x),\n )\n\n def train_dataloader(self):\n return self.__dataloader(train=True)\n\n def val_dataloader(self):\n return self.__dataloader(train=False)\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = parent_parser.add_argument_group(\"BirdClef2021DataModule\")\n parser.add_argument(\n \"--num_workers\",\n default=0,\n type=int,\n metavar=\"W\",\n help=\"number of CPU workers\",\n dest=\"num_workers\",\n )\n parser.add_argument(\n \"--batch_size\",\n default=8,\n type=int,\n metavar=\"BS\",\n help=\"number of sample in a batch\",\n dest=\"batch_size\",\n )\n parser.add_argument(\n \"--period\",\n default=15.0,\n type=float,\n metavar=\"P\",\n help=\"period for training\",\n dest=\"period\",\n )\n parser.add_argument(\n \"--secondary_coef\",\n default=1.0,\n type=float,\n metavar=\"SC\",\n help=\"secondary coef\",\n dest=\"secondary_coef\",\n )\n return parent_parser\n\n\nclass AdaptiveConcatPool2d(nn.Module):\n def __init__(self, sz=None):\n super().__init__()\n sz = sz or (1, 1)\n self.ap = 
nn.AdaptiveAvgPool2d(sz)\n self.mp = nn.AdaptiveMaxPool2d(sz)\n\n def forward(self, x):\n return torch.cat([self.mp(x), self.ap(x)], 1)\n\n\nclass Flatten(nn.Module):\n \"\"\"\n Simple class for flattening layer.\n \"\"\"\n\n def forward(self, x):\n return x.view(x.size()[0], -1)\n\n\ndef gem(x, p=3, eps=1e-6):\n return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(1.0 / p)\n\n\nclass GeM(nn.Module):\n def __init__(self, p=3, eps=1e-6):\n super(GeM, self).__init__()\n self.p = torch.nn.Parameter(torch.ones(1) * p)\n self.eps = eps\n\n def forward(self, x):\n return gem(x, p=self.p, eps=self.eps)\n\n\ndef gem_freq(x, p=3, eps=1e-6):\n return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), 1)).pow(1.0 / p)\n\n\nclass GeMFreq(nn.Module):\n def __init__(self, p=3, eps=1e-6):\n super().__init__()\n self.p = torch.nn.Parameter(torch.ones(1) * p)\n self.eps = eps\n\n def forward(self, x):\n return gem_freq(x, p=self.p, eps=self.eps)\n\n\nclass NormalizeMelSpec(nn.Module):\n def __init__(self, eps=1e-6):\n super().__init__()\n self.eps = eps\n\n def forward(self, X):\n mean = X.mean((1, 2), keepdim=True)\n std = X.std((1, 2), keepdim=True)\n Xstd = (X - mean) / (std + self.eps)\n norm_min, norm_max = Xstd.min(-1)[0].min(-1)[0], Xstd.max(-1)[0].max(-1)[0]\n fix_ind = (norm_max - norm_min) > self.eps * torch.ones_like(\n (norm_max - norm_min)\n )\n V = torch.zeros_like(Xstd)\n if fix_ind.sum():\n V_fix = Xstd[fix_ind]\n norm_max_fix = norm_max[fix_ind, None, None]\n norm_min_fix = norm_min[fix_ind, None, None]\n V_fix = torch.max(\n torch.min(V_fix, norm_max_fix),\n norm_min_fix,\n )\n # print(V_fix.shape, norm_min_fix.shape, norm_max_fix.shape)\n V_fix = (V_fix - norm_min_fix) / (norm_max_fix - norm_min_fix)\n V[fix_ind] = V_fix\n return V\n\n\nclass AttHead(nn.Module):\n def __init__(\n self, in_chans, p=0.5, num_class=397, train_period=15.0, infer_period=5.0\n ):\n super().__init__()\n self.train_period = train_period\n self.infer_period = infer_period\n self.pooling = GeMFreq()\n\n self.dense_layers = nn.Sequential(\n nn.Dropout(p / 2),\n nn.Linear(in_chans, 512),\n nn.ReLU(),\n nn.Dropout(p),\n )\n self.attention = nn.Conv1d(\n in_channels=512,\n out_channels=num_class,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=True,\n )\n self.fix_scale = nn.Conv1d(\n in_channels=512,\n out_channels=num_class,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=True,\n )\n\n def forward(self, feat):\n feat = self.pooling(feat).squeeze(-2).permute(0, 2, 1) # (bs, time, ch)\n\n feat = self.dense_layers(feat).permute(0, 2, 1) # (bs, 512, time)\n time_att = torch.tanh(self.attention(feat))\n assert self.train_period >= self.infer_period\n if self.training or self.train_period == self.infer_period:\n\n clipwise_pred = torch.sum(\n torch.sigmoid(self.fix_scale(feat)) * torch.softmax(time_att, dim=-1),\n dim=-1,\n ) # sum((bs, 24, time), -1) -> (bs, 24)\n logits = torch.sum(\n self.fix_scale(feat) * torch.softmax(time_att, dim=-1),\n dim=-1,\n )\n else:\n feat_time = feat.size(-1)\n start = (\n feat_time / 2 - feat_time * (self.infer_period / self.train_period) / 2\n )\n end = start + feat_time * (self.infer_period / self.train_period)\n start = int(start)\n end = int(end)\n feat = feat[:, :, start:end]\n att = torch.softmax(time_att[:, :, start:end], dim=-1)\n clipwise_pred = torch.sum(\n torch.sigmoid(self.fix_scale(feat)) * att,\n dim=-1,\n )\n logits = torch.sum(\n self.fix_scale(feat) * att,\n dim=-1,\n )\n time_att = time_att[:, :, start:end]\n return (\n logits,\n clipwise_pred,\n 
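            # the two elements below are the framewise class logits and the raw
            # attention scores, each permuted to shape (batch, time, num_class)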
self.fix_scale(feat).permute(0, 2, 1),\n time_att.permute(0, 2, 1),\n )\n\n\nclass AttModel(nn.Module):\n def __init__(\n self,\n backbone=\"resnet34\",\n p=0.5,\n n_mels=224,\n num_class=397,\n train_period=15.0,\n infer_period=5.0,\n in_chans=1,\n ):\n super().__init__()\n self.n_mels = n_mels\n self.logmelspec_extractor = nn.Sequential(\n MelSpectrogram(\n 32000,\n n_mels=n_mels,\n f_min=20,\n n_fft=2048,\n hop_length=512,\n normalized=True,\n ),\n AmplitudeToDB(top_db=80.0),\n NormalizeMelSpec(),\n )\n self.backbone = timm.create_model(\n backbone, features_only=True, pretrained=True, in_chans=in_chans\n )\n encoder_channels = self.backbone.feature_info.channels()\n dense_input = encoder_channels[-1]\n self.head = AttHead(\n dense_input,\n p=p,\n num_class=num_class,\n train_period=train_period,\n infer_period=infer_period,\n )\n\n def forward(self, input):\n feats = self.backbone(input)\n return self.head(feats[-1])\n\n\ndef row_wise_f1_score_micro(y_true, y_pred, threshold=0.5):\n def event_thresholder(x, threshold):\n return x > threshold\n\n return f1_score(\n y_true=y_true, y_pred=event_thresholder(y_pred, threshold), average=\"samples\"\n )\n\n\nclass ThresholdOptimizer:\n def __init__(self, loss_fn):\n self.coef_ = {}\n self.loss_fn = loss_fn\n self.coef_[\"x\"] = [0.5]\n\n def _loss(self, coef, X, y):\n ll = self.loss_fn(y, X, coef)\n return -ll\n\n def fit(self, X, y):\n loss_partial = partial(self._loss, X=X, y=y)\n initial_coef = [0.5]\n self.coef_ = sp.optimize.minimize(\n loss_partial, initial_coef, method=\"nelder-mead\"\n )\n\n def coefficients(self):\n return self.coef_[\"x\"]\n\n def calc_score(self, X, y, coef):\n return self.loss_fn(y, X, coef)\n\n\nclass Mixup(object):\n def __init__(self, p=0.5, alpha=5):\n self.p = p\n self.alpha = alpha\n self.lam = 1.0\n self.do_mixup = False\n\n def init_lambda(self):\n if np.random.rand() < self.p:\n self.do_mixup = True\n else:\n self.do_mixup = False\n if self.do_mixup and self.alpha > 0.0:\n self.lam = np.random.beta(self.alpha, self.alpha)\n else:\n self.lam = 1.0\n\n\nclass BirdClef2021Model(pl.LightningModule):\n def __init__(\n self,\n backbone: str = \"resnet50\",\n n_mels: int = 224,\n batch_size: int = 32,\n lr: float = 1e-3,\n backbone_lr: float = None,\n num_workers: int = 6,\n period=15.0,\n infer_period=15.0,\n mixup_p=0.0,\n mixup_alpha=0.5,\n **kwargs,\n ) -> None:\n super().__init__()\n self.backbone = backbone\n self.n_mels = n_mels\n # self.milestones = milestones\n self.batch_size = batch_size\n self.lr = lr\n self.backbone_lr = backbone_lr if backbone_lr is not None else lr\n self.num_workers = num_workers\n self.period = period\n self.infer_period = infer_period\n self.thresholder = ThresholdOptimizer(row_wise_f1_score_micro)\n self.mixupper = Mixup(p=mixup_p, alpha=mixup_alpha)\n\n self.decay = 0.99\n\n self.__build_model()\n self.save_hyperparameters()\n\n def __build_model(self):\n \"\"\"Define model layers & loss.\"\"\"\n\n self.model = AttModel(\n self.backbone,\n p=0.5,\n n_mels=self.n_mels,\n num_class=397,\n train_period=self.period,\n infer_period=self.infer_period,\n )\n self.criterions = {\n \"classification_clip\": nn.BCEWithLogitsLoss(),\n \"classification_frame\": nn.BCEWithLogitsLoss(),\n }\n\n def forward(self, image):\n \"\"\"Forward pass. 
Returns logits.\"\"\"\n outputs = {}\n (\n outputs[\"logits\"],\n outputs[\"output_clip\"],\n outputs[\"output_frame\"],\n outputs[\"output_attention\"],\n ) = self.model(image)\n return outputs\n\n def loss(self, outputs, batch):\n losses = {}\n losses[\"loss_clip\"] = self.criterions[\"classification_clip\"](\n torch.logit(outputs[\"output_clip\"]), batch[\"loss_target\"]\n )\n losses[\"loss_frame\"] = self.criterions[\"classification_frame\"](\n outputs[\"output_frame\"].max(1)[0], batch[\"loss_target\"]\n )\n losses[\"loss\"] = losses[\"loss_clip\"] + losses[\"loss_frame\"] * 0.5\n return losses\n\n def training_step(self, batch, batch_idx):\n self.mixupper.init_lambda()\n step_output = {}\n image = self.model.logmelspec_extractor(batch[\"wave\"])[:, None]\n image = self.mixupper.lam * image + (1 - self.mixupper.lam) * image.flip(0)\n outputs = self.forward(image)\n batch[\"loss_target\"] = self.mixupper.lam * batch[\"loss_target\"] + (\n 1 - self.mixupper.lam\n ) * batch[\"loss_target\"].flip(0)\n batch[\"target\"] = self.mixupper.lam * batch[\"target\"] + (\n 1 - self.mixupper.lam\n ) * batch[\"target\"].flip(0)\n\n train_loss = self.loss(outputs, batch)\n\n step_output.update(train_loss)\n step_output.update({\"output_clip\": outputs[\"output_clip\"]})\n step_output[\"target\"] = batch[\"target\"]\n self.log_dict(\n dict(\n train_loss=train_loss[\"loss\"],\n train_loss_frame=train_loss[\"loss_frame\"],\n train_loss_clip=train_loss[\"loss_clip\"],\n )\n )\n return step_output\n\n def training_epoch_end(self, training_step_outputs):\n y_true = []\n y_pred = []\n for tso in training_step_outputs:\n y_true.append(tso[\"target\"])\n y_pred.append(tso[\"output_clip\"])\n y_true = torch.cat(y_true).cpu().numpy().astype(\"int\")\n y_pred = torch.cat(y_pred).cpu().detach().numpy()\n self.thresholder.fit(y_pred, y_true)\n coef = self.thresholder.coefficients()\n f1_score = self.thresholder.calc_score(y_pred, y_true, coef)\n f1_score_05 = self.thresholder.calc_score(y_pred, y_true, [0.5])\n f1_score_03 = self.thresholder.calc_score(y_pred, y_true, [0.3])\n self.log_dict(\n dict(\n train_coef=coef,\n train_f1_score=f1_score,\n train_f1_score_05=f1_score_05,\n train_f1_score_03=f1_score_03,\n )\n )\n\n def validation_step(self, batch, batch_idx):\n step_output = {}\n image = self.model.logmelspec_extractor(batch[\"wave\"])[:, None]\n outputs = self.forward(image)\n valid_loss = self.loss(outputs, batch)\n step_output.update({\"output_clip\": outputs[\"output_clip\"]})\n step_output[\"target\"] = batch[\"target\"]\n self.log_dict(\n dict(\n val_loss=valid_loss[\"loss\"],\n val_loss_frame=valid_loss[\"loss_frame\"],\n val_loss_clip=valid_loss[\"loss_clip\"],\n )\n )\n return step_output\n\n def validation_epoch_end(self, validation_step_outputs):\n y_pred = []\n y_true = []\n for vso in validation_step_outputs:\n y_true.append(vso[\"target\"])\n y_pred.append(vso[\"output_clip\"])\n y_true = torch.cat(y_true).cpu().numpy().astype(\"int\")\n y_pred = torch.cat(y_pred).cpu().detach().numpy()\n self.thresholder.fit(y_pred, y_true)\n coef = self.thresholder.coefficients()\n f1_score = self.thresholder.calc_score(y_pred, y_true, coef)\n f1_score_05 = self.thresholder.calc_score(y_pred, y_true, [0.5])\n f1_score_03 = self.thresholder.calc_score(y_pred, y_true, [0.3])\n self.log_dict(\n dict(\n val_coef=coef,\n val_f1_score=f1_score,\n val_f1_score_05=f1_score_05,\n val_f1_score_03=f1_score_03,\n )\n )\n\n def optimizer_step(self, *args, **kwargs):\n super().optimizer_step(*args, **kwargs)\n\n def 
configure_optimizers(self):\n\n optimizer = optim.Adam(\n [\n {\"params\": self.model.head.parameters(), \"lr\": self.lr},\n {\"params\": self.model.backbone.parameters(), \"lr\": self.backbone_lr},\n ],\n lr=self.lr,\n weight_decay=0.0001,\n )\n scheduler = CosineAnnealingLR(\n optimizer,\n T_max=self.trainer.max_epochs,\n eta_min=1.0e-6,\n )\n return [optimizer], [scheduler]\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = parent_parser.add_argument_group(\"TransferLearningModel\")\n parser.add_argument(\n \"--backbone\",\n default=\"resnet34\",\n type=str,\n metavar=\"BK\",\n help=\"Name (as in ``timm``) of the feature extractor\",\n )\n parser.add_argument(\n \"--n_mels\", default=224, type=int, metavar=\"NM\", help=\"nmels\", dest=\"n_mels\"\n )\n parser.add_argument(\n \"--epochs\", default=10, type=int, metavar=\"N\", help=\"total number of epochs\"\n )\n parser.add_argument(\n \"--batch_size\",\n default=8,\n type=int,\n metavar=\"B\",\n help=\"batch size\",\n dest=\"batch_size\",\n )\n parser.add_argument(\"--gpus\", type=int, default=0, help=\"number of gpus to use\")\n parser.add_argument(\n \"--lr\",\n default=1e-3,\n type=float,\n metavar=\"LR\",\n help=\"initial learning rate\",\n dest=\"lr\",\n )\n parser.add_argument(\n \"--backbone_lr\",\n default=None,\n type=float,\n metavar=\"LR\",\n help=\"initial learning rate for backbone network\",\n dest=\"backbone_lr\",\n )\n parser.add_argument(\n \"--mixup_p\",\n default=0,\n type=float,\n metavar=\"MP\",\n help=\"mixup proba\",\n dest=\"mixup_p\",\n )\n parser.add_argument(\n \"--mixup_alpha\",\n default=0.8,\n type=float,\n metavar=\"ML\",\n help=\"mixup alpha\",\n dest=\"mixup_alpha\",\n )\n parser.add_argument(\n \"--period\",\n default=15.0,\n type=float,\n metavar=\"P\",\n help=\"period for training\",\n dest=\"period\",\n )\n parser.add_argument(\n \"--infer_period\",\n default=15.0,\n type=float,\n metavar=\"P\",\n help=\"period for inference\",\n dest=\"infer_period\",\n )\n return parent_parser\n\n\ndef get_args() -> argparse.Namespace:\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument(\n \"--seed\",\n default=2021,\n type=int,\n metavar=\"SE\",\n help=\"seed number\",\n dest=\"seed\",\n )\n parent_parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"1 batch run for debug\",\n dest=\"debug\",\n )\n dt_now = datetime.datetime.now()\n parent_parser.add_argument(\n \"--logdir\",\n default=f\"{dt_now.strftime('%Y%m%d-%H-%M-%S')}\",\n )\n parent_parser.add_argument(\n \"--fold\",\n type=int,\n default=0,\n )\n parser = BirdClef2021Model.add_model_specific_args(parent_parser)\n parser = BirdClef2021DataModule.add_argparse_args(parser)\n return parser.parse_args()\n\n\ndef main(args):\n pl.seed_everything(args.seed)\n assert args.fold < 4\n for i in range(4):\n if args.fold != i:\n continue\n train_df_fold = train_df[train_df.fold != i].reset_index(drop=True)\n valid_df_fold = train_df[train_df.fold == i].reset_index(drop=True)\n\n datamodule = BirdClef2021DataModule(\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n period=args.period,\n secondary_coef=args.secondary_coef,\n train_df=train_df_fold,\n valid_df=valid_df_fold,\n )\n model = BirdClef2021Model(**vars(args))\n rootdir = f\"../../logs/stage2/{args.logdir}/fold{i}\"\n print(f\"logdir = {rootdir}\")\n lr_monitor = callbacks.LearningRateMonitor()\n loss_checkpoint = callbacks.ModelCheckpoint(\n filename=\"best_loss\",\n monitor=\"val_loss\",\n save_top_k=1,\n mode=\"min\",\n 
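            # save_top_k=1 with mode="min" keeps only the single checkpoint
            # with the lowest val_loss for this fold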
)\n\n f1_checkpoint = callbacks.ModelCheckpoint(\n filename=\"best_f1\",\n monitor=\"val_f1_score\",\n save_top_k=1,\n mode=\"max\",\n )\n\n trainer = pl.Trainer(\n default_root_dir=rootdir,\n progress_bar_refresh_rate=1,\n sync_batchnorm=True,\n # precision=16,\n gpus=args.gpus,\n max_epochs=args.epochs,\n callbacks=[\n loss_checkpoint,\n f1_checkpoint,\n lr_monitor,\n ],\n accelerator=\"ddp\",\n fast_dev_run=args.debug,\n num_sanity_val_steps=0,\n )\n\n trainer.fit(model, datamodule=datamodule)\n\n\nif __name__ == \"__main__\":\n main(get_args())\n","sub_path":"src/stage2/main_v5.py","file_name":"main_v5.py","file_ext":"py","file_size_in_byte":41712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"374529086","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import *\nfrom sklearn.metrics import *\nfrom sklearn.linear_model import *\n\nimport lightgbm as lgb\nimport os\nimport gc\nimport pickle\nimport logging\n\nfrom utils import *\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n# create a file handler\nhandler = logging.FileHandler('genTimeDiffs.log')\nhandler.setLevel(logging.INFO)\n\n# create a logging format\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n\nlogger.addHandler(handler) \n\nif __name__ == \"__main__\":\n SEED = 786\n OUT_PATH = \"../output\"\n DTYPES = {\n 'ip' : 'uint32',\n 'app' : 'uint16',\n 'device' : 'uint16',\n 'os' : 'uint16',\n 'channel' : 'uint16',\n 'is_attributed' : 'uint8',\n 'click_id' : 'uint32',\n 'hourofday' : 'uint8',\n 'dayofweek' : 'uint8',\n 'ip_device_os' : 'uint32',\n 'ip_device_os_app' : 'uint32',\n 'ip_device_os_app_channel' : 'uint32',\n\n }\n \n logger.info(\"Reading train and test\")\n train = pd.read_csv(\"../input/train_base.csv\", dtype=DTYPES, \n )\n test = pd.read_csv(\"../input/test_base.csv\", dtype=DTYPES)\n test[\"is_attributed\"] = np.nan\n \n logger.info(\"Get time details\")\n train = time_details(train)\n test = time_details(test)\n \n logger.info(\"Break train into tr and val\")\n cond = (train.dayofweek == 3) & (train.hourofday.isin([4,5,9,10,13,14]))\n cond2 = ((train.dayofweek == 3) & (train.hourofday < 4)) | (train.dayofweek < 3)\n \n tr = train.loc[cond2].reset_index(drop=True) ###\n val = train.loc[cond].reset_index(drop=True)\n y_val = val[\"is_attributed\"]\n \n logger.info(\"Shape of train and test is {} and {}\".format(train.shape, test.shape))\n logger.info(\"Shape of tr and val is {} and {}\".format(tr.shape, val.shape))\n \n \n logger.info(\"Generate cumulative count features\")\n feats2 = []\n for col in ['ip', 'ip_device_os', 'ip_device_os_app', 'ip_device_os_app_channel']:\n logger.info(\"Processing feature: {}\".format(col))\n for SHIFT in [1,2]:\n col_name = \"_\".join([col]) + \"_prev_click\"\n logger.info(\"Gnerating feature: {} {} for tr/val set\".format(col_name, SHIFT))\n get_prev_click(tr, val, [col], target='epoch_time', shift=SHIFT,\n tr_filename=os.path.join(OUT_PATH, \"tr_{}_{}.npy\".format(col_name, SHIFT)), \n val_filename=os.path.join(OUT_PATH, \"val_{}_{}.npy\".format(col_name, SHIFT)), \n seed=786, rewrite=False)\n \n logger.info(\"Gnerating feature: {} for train/test set\".format(col_name))\n get_prev_click(train, test, [col], target='epoch_time', shift=SHIFT,\n tr_filename=os.path.join(OUT_PATH, \"train_{}_{}.npy\".format(col_name, SHIFT)), \n 
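                            # NB: get_prev_click's parameters are named tr_/val_, but this
                            # call passes the full train/test split, so 'val_filename'
                            # actually receives the test-set feature file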
val_filename=os.path.join(OUT_PATH, \"test_{}_{}.npy\".format(col_name, SHIFT)), \n seed=786, rewrite=False)\n\n col_name = \"_\".join([col]) + \"_next_click\"\n logger.info(\"Gnerating feature: {} {} for tr/val set\".format(col_name, SHIFT))\n \n get_next_click(tr, val, [col], target='epoch_time', shift=-SHIFT,\n tr_filename=os.path.join(OUT_PATH, \"tr_{}_{}.npy\".format(col_name, SHIFT)), \n val_filename=os.path.join(OUT_PATH, \"val_{}_{}.npy\".format(col_name, SHIFT)), \n seed=786, rewrite=False)\n \n logger.info(\"Gnerating feature: {} {} for train/test set\".format(col_name, SHIFT))\n get_next_click(train, test, [col], target='epoch_time', shift=-SHIFT,\n tr_filename=os.path.join(OUT_PATH, \"train_{}_{}.npy\".format(col_name, SHIFT)), \n val_filename=os.path.join(OUT_PATH, \"test_{}_{}.npy\".format(col_name, SHIFT)), \n seed=786, rewrite=False)\n \n logger.info(\"Successfully Completed\")\n \n \n\n","sub_path":"genTimeDiffs.py","file_name":"genTimeDiffs.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"486720306","text":"\"\"\"\nA collection of dielectric physical property definitions.\n\"\"\"\nimport copy\nimport logging\n\nimport numpy as np\nfrom simtk import openmm\nfrom simtk.openmm import XmlSerializer\n\nfrom propertyestimator import unit\nfrom propertyestimator.datasets.plugins import register_thermoml_property\nfrom propertyestimator.properties import PhysicalProperty, PropertyPhase\nfrom propertyestimator.properties.plugins import register_estimable_property\nfrom propertyestimator.protocols import analysis, reweighting\nfrom propertyestimator.protocols.utils import generate_base_reweighting_protocols, generate_gradient_protocol_group, \\\n generate_base_simulation_protocols\nfrom propertyestimator.storage import StoredSimulationData\nfrom propertyestimator.thermodynamics import ThermodynamicState\nfrom propertyestimator.utils import timeseries\nfrom propertyestimator.utils.exceptions import PropertyEstimatorException\nfrom propertyestimator.utils.quantities import EstimatedQuantity\nfrom propertyestimator.utils.statistics import bootstrap\nfrom propertyestimator.workflow import plugins\nfrom propertyestimator.workflow.decorators import protocol_input, protocol_output, UNDEFINED\nfrom propertyestimator.workflow.schemas import WorkflowSchema\nfrom propertyestimator.workflow.utils import ProtocolPath\n\n\n@plugins.register_calculation_protocol()\nclass ExtractAverageDielectric(analysis.AverageTrajectoryProperty):\n \"\"\"Extracts the average dielectric constant from a simulation trajectory.\n \"\"\"\n\n system_path = protocol_input(\n docstring='The path to the XML system object which defines the forces present in the system.',\n type_hint=str,\n default_value=UNDEFINED\n )\n thermodynamic_state = protocol_input(\n docstring='The thermodynamic state at which the trajectory was generated.',\n type_hint=ThermodynamicState,\n default_value=UNDEFINED\n )\n\n dipole_moments = protocol_output(\n docstring='The raw (possibly correlated) dipole moments which were used in '\n 'the dielectric calculation.',\n type_hint=unit.Quantity\n )\n volumes = protocol_output(\n docstring='The raw (possibly correlated) which were used in the dielectric calculation.',\n type_hint=unit.Quantity\n )\n\n uncorrelated_volumes = protocol_output(\n docstring='The uncorrelated volumes which were used in the dielectric '\n 'calculation.',\n type_hint=unit.Quantity\n )\n\n def _bootstrap_function(self, 
**sample_kwargs):\n        \"\"\"Calculates the static dielectric constant from an\n        array of dipoles and volumes.\n\n        Notes\n        -----\n        The static dielectric constant is taken from Equation 7 of [1]\n\n        References\n        ----------\n        [1] A. Glattli, X. Daura and W. F. van Gunsteren. Derivation of an improved simple point charge\n            model for liquid water: SPC/A and SPC/L. J. Chem. Phys. 116(22):9811-9828, 2002\n\n        Parameters\n        ----------\n        sample_kwargs: dict of str and np.ndarray\n            A keyword dictionary of the bootstrap sample data, where the\n            sample data is a numpy array of shape=(num_frames, num_dimensions)\n            with dtype=float. The kwargs should include the dipole moment and\n            the system volume\n\n        Returns\n        -------\n        float\n            The unitless static dielectric constant\n        \"\"\"\n\n        dipole_moments = sample_kwargs['dipoles']\n        volumes = sample_kwargs['volumes']\n\n        temperature = self.thermodynamic_state.temperature\n\n        dipole_mu = dipole_moments.mean(0)\n        shifted_dipoles = dipole_moments - dipole_mu\n\n        dipole_variance = (shifted_dipoles * shifted_dipoles).sum(-1).mean(0) * \\\n                          (unit.elementary_charge * unit.nanometers) ** 2\n\n        volume = volumes.mean() * unit.nanometer**3\n\n        e0 = 8.854187817E-12 * unit.farad / unit.meter  # Taken from QCElemental\n\n        dielectric_constant = 1.0 + dipole_variance / (3 *\n                                                       unit.boltzmann_constant *\n                                                       temperature *\n                                                       volume *\n                                                       e0)\n\n        return dielectric_constant\n\n    def _extract_charges(self):\n        \"\"\"Extracts all of the charges from a system object.\n\n        Returns\n        -------\n        list of float\n        \"\"\"\n        from simtk import unit as simtk_unit\n\n        charge_list = []\n\n        with open(self._system_path, 'r') as file:\n            system = XmlSerializer.deserialize(file.read())\n\n        for force_index in range(system.getNumForces()):\n\n            force = system.getForce(force_index)\n\n            if not isinstance(force, openmm.NonbondedForce):\n                continue\n\n            for atom_index in range(force.getNumParticles()):\n                charge = force.getParticleParameters(atom_index)[0]\n                charge = charge.value_in_unit(simtk_unit.elementary_charge)\n\n                charge_list.append(charge)\n\n        return charge_list\n\n    def _extract_dipoles_and_volumes(self):\n        \"\"\"Extract the system's dipole moments and volumes.\n\n        Returns\n        -------\n        numpy.ndarray\n            The dipole moments of the trajectory (shape=(n_frames, 3), dtype=float)\n        numpy.ndarray\n            The volumes of the trajectory (shape=(n_frames, 1), dtype=float)\n        \"\"\"\n        import mdtraj\n\n        dipole_moments = []\n        volumes = []\n        charge_list = self._extract_charges()\n\n        for chunk in mdtraj.iterload(self.trajectory_path, top=self.input_coordinate_file, chunk=50):\n\n            dipole_moments.extend(mdtraj.geometry.dipole_moments(chunk, charge_list))\n            volumes.extend(chunk.unitcell_volumes)\n\n        dipole_moments = np.array(dipole_moments)\n        volumes = np.array(volumes)\n\n        return dipole_moments, volumes\n\n    def execute(self, directory, available_resources):\n\n        logging.info('Extracting dielectrics: ' + self.id)\n\n        base_exception = super(ExtractAverageDielectric, self).execute(directory, available_resources)\n\n        if isinstance(base_exception, PropertyEstimatorException):\n            return base_exception\n\n        # Extract the dipoles\n        dipole_moments, volumes = self._extract_dipoles_and_volumes()\n        self.dipole_moments = dipole_moments * unit.dimensionless\n\n        dipole_moments, self.equilibration_index, self.statistical_inefficiency = \\\n            timeseries.decorrelate_time_series(dipole_moments)\n\n        uncorrelated_length = len(volumes) - self.equilibration_index\n\n        sample_indices = timeseries.get_uncorrelated_indices(uncorrelated_length, 
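# editor's note: the stride between retained samples is set by the statistical inefficiency g
                                                             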
self.statistical_inefficiency)\n        sample_indices = [index + self.equilibration_index for index in sample_indices]\n\n        self.volumes = volumes * unit.nanometer ** 3\n        uncorrelated_volumes = volumes[sample_indices]\n\n        self.uncorrelated_values = dipole_moments * unit.dimensionless\n        self.uncorrelated_volumes = uncorrelated_volumes * unit.nanometer ** 3\n\n        value, uncertainty = bootstrap(self._bootstrap_function,\n                                       self.bootstrap_iterations,\n                                       self.bootstrap_sample_size,\n                                       dipoles=dipole_moments,\n                                       volumes=uncorrelated_volumes)\n\n        self.value = EstimatedQuantity(value * unit.dimensionless,\n                                       uncertainty * unit.dimensionless, self.id)\n\n        logging.info('Extracted dielectrics: ' + self.id)\n\n        return self._get_output_dictionary()\n\n\n@plugins.register_calculation_protocol()\nclass ReweightDielectricConstant(reweighting.BaseMBARProtocol):\n    \"\"\"Reweights a set of dipole moments (`reference_dipole_moments`) and volumes\n    (`reference_volumes`) using MBAR, and then combines these to yield the reweighted\n    dielectric constant. Uncertainties in the dielectric constant are determined\n    by bootstrapping.\n    \"\"\"\n\n    reference_dipole_moments = protocol_input(\n        docstring='A Quantity wrapped np.ndarray of the dipole moments of each '\n                  'of the reference states.',\n        type_hint=list,\n        default_value=UNDEFINED\n    )\n    reference_volumes = protocol_input(\n        docstring='A Quantity wrapped np.ndarray of the volumes of each of the '\n                  'reference states.',\n        type_hint=list,\n        default_value=UNDEFINED\n    )\n\n    thermodynamic_state = protocol_input(\n        docstring='The thermodynamic state at which the trajectory was generated.',\n        type_hint=ThermodynamicState,\n        default_value=UNDEFINED\n    )\n\n    def __init__(self, protocol_id):\n        super().__init__(protocol_id)\n        self.bootstrap_uncertainties = True\n\n    def _bootstrap_function(self, reference_reduced_potentials, target_reduced_potentials,\n                            **reference_observables):\n\n        assert len(reference_observables) == 3\n\n        transposed_observables = {}\n\n        for key in reference_observables:\n            transposed_observables[key] = np.transpose(reference_observables[key])\n\n        values, _, _ = self._reweight_observables(np.transpose(reference_reduced_potentials),\n                                                  np.transpose(target_reduced_potentials),\n                                                  **transposed_observables)\n\n        average_squared_dipole = values['dipoles_sqr']\n        # the fluctuation formula needs |<mu>|^2, i.e. the squared norm of the average dipole\n        average_dipole_squared = np.linalg.norm(values['dipoles']) ** 2\n\n        dipole_variance = (average_squared_dipole - average_dipole_squared) * \\\n                          (unit.elementary_charge * unit.nanometers) ** 2\n\n        volume = values['volumes'] * unit.nanometer ** 3\n\n        e0 = 8.854187817E-12 * unit.farad / unit.meter  # Taken from QCElemental\n\n        dielectric_constant = 1.0 + dipole_variance / (3 *\n                                                       unit.boltzmann_constant *\n                                                       self.thermodynamic_state.temperature *\n                                                       volume *\n                                                       e0)\n\n        return dielectric_constant\n\n    def execute(self, directory, available_resources):\n\n        logging.info('Reweighting dielectric: {}'.format(self.id))\n\n        if len(self.reference_dipole_moments) == 0:\n            return PropertyEstimatorException(directory=directory, message='There were no dipole moments to reweight.')\n\n        if len(self.reference_volumes) == 0:\n            return PropertyEstimatorException(directory=directory, message='There were no volumes to reweight.')\n\n        if (not isinstance(self.reference_dipole_moments[0], unit.Quantity) or\n            not isinstance(self.reference_volumes[0], unit.Quantity)):\n\n            return PropertyEstimatorException(directory=directory,\n                                              message='The reference observables should be '\n                                                      'a list of unit.Quantity wrapped ndarray\\'s.')\n\n        if len(self.reference_dipole_moments) != 
len(self.reference_volumes):\n            return PropertyEstimatorException(directory=directory, message='The number of reference dipoles does '\n                                                                           'not match the number of reference volumes.')\n\n        for reference_dipoles, reference_volumes in zip(self.reference_dipole_moments, self.reference_volumes):\n\n            if len(reference_dipoles) == len(reference_volumes):\n                continue\n\n            return PropertyEstimatorException(directory=directory, message='The number of reference dipoles does '\n                                                                           'not match the number of reference volumes.')\n\n        self._reference_observables = self.reference_dipole_moments\n\n        dipole_moments = self._prepare_observables_array(self.reference_dipole_moments)\n        dipole_moments_sqr = np.array([[np.dot(dipole, dipole) for dipole in np.transpose(dipole_moments)]])\n\n        volumes = self._prepare_observables_array(self.reference_volumes)\n\n        if self.bootstrap_uncertainties:\n            error = self._execute_with_bootstrapping(unit.dimensionless,\n                                                     dipoles=dipole_moments,\n                                                     dipoles_sqr=dipole_moments_sqr,\n                                                     volumes=volumes)\n        else:\n\n            return PropertyEstimatorException(directory=directory,\n                                              message='Dielectric constant can only be reweighted in conjunction '\n                                                      'with bootstrapped uncertainties.')\n\n        if error is not None:\n\n            error.directory = directory\n            return error\n\n        return self._get_output_dictionary()\n\n\n@register_estimable_property()\n@register_thermoml_property(thermoml_string='Relative permittivity at zero frequency',\n                            supported_phases=PropertyPhase.Liquid)\nclass DielectricConstant(PhysicalProperty):\n    \"\"\"A class representation of a dielectric property\"\"\"\n\n    @property\n    def multi_component_property(self):\n        return False\n\n    @property\n    def required_data_class(self):\n        return StoredSimulationData\n\n    @staticmethod\n    def get_default_workflow_schema(calculation_layer, options=None):\n\n        if calculation_layer == 'SimulationLayer':\n            return DielectricConstant.get_default_simulation_workflow_schema(options)\n        elif calculation_layer == 'ReweightingLayer':\n            return DielectricConstant.get_default_reweighting_workflow_schema(options)\n\n        return None\n\n    @staticmethod\n    def get_default_simulation_workflow_schema(options=None):\n        \"\"\"Returns the default workflow to use when estimating this property\n        from direct simulations.\n\n        Parameters\n        ----------\n        options: WorkflowOptions\n            The default options to use when setting up the estimation workflow.\n\n        Returns\n        -------\n        WorkflowSchema\n            The schema to follow when estimating this property.\n        \"\"\"\n\n        # Define the protocol which will extract the average dielectric constant\n        # from the results of a simulation.\n        extract_dielectric = ExtractAverageDielectric('extract_dielectric')\n        extract_dielectric.thermodynamic_state = ProtocolPath('thermodynamic_state', 'global')\n\n        # Define the protocols which will run the simulation itself.\n        protocols, value_source, output_to_store = generate_base_simulation_protocols(extract_dielectric,\n                                                                                      options)\n\n        # Make sure the input of the analysis protocol is properly hooked up.\n        extract_dielectric.system_path = ProtocolPath('system_path', protocols.assign_parameters.id)\n\n        # Dielectric constants typically take longer to converge, so we need to\n        # reflect this in the maximum number of convergence iterations.\n        protocols.converge_uncertainty.max_iterations = 400\n\n        # Set up the gradient calculations. 
For dielectric constants, we need to use\n # a slightly specialised reweighting protocol which we set up here.\n gradient_mbar_protocol = ReweightDielectricConstant('gradient_mbar')\n gradient_mbar_protocol.reference_dipole_moments = [ProtocolPath('dipole_moments',\n protocols.converge_uncertainty.id,\n extract_dielectric.id)]\n gradient_mbar_protocol.reference_volumes = [ProtocolPath('volumes',\n protocols.converge_uncertainty.id,\n extract_dielectric.id)]\n gradient_mbar_protocol.thermodynamic_state = ProtocolPath('thermodynamic_state', 'global')\n\n coordinate_source = ProtocolPath('output_coordinate_file', protocols.equilibration_simulation.id)\n trajectory_source = ProtocolPath('trajectory_file_path', protocols.converge_uncertainty.id,\n protocols.production_simulation.id)\n statistics_source = ProtocolPath('statistics_file_path', protocols.converge_uncertainty.id,\n protocols.production_simulation.id)\n\n gradient_group, gradient_replicator, gradient_source = \\\n generate_gradient_protocol_group(gradient_mbar_protocol,\n [ProtocolPath('force_field_path', 'global')],\n ProtocolPath('force_field_path', 'global'),\n coordinate_source,\n trajectory_source,\n statistics_source)\n\n # Build the workflow schema.\n schema = WorkflowSchema(property_type=DielectricConstant.__name__)\n schema.id = '{}{}'.format(DielectricConstant.__name__, 'Schema')\n\n schema.protocols = {\n protocols.build_coordinates.id: protocols.build_coordinates.schema,\n protocols.assign_parameters.id: protocols.assign_parameters.schema,\n protocols.energy_minimisation.id: protocols.energy_minimisation.schema,\n protocols.equilibration_simulation.id: protocols.equilibration_simulation.schema,\n protocols.converge_uncertainty.id: protocols.converge_uncertainty.schema,\n protocols.extract_uncorrelated_trajectory.id: protocols.extract_uncorrelated_trajectory.schema,\n protocols.extract_uncorrelated_statistics.id: protocols.extract_uncorrelated_statistics.schema,\n gradient_group.id: gradient_group.schema\n }\n\n schema.replicators = [gradient_replicator]\n\n schema.outputs_to_store = {'full_system': output_to_store}\n\n schema.gradients_sources = [gradient_source]\n schema.final_value_source = value_source\n\n return schema\n\n @staticmethod\n def get_default_reweighting_workflow_schema(options=None):\n \"\"\"Returns the default workflow to use when estimating this property\n by reweighting existing data.\n\n Parameters\n ----------\n options: WorkflowOptions\n The default options to use when setting up the estimation workflow.\n\n Returns\n -------\n WorkflowSchema\n The schema to follow when estimating this property.\n \"\"\"\n\n data_replicator_id = 'data_replicator'\n\n # Set up a protocol to extract the dielectric constant from the stored data.\n extract_dielectric = ExtractAverageDielectric(f'calc_dielectric_$({data_replicator_id})')\n\n # For the dielectric constant, we employ a slightly more advanced reweighting\n # protocol set up for calculating fluctuation properties.\n reweight_dielectric = ReweightDielectricConstant('reweight_dielectric')\n reweight_dielectric.reference_dipole_moments = ProtocolPath('uncorrelated_values', extract_dielectric.id)\n reweight_dielectric.reference_volumes = ProtocolPath('uncorrelated_volumes', extract_dielectric.id)\n reweight_dielectric.thermodynamic_state = ProtocolPath('thermodynamic_state', 'global')\n reweight_dielectric.bootstrap_uncertainties = True\n reweight_dielectric.bootstrap_iterations = 200\n\n # Make a copy of the mbar reweighting protocol to use for evaluating 
gradients\n # by reweighting.\n reweight_dielectric_template = copy.deepcopy(reweight_dielectric)\n\n reweighting_protocols, data_replicator = generate_base_reweighting_protocols(extract_dielectric,\n reweight_dielectric,\n options,\n data_replicator_id)\n\n # Make sure input is taken from the correct protocol outputs.\n extract_dielectric.system_path = ProtocolPath('system_path', reweighting_protocols.build_reference_system.id)\n extract_dielectric.thermodynamic_state = ProtocolPath('thermodynamic_state',\n reweighting_protocols.unpack_stored_data.id)\n\n # Set up the gradient calculations\n coordinate_path = ProtocolPath('output_coordinate_path', reweighting_protocols.concatenate_trajectories.id)\n trajectory_path = ProtocolPath('output_trajectory_path', reweighting_protocols.concatenate_trajectories.id)\n\n gradient_group, gradient_replicator, gradient_source = \\\n generate_gradient_protocol_group(reweight_dielectric_template,\n ProtocolPath('force_field_path',\n reweighting_protocols.unpack_stored_data.id),\n ProtocolPath('force_field_path', 'global'),\n coordinate_path,\n trajectory_path,\n replicator_id='grad',\n use_subset_of_force_field=False,\n effective_sample_indices=ProtocolPath('effective_sample_indices',\n reweight_dielectric.id))\n\n schema = WorkflowSchema(property_type=DielectricConstant.__name__)\n schema.id = '{}{}'.format(DielectricConstant.__name__, 'Schema')\n\n schema.protocols = {protocol.id: protocol.schema for protocol in reweighting_protocols}\n schema.protocols[gradient_group.id] = gradient_group.schema\n\n schema.replicators = [data_replicator, gradient_replicator]\n\n schema.gradients_sources = [gradient_source]\n schema.final_value_source = ProtocolPath('value', reweighting_protocols.mbar_protocol.id)\n\n return schema\n","sub_path":"propertyestimator/properties/dielectric.py","file_name":"dielectric.py","file_ext":"py","file_size_in_byte":22469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"86873902","text":"from __future__ import absolute_import\nfrom abc import abstractmethod\nfrom copy import copy\nfrom makiflow.base import MakiLayer, MakiTensor\n\n\nclass SimpleForwardLayer(MakiLayer):\n def __call__(self, x):\n data = x.get_data_tensor()\n data = self._forward(data)\n\n parent_tensor_names = [x.get_name()]\n previous_tensors = copy(x.get_previous_tensors())\n previous_tensors.update(x.get_self_pair())\n maki_tensor = MakiTensor(\n data_tensor=data,\n parent_layer=self,\n parent_tensor_names=parent_tensor_names,\n previous_tensors=previous_tensors,\n )\n return maki_tensor\n\n @abstractmethod\n def _forward(self, X):\n pass","sub_path":"makiflow/layers/sf_layer.py","file_name":"sf_layer.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"437799221","text":"mail = dict()\nfname = input(\"Enter the file name:\\n\")\ntry:\n handle = open(fname)\n for line in handle:\n line = line.split()\n words = line\n if len(words) < 2 or words[0] != 'From':\n continue\n else:\n atpos = words[1].find('@')\n domain = words[1][atpos + 1:]\n if domain not in mail:\n mail[domain] = 1\n else:\n mail[domain] += 1\n print(mail)\nexcept FileNotFoundError:\n print(\"Enter correct file name.\")\n quit()","sub_path":"src/chapter9/exercise5.py","file_name":"exercise5.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"396010727","text":"import subprocess\nimport sys\nimport os \n\ncmd = sys.argv[1:]\nidx = None\nfor arg in cmd:\n if \"--task_index=\" in arg:\n idx = int(arg.split(\"=\")[1])\nif idx == 0:\n cmd.insert(0, \"horovodrun\")\n proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=False,env=dict(os.environ))\n while proc.poll() is None:\n line = proc.stdout.readline()\n line = line.strip()\n if line:\n print(line)\n proc.wait()","sub_path":"selftf/tf_job/other_framework/mxnet/launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"329457671","text":"import grpc\nimport simpleServer_pb2\nimport simpleServer_pb2_grpc\nfrom concurrent import futures\nimport threading\nimport queue\nimport time\n\nclass Client:\n\n def __init__(self):\n self.event = threading.Event()\n self.display_queue = queue.Queue(maxsize=-1)\n \n def request(self):\n while not self.event.is_set():\n self.print_instructions()\n request = input(\"Request: \")\n request = request.split()\n if self.is_valid(request):\n self.display_queue.put(request)\n elif request[0] == \"#quit\":\n self.event.set()\n break\n else:\n print(\"CLIENTE: COMANDO INVALIDO\")\n\n def is_valid(self, request):\n if len(request) > 1:\n if request[0] == \"CREATE\" or request[0] == \"UPDATE\" or request[0] == \"READ\" or request[0] == \"DELETE\":\n if request[1].isdigit():\n if (request[0] == \"READ\" or request[0] == \"DELETE\") and len(request) != 2:\n return False\n return True\n return False\n \n\n def display(self):\n while not self.event.is_set():\n if not self.display_queue.empty():\n request = self.display_queue.get()\n tipo = request[0]\n id = int(request[1])\n data = \" \".join(map(str, request[2:])) if len(request) > 2 else \"\"\n response = self.stub.Service(simpleServer_pb2.SimpleServerRequest(type=tipo, id=id, data=data)).response\n if (tipo == \"CREATE\" or tipo == \"UPDATE\"):\n msg = \"\\nANSWER FOR REQUISITION '%s %d <%s>':\\n\" % (tipo, id, data)\n elif (tipo == \"READ\" or tipo == \"DELETE\"):\n msg = \"\\nANSWER FOR REQUISITION '%s %d':\\n\" % (tipo, id)\n print(msg + response)\n\n def print_instructions(self):\n\n print(\"\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n \"~ INSTRUCTIONS: ~\\n\"\n \"~ ~\\n\"\n \"~ * To insert a new value type CREATE ~\\n\"\n \"~ ~\\n\"\n \"~ * To modify a value type UPDATE ~\\n\"\n \"~ ~\\n\"\n \"~ * To read a value type READ ~\\n\"\n \"~ ~\\n\"\n \"~ * To remove a value type DELETE ~\\n\"\n \"~ ~\\n\"\n \"~ * To close type '#quit' ~\\n\"\n \"~ ~\\n\"\n \"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\n \n def start(self):\n channel = grpc.insecure_channel('localhost:50051')\n self.stub = simpleServer_pb2_grpc.SimpleServerStub(channel)\n\n self.request_thread = threading.Thread(target=self.request)\n self.request_thread.setDaemon(True)\n self.request_thread.start()\n\n try:\n self.display()\n except KeyboardInterrupt:\n print(\"\\nTerminado pelo usuario\\n\")\n self.display()\n print(\"\\nClosing...\\n\")\n time.sleep(5)\n self.event.set()\n\n\nif __name__ == '__main__':\n client = Client()\n client.start()","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"544333951","text":"import os\nimport sys\n\nfrom PySide2.QtGui import QPixmap, QIcon\nfrom PySide2.QtCore import Qt, QRect\nfrom 
PySide2.QtWidgets import QDialog, QWidget, QLabel, QFrame, \\\n                            QVBoxLayout, QHBoxLayout, \\\n                            QCheckBox, QTabWidget, \\\n                            QTableWidget, QTableWidgetItem, \\\n                            QAbstractItemView\n\nfrom .QSS import Stylesheet, getNukePalette\nfrom .component import *\n \ndef resourcePath(relative_path):\n    try:\n        base_path = sys._MEIPASS\n    except Exception:\n        base_path = os.path.join(os.path.dirname(__file__),\"icons\")\n    \n    return os.path.join(base_path, relative_path)\n    \nclass UserInterface(QDialog):\n    def __init__(self, parent=None):\n        super(UserInterface, self).__init__(parent)\n        self.setWindowTitle(\"Render Manager by Timo.ink\")\n        self.setWindowIcon(QIcon(resourcePath(\"Timo.png\")))\n        self.setWindowFlags( self.windowFlags() |\n                             Qt.WindowSystemMenuHint |\n                             Qt.WindowMinMaxButtonsHint)\n        # setAttribute takes a single attribute plus an on/off flag, so each\n        # attribute must be set in its own call.\n        self.setAttribute(Qt.WA_DeleteOnClose)\n        self.setAttribute(Qt.WA_TranslucentBackground)\n        self.setStyleSheet(Stylesheet)\n        self.setPalette(getNukePalette())\n        self.resize(1600, 350)\n        \n        self.logo = QLabel() \n        self.logo.setPixmap(QPixmap(resourcePath(\"Timo.png\")).scaled(20, 20, Qt.KeepAspectRatio, Qt.SmoothTransformation))\n        self.logo.setMinimumSize(20, 20)\n        self.logo.setGeometry(QRect(4, 4, 20, 20))\n        self.logo.setParent(self)\n        \n        self.setup_ui()\n        self.show()\n    \n    def setup_ui(self):\n        self.main_layout = QVBoxLayout()\n        self.setLayout(self.main_layout)\n        self.main_layout.setContentsMargins(0, 0, 0, 0)\n        self.main_tab_widget = QTabWidget()\n        self.main_layout.addWidget(self.main_tab_widget)\n        \n        self.setup_job_ui()\n        self.setup_settings_ui()\n    \n    def setup_job_ui(self):\n        self.job_tab = QWidget()\n        self.job_master_layout = QVBoxLayout(self.job_tab)\n        self.job_master_layout.setContentsMargins(0,0,0,0)\n        self.job_master_layout.addSpacing(5)\n        self.main_tab_widget.addTab(self.job_tab, \"Job\")\n        \n        self.setup_job_ui_action_bar()\n        self.setup_job_ui_list()\n    \n    def setup_job_ui_action_bar(self):\n        # Button bar\n        self.job_action_layout = QHBoxLayout()\n        self.job_action_layout.setContentsMargins(5,0,5,0)\n        \n        # Divider\n        self.job_action_div_one = QFrame()\n        self.job_action_div_two = QFrame()\n        self.job_action_div_three = QFrame()\n        for d in [self.job_action_div_one, self.job_action_div_two, self.job_action_div_three]:\n            d.setFrameShape(QFrame.VLine)\n            d.setFrameShadow(QFrame.Sunken)\n        \n        # Add Buttons\n        self.job_action_start_button = ImageButton(resourcePath(\"Start.png\"), \"Start Queue\", 24)\n        self.job_action_stop_button = ImageButton(resourcePath(\"Stop.png\"), \"Stop Queue\", 24)\n        for button in (self.job_action_start_button, \n                       self.job_action_stop_button, \n                       self.job_action_div_one):\n            self.job_action_layout.addWidget(button)\n        \n        # Add Filter\n        self.job_action_filter_waiting = QCheckBox(\"Waiting\")\n        self.job_action_filter_running = QCheckBox(\"Running\")\n        self.job_action_filter_disabled = QCheckBox(\"Disabled\")\n        self.job_action_filter_finished = QCheckBox(\"Finished\")\n        self.job_action_filter_search = SearchBar(placeholder=\"Search\")\n        \n        for button in (self.job_action_filter_waiting, \n                       self.job_action_filter_running, \n                       self.job_action_filter_disabled, \n                       self.job_action_filter_finished):\n            button.setChecked(True)\n            self.job_action_layout.addWidget(button)\n        self.job_action_layout.addWidget(self.job_action_filter_search)\n        \n        self.job_action_layout.addWidget(self.job_action_div_two)\n        \n        # Add Buttons\n        self.job_action_enable_button = ImageButton(resourcePath(\"Enable.png\"), \"Enable selected Jobs\", 24)\n        self.job_action_disable_button = ImageButton(resourcePath(\"Disable.png\"), \"Disable selected Jobs\", 24)\n        
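# editor's note: ImageButton and SearchBar come from the wildcard import of .component;
        # the ImageButton signature is assumed to be (icon_path, tooltip, icon_size).
        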
self.job_action_check_button = ImageButton(resourcePath(\"Check.png\"), \"Check selected Jobs\", 24)\n self.job_action_reset_button = ImageButton(resourcePath(\"Reset.png\"), \"Reset selected Jobs\", 24)\n self.job_action_delete_button = ImageButton(resourcePath(\"Delete.png\"), \"Delete selected Jobs\", 24)\n for button in (self.job_action_enable_button, \n self.job_action_disable_button, \n self.job_action_check_button, \n self.job_action_div_three, \n self.job_action_reset_button, \n self.job_action_delete_button):\n self.job_action_layout.addWidget(button)\n self.job_action_layout.addStretch()\n \n # Add to master layout\n self.job_master_layout.addLayout(self.job_action_layout)\n \n \n def setup_job_ui_list(self):\n # Add Table \n self.job_list_layout = QHBoxLayout()\n \n self.job_list_table = QTableWidget()\n self.job_list_table.setSortingEnabled(True)\n self.job_list_table.setAlternatingRowColors(True)\n self.job_list_table.setMinimumHeight(120)\n self.job_list_table.setColumnCount(10)\n self.job_list_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.job_list_table.horizontalHeader().setStretchLastSection(True)\n \n self.job_list_table.setRowCount(1)\n column_number = 0\n for name, column_width in ((\"ID\", 50), (\"Date\", 150), (\"Submitter\", 150), (\"File\", 550), (\"Layer\", 100), (\"Weight\", 80), (\"Progress\", 200), (\"ETA\", 120), (\"Workers\", 80), (\"Action\", 80)):\n self.job_list_table.setHorizontalHeaderItem(column_number, QTableWidgetItem(name))\n self.job_list_table.setColumnWidth(column_number, column_width)\n column_number += 1\n self.job_list_table.setColumnCount(column_number)\n self.job_list_table.setRowCount(0)\n \n self.job_list_table_vertical_header = self.job_list_table.verticalHeader()\n self.job_list_table_vertical_header.setDefaultSectionSize(35)\n self.job_list_table_vertical_header.setMinimumSectionSize(35)\n self.job_list_table.setShowGrid(False)\n \n self.job_list_layout.addWidget(self.job_list_table)\n \n \n # Add to master layout\n self.job_master_layout.addLayout(self.job_list_layout)\n \n def setup_settings_ui(self):\n self.settings_tab = QWidget()\n self.settings_master_layout = QVBoxLayout(self.settings_tab)\n self.settings_master_layout.setContentsMargins(0,0,0,0)\n self.settings_master_layout.addSpacing(5)\n self.main_tab_widget.addTab(self.settings_tab, \"Settings\")\n ","sub_path":"gui/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"490470850","text":"import spacy\nfrom EditDistance import EditDistanceFinder\nfrom LanguageModel import LanguageModel\nimport string\n\ntranspose = True\n\nclass SpellChecker(object):\n\n def __init__(self, max_distance, channel_model=None, language_model=None):\n self.channel_model = channel_model\n self.language_model = language_model\n self.max_distance = max_distance\n self.nlp = spacy.load(\"en\", pipeline=[\"tagger\", \"parser\"])\n\n def load_channel_model(self, fp):\n self.channel_model = EditDistanceFinder()\n self.channel_model.load(fp)\n\n def load_language_model(self, fp):\n self.language_model = LanguageModel()\n self.language_model.load(fp)\n\n def bigram_score(self, prev_word, focus_word, next_word):\n score = lambda x, y: self.language_model.bigram_prob(x,y)\n return (score(prev_word, focus_word) + score(focus_word, next_word))/(2.0)\n\n def unigram_score(self, word):\n return self.language_model.unigram_prob(word)\n\n def inserts(self, word):\n l = []\n for 
i in range(len(word)):\n            for char in string.ascii_lowercase:\n                l.append(word[:i] + char + word[i:])\n        return [x for x in l if x in self.language_model]\n\n    def deletes(self, word):\n        l = []\n        for i in range(len(word)):\n            l.append(word[:i] + word[i+1:])\n        return [x for x in l if x in self.language_model]\n\n    def substitutions(self, word):\n        l = []\n        for i in range(len(word)):\n            for char in string.ascii_lowercase:\n                l.append(word[:i] + char + word[i+1:])\n        return [x for x in l if x in self.language_model]\n\n    def transposes(self, word):\n        l = []\n        for i in range(1,len(word)):\n            l.append(word[:i-1] + word[i] + word[i-1] + word[i+1:])\n        return [x for x in l if x in self.language_model]\n\n    def cm_score(self, error_word, corrected_word):\n        return self.channel_model.align(error_word, corrected_word)[0]\n\n    def generate_candidates(self, word):\n        source = [word]\n        for i in range(self.max_distance):\n            nested = list(map(self._one_step, source))\n            flat = [l for sublist in nested for l in sublist]\n            source = list(set(flat))\n        return source\n\n    def check_sentence(self, sentence, fallback=False):\n        l = []\n        for i in range(len(sentence)):\n            word = sentence[i]\n            if (word in self.language_model) or (word in string.punctuation) or word == '\\n':\n                l.append([word])\n            else:\n                choices = self.generate_candidates(word)\n                if len(choices) == 0:\n                    if fallback:\n                        l.append([word])\n                else:\n                    if i<1:\n                        prev_word = ''\n                    else:\n                        prev_word = sentence[i-1]\n\n                    if i+1 == len(sentence):\n                        next_word = ''\n                    else:\n                        next_word = sentence[i+1]\n\n                    #rank = lambda x: self.cm_score(x, word)\n                    #rank = lambda x: self.bigram_score(prev_word, x, next_word)\n                    rank = lambda x: self._combine_scores(self.cm_score(x, word), self.bigram_score(prev_word, x, next_word), self.unigram_score(x))\n                    ranked = sorted(choices, key = rank, reverse=False)\n                    l.append(list(ranked))\n\n\n        return l\n\n    def _combine_scores(self, cm_score, bigram_score,unigram_score):\n        return cm_score - 0.5*(bigram_score+unigram_score)\n\n\n\n    def _one_step(self, word):\n        if transpose:\n            return self.inserts(word) + self.deletes(word) + self.substitutions(word) + self.transposes(word)\n        else:\n            return self.inserts(word) + self.deletes(word) + self.substitutions(word)\n\n    def autocorrect_sentence(self, sentence):\n        options = self.check_sentence(sentence, fallback=True)\n        return [x[0] for x in options]\n\n    def suggest_sentence(self, sentence, max_suggestions):\n        options = self.check_sentence(sentence)\n        # slicing is safe for any list length, so no special case is needed here\n        get = lambda x: x[:max_suggestions]\n        return [get(x) for x in options]\n\n    def check_text(self, text, fallback=False):\n        func = lambda x: self.check_sentence(x, fallback)\n        return self._spacy_map(text, func)\n\n    def autocorrect_line(self, line):\n        return self._spacy_map(line, self.autocorrect_sentence)\n\n    def suggest_text(self, text, max_suggestions):\n        func = lambda x: self.suggest_sentence(x, max_suggestions)\n        return self._spacy_map(text, func)\n\n    def _spacy_map(self, text, function):\n        doc = self.nlp(text.lower())\n        l = []\n        for sentence in doc.sents:\n            stringlist = [str(x) for x in sentence]\n            l += function(stringlist)\n        return l\n\n\n\n\nif __name__ == \"__main__\":\n    s = SpellChecker(5)\n\n    with open('lm.pkl', 'rb') as fp:\n        s.load_language_model(fp)\n\n    with open('ed.pkl','rb') as fp:\n        s.load_channel_model(fp)\n\n    print(s.suggest_sentence(['it', 'was', 'the', 'best', 'of', 'times', 'it', 'was', 'the', 'blurst', 'of', 'times'], 4))\n\n    print(s.suggest_text(\"one fish. two fish. red fish. 
blue fish.\", 4))\n\n print(s.suggest_text(\"you are teh best\", 4))\n","sub_path":"SpellCheck.py","file_name":"SpellCheck.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"275069535","text":"\"\"\"\nCompare BU transfer factor ratios and UW transfer factor ratios\nUsage: python PlotTool/compareOutputRatio.py bu_input_file.root uw_input_file.root\n\"\"\"\n\nimport sys\nsys.path.append(\"PlotTool\")\n\nfrom PlotTool import *\nfrom ROOT import *\nimport config\n\ngroup = parser.add_group(__file__,__doc__,\"Script\")\n\nparser.parse_args()\n\ngROOT.SetBatch(1)\n\nregionlist = [ \"Wen\",\"Wmn\",\"Zee\",\"Zmm\",\"gjets\" ]\nprocessMap = {\n \"signal\": {\n \"Z\":{\"proc\":\"zjets\",\"label\":\"Z(#nu#nu)\",'text':'Znn'},\n \"W\":{\"proc\":\"wjets\",\"label\":\"W(l#nu)\",'text':'Wln'},\n },\n \"Wen\": {\"proc\":\"wjets\",\"label\":\"W(e#nu)\",'text':'Wen'},\n \"Wmn\": {\"proc\":\"wjets\",\"label\":\"W(#mu#nu)\",'text':'Wmn'},\n \"Wln\": {\"proc\":\"SumOfBkg\",\"label\":\"W(l#nu)\",'text':'Wln'},\n \"Zee\": {\"proc\":\"zll\",\"label\":\"Z(ee)\",'text':'Zee'},\n \"Zmm\": {\"proc\":\"zll\",\"label\":\"Z(#mu#mu)\",'text':'Zmm'},\n \"Zll\": {\"proc\":\"SumOfBkg\",\"label\":\"Z(ll)\",'text':'Zll'},\n \"gjets\": {\"proc\":\"gjets\",\"label\":\"#gamma\",\"text\":\"G\"},\n}\ndef TF_DataStyle(tf,color):\n # tf.SetLineWidth(2)\n tf.SetMarkerColor(color)\n tf.SetMarkerStyle(20);\n tf.SetMarkerSize(1);\ndef TF_MCStyle(tf,color):\n tf.SetLineWidth(2)\n tf.SetLineColor(color)\ndef GetRMS(hs): return (sum(ibin*ibin for ibin in hs)/len(hs))*0.5\ndef GetStdDev(hs):\n rms = GetRMS(hs)\n return ( sum( (ibin - rms)**2 for ibin in hs)/(len(hs)-1) )**0.5\ndef compareOutput(fname1,fname2):\n tfile1 = TFile.Open(fname1)\n tfile2 = TFile.Open(fname2)\n\n def compare(bu,uw,region_num,region_den):\n def getTF(num,den,proc): return GetRatio(num[proc],den[proc])\n bu_data = getTF(bu[region_num],bu[region_den],'data')\n bu_mc = getTF(bu[region_num],bu[region_den],'SumOfBkg')\n uw_data = getTF(uw[region_num],uw[region_den],'data')\n uw_mc = getTF(uw[region_num],uw[region_den],'SumOfBkg')\n\n rname = \"%s/%s\"%(processMap[region_num]['label'],processMap[region_den]['label'])\n tfname = \"Ratio_{%s}\" % rname\n \n c = TCanvas(\"c\", \"canvas\",800,800);\n gStyle.SetOptStat(0);\n gStyle.SetLegendBorderSize(0);\n # c.SetLeftMargin(0.15);\n #c.SetLogy();\n #c.cd();\n \n pad1 = TPad(\"pad1\",\"pad1\",0.01,0.25,0.99,0.99);\n pad1.Draw(); pad1.cd();\n pad1.SetFillColor(0);\n pad1.SetFrameBorderMode(0);\n pad1.SetBorderMode(0);\n pad1.SetBottomMargin(0.);\n\n TF_DataStyle(bu_data,kBlue)\n TF_MCStyle(bu_mc,kBlue)\n \n TF_DataStyle(uw_data,kRed)\n TF_MCStyle(uw_mc,kRed)\n\n max_rms = max( GetRMS(hs) for hs in (bu_data,bu_mc,uw_data,uw_mc) )\n max_std = max( GetStdDev(hs) for hs in (bu_data,bu_mc,uw_data,uw_mc) )\n\n nstd = 2\n bu_mc.SetMaximum(max_rms + nstd*max_std)\n bu_mc.SetMinimum(max(max_rms - nstd*max_std,0))\n\n bu_mc.SetTitle(\"\")\n bu_mc.GetYaxis().SetTitle(tfname)\n\n bu_mc.Draw(\"hist\")\n uw_mc.Draw(\"histsame\")\n bu_data.Draw(\"pex0same\")\n uw_data.Draw(\"pex0same\")\n \n leg = getLegend(xmin=0.1,xmax=0.7,ymin=0.8,ymax=0.9)\n leg.SetNColumns(2)\n leg.AddEntry(bu_data,\"%s BU Data\" % rname,'p')\n leg.AddEntry(bu_mc,\"%s BU MC\" % rname,'l')\n leg.AddEntry(uw_data,\"%s UW Data\" % rname,'p')\n leg.AddEntry(uw_mc,\"%s UW MC\" % rname,'l')\n leg.Draw()\n \n c.cd()\n pad2 = TPad(\"pad2\",\"pad2\",0.01,0.01,0.99,0.25);\n 
pad2.Draw(); pad2.cd();\n        pad2.SetFillColor(0); pad2.SetFrameBorderMode(0); pad2.SetBorderMode(0);\n        pad2.SetTopMargin(0);\n        pad2.SetBottomMargin(0.35);\n        \n        ratio_data = GetRatio(bu_data,uw_data)\n        ratio_mc = GetRatio(bu_mc,uw_mc)\n        \n        RatioStyle(ratio_mc,xname=\"Recoil (GeV)\",yname=\"BU/UW\")\n        RatioStyle(ratio_data,xname=\"Recoil (GeV)\",yname=\"BU/UW\")\n        ratio_mc.SetLineColor(kBlack)\n        ratio_data.SetLineColor(kBlack)\n        ratio_data.SetMarkerColor(kBlack)\n        ratio_mc.Draw(\"hist\")\n        ratio_data.Draw(\"pex0same\")\n        \n        line = getRatioLine(ratio_data.GetXaxis().GetXmin(),ratio_data.GetXaxis().GetXmax())\n        line.Draw(\"same\");\n        c.Update()\n\n        fname = \"%s%s_ratio\" % (processMap[region_num]['text'],processMap[region_den]['text'])\n        SaveAs(c,fname,year=config.version,sub=\"BU_Comparison/TransferFactors/\")\n        return \n    keylist1 =[ key.GetName() for key in tfile1.GetListOfKeys()]\n    keylist2 =[ key.GetName() for key in tfile2.GetListOfKeys()]\n    def SumOfBkg(sample):\n        bkg = None\n        for key,hs in sample.items():\n            if 'data' in key: continue\n\n            if bkg is None: bkg = hs.Clone('SumOfBkg')\n            else: bkg.Add(hs)\n        sample['SumOfBkg'] = bkg\n    def AddRegions(region1,region2):\n        region_sum = {}\n        for key in region1:\n            hs1,hs2 = region1[key],region2[key]\n            region_sum[key] = hs1.Clone()\n            region_sum[key].Add(hs2)\n        return region_sum\n    bu,uw = {},{}\n    for region in regionlist:\n        bu[region] = { key.replace('%s_'%region,\"\"):tfile1.Get(key) for key in keylist1 if region in key }\n        uw[region] = { key.replace('%s_'%region,\"\"):tfile2.Get(key) for key in keylist2 if region in key }\n        SumOfBkg(bu[region])\n        SumOfBkg(uw[region])\n    for sample in (bu,uw):\n        sample['Wln'] = AddRegions(sample['Wen'],sample['Wmn'])\n        sample['Zll'] = AddRegions(sample['Zee'],sample['Zmm'])\n\n    \n    compare(bu,uw,'Wen','gjets')\n    compare(bu,uw,'Zee','gjets')\n    compare(bu,uw,'Zee','Wen')\n    \n    compare(bu,uw,'Wmn','gjets')\n    compare(bu,uw,'Zmm','gjets')\n    compare(bu,uw,'Zmm','Wmn')\n    \n    compare(bu,uw,'Wln','gjets')\n    compare(bu,uw,'Zll','gjets')\n    compare(bu,uw,'Zll','Wln')\n    \n\n\ncompareOutput(parser.args.argv[0],parser.args.argv[1])\n    \n","sub_path":"PlotTool/compareOutputRatio.py","file_name":"compareOutputRatio.py","file_ext":"py","file_size_in_byte":5820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"55696106","text":"\n\nfrom matplotlib.widgets import Cursor\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport matplotlib.animation as animation\n\n\nclass Plotter:\n    def __init__(self, ax, title=None, ylabel=None, xlabel=None, \n                 maxt=25, dt=0.1, ydata=0, xdata=0):\n        \n        self.ax = ax \n        self.maxt = maxt\n        self.dt = dt\n\n        self.ax.set_title(title)\n        self.ax.set_ylabel(ylabel)\n        self.ax.set_ylim(-60, 60)\n        self.ax.set_xlabel(xlabel)\n        self.ax.set_xlim(0, self.maxt)\n        \n        self.ax.grid(which='both')\n        \n        self.bdata = [0]\n        self.cdata = [0]\n        self.zdata = [0]\n        \n        self.tdata = [0]\n\n        self.line_b = Line2D(self.tdata, self.bdata, color='red')\n        self.line_c = Line2D(self.tdata, self.cdata, color='green')\n        self.line_z = Line2D(self.tdata, self.zdata)\n\n        self.ax.add_line(self.line_b)\n        self.ax.add_line(self.line_c)\n        self.ax.add_line(self.line_z)\n        \n        self.ax.legend([self.line_b, self.line_c, self.line_z],\n                       ['Bx', 'Bc', 'Bz'])\n\n        #self.cursor = Cursor(self.ax, useblit=False, color='red', linewidth=2)\n\n    def shift_axis_x(self):\n        lastt = self.tdata[-1]\n        if lastt > self.tdata[0] + self.maxt:  # reset the arrays\n            #self.tdata = [self.tdata[-1]]\n            self.tdata 
= [self.tdata[0]]\n\n\n            self.bdata = [self.bdata[-1]]\n            self.cdata = [self.cdata[-1]]\n            self.zdata = [self.zdata[-1]]\n            \n            self.ax.set_xlim(self.tdata[0], self.tdata[0] + self.maxt)\n            self.ax.figure.canvas.draw()\n\n    def update(self, y):\n        if not y:\n            return\n\n        self.shift_axis_x()\n        \n        t = self.tdata[-1] + self.dt\n        self.tdata.append(t)\n\n        self.bdata.append(y[0])\n        self.cdata.append(y[1])\n        self.zdata.append(y[2])\n\n        self.line_b.set_data(self.tdata, self.bdata)\n        self.line_c.set_data(self.tdata, self.cdata)\n        self.line_z.set_data(self.tdata, self.zdata)\n\n        return self.line_b, self.line_c, self.line_z\n    \n\nif __name__ == '__main__': \n    from sensor import HMR\n    import serial\n\n    serobj = serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout=0.03) \n    hmr3500 = HMR(serobj)\n\n    fig, ax = plt.subplots()\n    plotter = Plotter(ax, title='Magnetic field B(t) over time', \n                      ylabel='Bx, By, Bz', xlabel='t, s')\n\n    def get_sensor_data():\n        sensor_data = hmr3500.get_data()\n        if sensor_data:\n            print(sensor_data)\n            roll, pitch, heading, magb, magc, magz = sensor_data\n            yield magb, magc, magz\n        else:\n            yield None\n\n    ani = animation.FuncAnimation(fig, plotter.update, get_sensor_data, interval=100)\n    \n    plt.show()\n\n","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"376963524","text":"# coding: utf-8\n\n# Copyright 2018 IBM All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ibm_whcs_sdk.insights_for_medical_literature as wh\n\ndef test_hit_count_model():\n    model = wh.HitCount(50)\n    model_diff = wh.HitCount(40)\n\n    count_obj = {}\n    count_obj['hitCount'] = 50\n    hit_count = model._from_dict(count_obj)\n\n    assert model.__str__()\n    assert model.__eq__(hit_count)\n    assert model.__ne__(model_diff)\n","sub_path":"ibm_whcs_sdk/insights_for_medical_literature/tests/integration/model/test_hit_count_model.py","file_name":"test_hit_count_model.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"330065495","text":"\n\n##### Now, let's make a submission to Kaggle\n# let's start by building another functional test that exercises the whole pipeline:\n\n#### we create 2 global dfs, to simulate the train and test sets\n\n######### MAKE A FUNCTIONAL FAILING TEST\n\ndf = DDF({\n    'Id': np.arange(3),\n    'MiscFeature': [np.nan, 'Gar2', 'Othr'],\n    'MoSold': [2, 5, 9],\n    'SalePrice': [1000, 20000, 400000]\n    })\n\ntrain_df = df.colslice(df.columns)\ntest_df = df.drop_columns(['SalePrice'])\n\n\ndef test_make_submission_file(tmpdir):\n    sub_path = str(tmpdir) + 'sub.csv'\n    model_params = {'min_data': 1, 'min_data_in_bin': 1}\n    ms.make_submission_file(train_df, test_df, sub_path, extra_model_params=model_params)\n    result_df = DDF.from_csv(sub_path)\n    assert 'SalePrice' in result_df.columns\n    assert 'Id' in result_df.columns\n    assert len(result_df.columns) == 
2\n\n########## Make the test pass, by writing the required functions:\ndef make_submission_file(train_df, test_df, sub_path, extra_model_params={}):\n    df = append_dfs(train_df, test_df)\n    df = ev.clean_data(df)\n    df = ev.add_features(df)\n    mm, targets = ev.get_mm(df), ev.get_targets(df)\n    ixs = get_lb_ixs(df)\n    params = {'n_estimators': 1100, 'learning_rate': 0.01, 'silent': 0}\n    params.update(extra_model_params)\n    model = ev.get_model(**params)\n    fitter = CVFitter(model)\n    results = fitter.fit(mm, targets, ixs)\n    preds = results['preds'][0]\n    generate_sub_file(preds, test_df['Id'], sub_path)\n\n\n############# UNIT tests\n\ndef test_append_dfs():\n    result_df = ms.append_dfs(train_df, test_df)\n    assert len(result_df) == len(train_df) + len(test_df)\n\n## make the test pass\ndef append_dfs(df_train, df_test):\n    df_test['SalePrice'] = np.nan\n    result_df = df_train.append(df_test, axis=0)\n    return result_df\n\n\n############ test \n\ndef test_get_lb_ixs():\n    df = DDF({\n        'col1': np.arange(5),\n        'SalePrice': np.array([100.]*3 + [np.nan]*2)\n    })\n    ixs = ms.get_lb_ixs(df)\n    expected = {\n        0: {'train': np.array([True]*3 + [False]*2),\n            'val': np.array([False]*3 + [True]*2)}\n    }\n    assert np.all(expected[0]['train'] == ixs[0]['train'])\n    assert np.all(expected[0]['val'] == ixs[0]['val'])\n\n## make the test pass\ndef get_lb_ixs(df):\n    train_ixs = np.isfinite(df[ev.target_name])\n    result = OrderedDict({0: {'train': train_ixs, 'val': ~train_ixs}})\n    return result\n\n\n############ test\ndef test_generate_sub_file(tmpdir):\n    preds = np.arange(10)\n    id_sub = np.arange(10)\n    sub_file_path = tmpdir + 'file.csv'\n    ms.generate_sub_file(preds, id_sub, sub_file_path)\n    result_df = DDF.from_csv(sub_file_path)\n    assert result_df.shape[1] == 2\n    assert result_df.shape[0] == 10\n\n\n## make the test pass\ndef generate_sub_file(preds, sub_id, sub_file_path):\n    df = DDF({\n        'Id': sub_id,\n        ev.target_name: np.exp(preds)\n    })\n    df.to_csv(sub_file_path)\n\n","sub_path":"steps_submission.py","file_name":"steps_submission.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"80827182","text":"\"\"\"\nMobile phone verification code\n\"\"\"\nimport json\nimport time\n\nfrom common.Container import Container\nfrom common.HttpClient import HttpClient\nfrom common.Logger import Logger\n\n\nclass PhoneVerificationsCode(object):\n\n    def __init__(self, company_account):\n        self.logger = Logger()\n        self.account = company_account\n        self.openid = None\n        self.prev_verification_code = ''\n\n    def request_phone_code(self, retry_times):\n        # 2. tell the backend that the SMS verification code has been sent\n        # temporarily removed; skip step 2, 20190409\n        # openid = self.code_already_send(retry_times)\n        # retry_times += 1\n        # if openid is False:\n        #     return False\n        # self.openid = openid\n        # 3. poll the endpoint once every 5 seconds\n        code = self.loop_request_code(retry_times)\n        if code is False:\n            return False\n        return code\n\n    def code_already_send(self, retry_times):\n        \"\"\"\n        After the send-code button is clicked, notify the backend that the button has been pressed\n        :param retry_times: number of retries (controlled by the backend and the business code)\n        :return: openid\n        \"\"\"\n        url = Container.PHONE_SEND_CODE\n        method = 'POST'\n        request_param = {'account': self.account, 'retry_times': retry_times,\n                         'task_type': Container.COMMON_DATA['task_type']}\n        result_bool, response = HttpClient().to_request_for_code(method, url, data=request_param)\n        print(response)\n        Logger().to_log('info', 'code_already_send')\n        if result_bool is True and response['code'] == 200:\n            return response['openid']\n        return False\n\n    def loop_request_code(self, retry_times):\n        \"\"\"\n        
Poll for the SMS verification code that the customer sent back\n        :param retry_times:\n        :return:\n        \"\"\"\n        # hit the endpoint once every 5 seconds\n        # compare this code with the previous one; if they match, the code has not changed since the retry and is still wrong (otherwise there would have been no retry)\n        # when the retry count is greater than 1, store the received code so it can be compared against on the next request\n        url = Container.PHONE_GET_CODE\n        method = 'POST'\n        request_param = dict()\n        # request_param['openid'] = self.openid\n        request_param['account'] = self.account\n        for i in range(int(Container.LOGIN_WAIT_SECOND / Container.LOGIN_REQUEST_INTERVAL)):\n            result_bool, response = HttpClient().to_request_for_code(method, url, data=request_param)\n            response = json.loads(response)\n            if result_bool is True and response['code'] == 200:\n                self.logger.to_log('info', 'SMS verification code: {}'.format(response['data']))\n                if response['data'] == self.prev_verification_code:\n                    self.logger.to_log('info', 'Two consecutive requests returned the same code, so the code is wrong')\n                    time.sleep(Container.LOGIN_REQUEST_INTERVAL)\n                    continue\n                if retry_times > 1:\n                    self.prev_verification_code = response['data']\n                return response['data']\n            self.logger.to_log('info', 'No verification code data received')\n            time.sleep(Container.LOGIN_REQUEST_INTERVAL)\n        self.logger.to_log('info', 'This 5-minute polling cycle has ended')\n        return False\n\n\nif __name__ == '__main__':\n    account = '91610133MA6TXDH24P'\n    login_retry_times = 1\n    Container.PATH = Container.PATH + '\\\\..\\\\'\n    pvc = PhoneVerificationsCode(account)\n    Container.COMMON_DATA = dict()\n    Container.COMMON_DATA['task_type'] = 1\n    while True:\n        # business code: fill in the account, password, etc.\n        # click the button that sends the SMS verification code\n        if login_retry_times > (Container.LOGIN_MAX_RETRY_TIMES - 1):\n            Logger().to_log('info', 'Retry count exceeded')\n            break\n        verification_code = pvc.request_phone_code(login_retry_times)\n        login_retry_times += 1\n        if verification_code is False:\n            continue\n        else:\n            break\n    # business code: enter the verification code we obtained\n    # then check whether it is correct; if the code is wrong, continue the loop\n    # continue\n","sub_path":"common/PhoneVerificationCode.py","file_name":"PhoneVerificationCode.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"576295309","text":"#! 
/usr/bin/env python\n# Time-stamp: <2014-02-11 14:34 christophe@pallier.org>\n\nimport random, sys\nc = \"bbbbccccddddffffggghhjjjjkkllllmmmnnnnppppqqqrrrrrsssssstttttvvvwxxz\"\n\n\ndef pseudo(n):\n    return \"\".join([ random.choice(c) for x in range(n) ])\n\nif __name__ == '__main__':\n    infile = open(sys.argv[1], 'r')\n    for line in infile:\n        lens = [ len(w) for w in line.split(\",\")]\n        output = \",\".join([ pseudo(i) for i in lens ])\n        print(output)\n\n","sub_path":"MathFormula2/stimulation/language_localizer/localizer_lists/text2consonants.py","file_name":"text2consonants.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"205567431","text":"import os\n\n\ncur_path=os.path.dirname(os.path.abspath(__file__))\n\nfilename = 'workfile.dat'\n\ndata = open(os.path.join(cur_path,filename),'w')\n\ndata.write(\"My name is Hase and I am ......\")\n\ndata.close()","sub_path":"examples/Diverses/writetest.py","file_name":"writetest.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"273764511","text":"from bs4.element import ResultSet\r\nimport discord\r\nfrom discord import client\r\nfrom discord.channel import VoiceChannel\r\nfrom discord.ext import commands\r\nfrom selenium.webdriver.chrome import options\r\nfrom youtube_dl import YoutubeDL\r\nimport bs4\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom discord.utils import get\r\nfrom discord import FFmpegPCMAudio\r\nimport asyncio\r\nimport time\r\n\r\n\r\n\r\nbot = commands.Bot(command_prefix=\"!!\")\r\nClient = discord.Client()\r\n\r\nuser = []\r\nmusictitle = []\r\nsong_queue = []\r\nmusicnow = []\r\n\r\nuserF = []\r\nuserFlist = []\r\nallplaylist = []\r\n\r\ndef title(msg):\r\n    global music\r\n\r\n    YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist':'True'}\r\n    FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\r\n\r\n    options = webdriver.ChromeOptions()\r\n    options.add_argument(\"headless\")\r\n\r\n    chromedriver_dir = \"D:/다운로드/chromedriver_win32 (1)/chromedriver.exe\"\r\n    driver = webdriver.Chrome(chromedriver_dir, options = options)\r\n    driver.get(\"https://www.youtube.com/results?search_query=\"+msg+\"+lyrics\")\r\n    source = driver.page_source\r\n    bs = bs4.BeautifulSoup(source, 'lxml')\r\n    entire = bs.find_all('a', {'id': 'video-title'})\r\n    entireNum = entire[0]\r\n    music = entireNum.text.strip()\r\n    \r\n    musictitle.append(music)\r\n    musicnow.append(music)\r\n    test1 = entireNum.get('href')\r\n    url = 'https://www.youtube.com'+test1\r\n    with YoutubeDL(YDL_OPTIONS) as ydl:\r\n        info = ydl.extract_info(url, download=False)\r\n        URL = info['formats'][0]['url']\r\n\r\n    driver.quit()\r\n    \r\n    musicnow.insert(0, entire)\r\n    return music, URL\r\n\r\ndef play(ctx):\r\n    global vc\r\n    YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist':'True'}\r\n    FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\r\n    URL = song_queue[0]\r\n    del user[0]\r\n    del musictitle[0]\r\n    del song_queue[0]\r\n    vc = get(bot.voice_clients, guild=ctx.guild)\r\n    if not vc.is_playing():\r\n        vc.play(discord.FFmpegPCMAudio(URL,**FFMPEG_OPTIONS), after=lambda e:play_next(ctx)) \r\n\r\ndef play_next(ctx):\r\n    if len(musicnow) - len(user) >= 2:\r\n        for i in range(len(musicnow) - len(user) - 1):\r\n            del 
musicnow[0]\r\n    YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist':'True'}\r\n    FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\r\n    if len(user) >= 1:\r\n        if not vc.is_playing():\r\n            del musicnow[0]\r\n            URL = song_queue[0]\r\n            del user[0]\r\n            del musictitle[0]\r\n            del song_queue[0]\r\n            vc.play(discord.FFmpegPCMAudio(URL,**FFMPEG_OPTIONS), after=lambda e: play_next(ctx))\r\n\r\n    else:\r\n        if not vc.is_playing():\r\n            bot.loop.create_task(vc.disconnect())\r\n@bot.event\r\nasync def on_ready():\r\n    print(\"Logging in as: \")\r\n    print(bot.user.name)\r\n    print(\"connection was successful\")\r\n    await bot.change_presence(status=discord.Status.online, activity=discord.Game(\"둠피삭제\"))\r\n\r\n@bot.command()\r\nasync def 따라해(ctx, *, number):\r\n    await ctx.send(embed = discord.Embed(title = \"Echo\", description = number, color = 0x00ff00))\r\n\r\n@bot.command()\r\nasync def 입장(ctx):\r\n    try:\r\n        global vc\r\n        vc = await ctx.message.author.voice.channel.connect()\r\n    except:\r\n        try:\r\n            vc = await ctx.message.author.voice.channel.connect()\r\n            await vc.move_to(ctx.message.author.voice.channel)\r\n        except:\r\n            await ctx.send(\"You're not even in a voice channel, and you want me to join??\")\r\n\r\n@bot.command()\r\nasync def 퇴장(ctx):\r\n    try:\r\n        await vc.disconnect()\r\n    except:\r\n        await ctx.send(\"I'm not even in a channel, what do you mean leave?\")\r\n\r\n@bot.command()\r\nasync def 도움말(ctx):\r\n    await ctx.send(embed = discord.Embed(title='Help',description=\"\"\"\r\n\\n!!도움말 -> Shows every command the music bot has.\r\n\\n!!입장 -> Calls the music bot into the voice channel you are in.\r\n\\n!!퇴장 -> Removes the music bot from the voice channel you are in.\r\n\\n!!URL [song link] -> Plays the song at the given YouTube URL.\r\n(Cannot be used during playlist playback.)\r\n\\n!!재생 [song name] -> The music bot searches for the song and plays it.\r\n\\n!!끄기 -> Stops the currently playing song.\r\n!!중지 -> Pauses the currently playing song.\r\n!!다시재생 -> Resumes a paused song.\r\n\\n!!지금노래 -> Shows the title of the song playing right now.\r\n\\n!!멜론차트 -> Plays the latest Melon chart.\r\n\\n!!즐겨찾기 -> Shows your favourites list.\r\n!!즐겨찾기추가 [song name] -> The music bot searches for the song and adds it to your favourites.\r\n!!즐겨찾기삭제 [number] -> Removes the song with that number from your favourites.\r\n\\n!!목록 -> Shows the list of songs queued up next.\r\n!!목록재생 -> Plays the songs added to the list.\r\n!!목록초기화 -> Removes every song added to the list.\r\n\\n!!대기열추가 [song] -> Adds a song to the queue.\r\n!!대기열삭제 [number] -> Removes the song with that number from the queue.\r\n\\n Bot invite link https://discord.com/oauth2/authorize?client_id=881404524001107979&permissions=8&scope=bot\"\"\", color = 0x00ff00))\r\n\r\n@bot.command()\r\nasync def URL(ctx, *, url):\r\n\r\n    try:\r\n        global vc\r\n        vc = await ctx.message.author.voice.channel.connect()\r\n    except:\r\n        try:\r\n            vc = await ctx.message.author.voice.channel.connect()\r\n            await vc.move_to(ctx.message.author.voice.channel)\r\n        except:\r\n            await ctx.send(\"You're not even in a voice channel, and you want me to join??\") \r\n\r\n    YDL_OPTIONS = {'format': 'bestaudio','noplaylist':'True'}\r\n    FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\r\n\r\n    if not vc.is_playing():\r\n        with YoutubeDL(YDL_OPTIONS) as ydl:\r\n            info = ydl.extract_info(url, download=False)\r\n            URL = info['formats'][0]['url']\r\n            vc.play(FFmpegPCMAudio(URL, **FFMPEG_OPTIONS))\r\n            await ctx.send(embed = discord.Embed(title= \"Now Playing\", description = \"Now playing \" + url + \".\", color = 0x00ff00))\r\n    else:\r\n        await ctx.send(\"A song is already playing!\")\r\n\r\n@bot.command()\r\nasync def 재생(ctx, *, msg):\r\n\r\n    try:\r\n        global vc\r\n        vc = await ctx.message.author.voice.channel.connect()\r\n    except:\r\n        try:\r\n            vc = await ctx.message.author.voice.channel.connect()\r\n            await vc.move_to(ctx.message.author.voice.channel)\r\n        except:\r\n            await ctx.send(\"
음성채널에 접속도 안하고 들어오라고 하냐??\")\r\n\r\n if not vc.is_playing():\r\n\r\n options = webdriver.ChromeOptions()\r\n options.add_argument(\"headless\")\r\n\r\n global entireText\r\n YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist':'True'}\r\n FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\r\n \r\n chromedriver_dir = \"D:/다운로드/chromedriver_win32 (1)/chromedriver.exe\"\r\n driver = webdriver.Chrome(chromedriver_dir)\r\n driver.get(\"https://www.youtube.com/results?search_query=\"+msg+\"+lyrics\")\r\n source = driver.page_source\r\n bs = bs4.BeautifulSoup(source, 'lxml')\r\n entire = bs.find_all('a', {'id': 'video-title'})\r\n entireNum = entire[0]\r\n entireText = entireNum.text.strip()\r\n musicurl = entireNum.get('href')\r\n url = 'https://www.youtube.com'+musicurl \r\n\r\n driver.quit()\r\n\r\n with YoutubeDL(YDL_OPTIONS) as ydl:\r\n info = ydl.extract_info(url, download=False)\r\n URL = info['formats'][0]['url']\r\n await ctx.send(embed = discord.Embed(title= \"노래 재생\", description = \"현재\" + entireText + \"을(를) 재생하고 있습니다.\", color = 0x00ff00))\r\n vc.play(FFmpegPCMAudio(URL, **FFMPEG_OPTIONS), after = lambda e: play_next(ctx))\r\n else:\r\n user.append(msg)\r\n result, URLTEST = title(msg)\r\n song_queue.append(URLTEST)\r\n await ctx.send(\"이미 노래가 재생중이라\" + result + \"을(를) 대기열로 추가시켰어요!\")\r\n \r\n\r\n@bot.command()\r\nasync def 중지(ctx):\r\n if vc.is_playing():\r\n vc.pause()\r\n await ctx.send(embed = discord.Embed(title= \"일시정지\", description = musicnow[0] + \"을(를) 일시정지 했습니다.\", color = 0x00ff00))\r\n else:\r\n await ctx.send(\"지금 노래가 재생되지 않네요.\")\r\n\r\n@bot.command()\r\nasync def 다시재생(ctx):\r\n try:\r\n vc.resume()\r\n except:\r\n await ctx.send(\"지금 노래가 재생되지 않네요.\")\r\n else:\r\n await ctx.send(embed = discord.Embed(title= \"다시재생\", description = musicnow[0] + \"을(를) 다시 재생했습니다.\", color = 0x00ff00))\r\n\r\n@bot.command()\r\nasync def 끄기(ctx):\r\n if vc.is_playing():\r\n vc.stop()\r\n await ctx.send(embed = discord.Embed(title= \"노래끄기\", description = musicnow[0] + \"을(를) 종료했습니다.\", color = 0x00ff00))\r\n else:\r\n await ctx.send(\"지금 노래가 재생되지 않네요.\")\r\n\r\n@bot.command()\r\nasync def 지금노래(ctx):\r\n if not vc.is_playing():\r\n await ctx.send(\"지금은 노래가 재생되지 않네요..\")\r\n else:\r\n await ctx.send(embed = discord.Embed(title = \"지금노래\", description = \"현재 \" + musicnow[0] + \"을(를) 재생하고 있습니다.\", color = 0x00ff00))\r\n\r\n@bot.command()\r\nasync def 멜론차트(ctx):\r\n if not vc.is_playing():\r\n \r\n options = webdriver.ChromeOptions()\r\n options.add_argument(\"headless\")\r\n\r\n global entireText\r\n YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist':'True'}\r\n FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\r\n \r\n chromedriver_dir = \"D:/다운로드/chromedriver_win32 (1)/chromedriver.exe\"\r\n driver = webdriver.Chrome(chromedriver_dir, options = options)\r\n driver.get(\"https://www.youtube.com/results?search_query=멜론차트\")\r\n source = driver.page_source\r\n bs = bs4.BeautifulSoup(source, 'lxml')\r\n entire = bs.find_all('a', {'id': 'video-title'})\r\n entireNum = entire[0]\r\n entireText = entireNum.text.strip()\r\n musicurl = entireNum.get('href')\r\n url = 'https://www.youtube.com'+musicurl \r\n\r\n driver.quit()\r\n\r\n with YoutubeDL(YDL_OPTIONS) as ydl:\r\n info = ydl.extract_info(url, download=False)\r\n URL = info['formats'][0]['url']\r\n await ctx.send(embed = discord.Embed(title= \"노래 재생\", description = \"현재 \" + entireText + \"을(를) 재생하고 있습니다.\", color = 
0x00ff00))\r\n        vc.play(FFmpegPCMAudio(URL, **FFMPEG_OPTIONS))\r\n    else:\r\n        await ctx.send(\"이미 노래가 재생 중이라 노래를 재생할 수 없어요!\")\r\n\r\n@bot.command()\r\nasync def 대기열추가(ctx, *, msg):\r\n    user.append(msg)\r\n    result, URLTEST = title(msg)\r\n    song_queue.append(URLTEST)\r\n    await ctx.send(result + \"를 재생목록에 추가했어요!\")\r\n\r\n@bot.command()\r\nasync def 대기열삭제(ctx, *, number):\r\n    try:\r\n        ex = len(musicnow) - len(user)\r\n        del user[int(number) - 1]\r\n        del musictitle[int(number) - 1]\r\n        del song_queue[int(number)-1]\r\n        del musicnow[int(number)-1+ex]\r\n        \r\n        await ctx.send(\"대기열이 정상적으로 삭제되었습니다.\")\r\n    except:\r\n        if len(song_queue) == 0:\r\n            await ctx.send(\"대기열에 노래가 없어 삭제할 수 없어요!\")\r\n        else:\r\n            if len(song_queue) < int(number):\r\n                await ctx.send(\"숫자의 범위가 목록개수를 벗어났습니다!\")\r\n            else:\r\n                await ctx.send(\"숫자를 입력해주세요!\")\r\n\r\n@bot.command()\r\nasync def 목록(ctx):\r\n    if len(musictitle) == 0:\r\n        await ctx.send(\"아직 아무노래도 등록하지 않았어요.\")\r\n    else:\r\n        global Text\r\n        Text = \"\"\r\n        for i in range(len(musictitle)):\r\n            Text = Text + \"\\n\" + str(i + 1) + \". \" + str(musictitle[i])\r\n        \r\n        await ctx.send(embed = discord.Embed(title= \"노래목록\", description = Text.strip(), color = 0x00ff00))\r\n\r\n@bot.command()\r\nasync def 목록초기화(ctx):\r\n    try:\r\n        ex = len(musicnow) - len(user)\r\n        del user[:]\r\n        del musictitle[:]\r\n        del song_queue[:]\r\n        while True:\r\n            try:\r\n                del musicnow[ex]\r\n            except:\r\n                break\r\n        await ctx.send(embed = discord.Embed(title= \"목록초기화\", description = \"\"\"목록이 정상적으로 초기화되었습니다. 이제 노래를 등록해볼까요?\"\"\", color = 0x00ff00))\r\n    except:\r\n        await ctx.send(\"아직 아무노래도 등록하지 않았어요.\")\r\n\r\n@bot.command()\r\nasync def 목록재생(ctx):\r\n\r\n    YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist':'True'}\r\n    FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\r\n    \r\n    if len(user) == 0:\r\n        await ctx.send(\"아직 아무노래도 등록하지 않았어요.\")\r\n    else:\r\n        if len(musicnow) - len(user) >= 1:\r\n            for i in range(len(musicnow) - len(user)):\r\n                del musicnow[0]\r\n        if not vc.is_playing():\r\n            play(ctx)\r\n        else:\r\n            await ctx.send(\"노래가 이미 재생되고 있어요!\")\r\n\r\n@bot.command()\r\nasync def 즐겨찾기(ctx):\r\n    global Ftext\r\n    Ftext = \"\"\r\n    correct = 0\r\n    global Flist\r\n    for i in range(len(userF)):\r\n        if userF[i] == str(ctx.message.author.name): #userF에 유저정보가 있는지 확인\r\n            correct = 1 #있으면 넘김\r\n    if correct == 0:\r\n        userF.append(str(ctx.message.author.name)) #userF에다가 유저정보를 저장\r\n        userFlist.append([]) #유저 노래 정보 첫번째에 유저이름을 저장하는 리스트를 만듬.\r\n        userFlist[len(userFlist)-1].append(str(ctx.message.author.name))\r\n    \r\n    for i in range(len(userFlist)):\r\n        if userFlist[i][0] == str(ctx.message.author.name):\r\n            if len(userFlist[i]) >= 2: # 노래가 있다면\r\n                for j in range(1, len(userFlist[i])):\r\n                    Ftext = Ftext + \"\\n\" + str(j) + \". 
\" + str(userFlist[i][j])\r\n titlename = str(ctx.message.author.name) + \"님의 즐겨찾기\"\r\n embed = discord.Embed(title = titlename, description = Ftext.strip(), color = 0x00ff00)\r\n embed.add_field(name = \"목록에 추가\\U0001F4E5\", value = \"즐겨찾기에 모든 곡들을 목록에 추가합니다.\", inline = False)\r\n embed.add_field(name = \"플레이리스트로 추가\\U0001F4DD\", value = \"즐겨찾기에 모든 곡들을 새로운 플레이리스트로 저장합니다.\", inline = False)\r\n Flist = await ctx.send(embed = embed)\r\n await Flist.add_reaction(\"\\U0001F4E5\")\r\n await Flist.add_reaction(\"\\U0001F4DD\")\r\n else:\r\n await ctx.send(\"아직 등록하신 즐겨찾기가 없어요.\")\r\n\r\n\r\n\r\n@bot.command()\r\nasync def 즐겨찾기추가(ctx, *, msg):\r\n correct = 0\r\n for i in range(len(userF)):\r\n if userF[i] == str(ctx.message.author.name): #userF에 유저정보가 있는지 확인\r\n correct = 1 #있으면 넘김\r\n if correct == 0:\r\n userF.append(str(ctx.message.author.name)) #userF에다가 유저정보를 저장\r\n userFlist.append([]) #유저 노래 정보 첫번째에 유저이름을 저장하는 리스트를 만듦.\r\n userFlist[len(userFlist)-1].append(str(ctx.message.author.name))\r\n\r\n for i in range(len(userFlist)):\r\n if userFlist[i][0] == str(ctx.message.author.name):\r\n \r\n options = webdriver.ChromeOptions()\r\n options.add_argument(\"headless\")\r\n\r\n chromedriver_dir = \"D:/다운로드/chromedriver_win32 (1)/chromedriver.exe\"\r\n driver = webdriver.Chrome(chromedriver_dir, options = options)\r\n driver.get(\"https://www.youtube.com/results?search_query=\"+msg+\"+lyrics\")\r\n source = driver.page_source\r\n bs = bs4.BeautifulSoup(source, 'lxml')\r\n entire = bs.find_all('a', {'id': 'video-title'})\r\n entireNum = entire[0]\r\n music = entireNum.text.strip()\r\n\r\n driver.quit()\r\n\r\n userFlist[i].append(music)\r\n await ctx.send(music + \"(이)가 정상적으로 등록되었어요!\")\r\n\r\n\r\n\r\n@bot.command()\r\nasync def 즐겨찾기삭제(ctx, *, number):\r\n correct = 0\r\n for i in range(len(userF)):\r\n if userF[i] == str(ctx.message.author.name): #userF에 유저정보가 있는지 확인\r\n correct = 1 #있으면 넘김\r\n if correct == 0:\r\n userF.append(str(ctx.message.author.name)) #userF에다가 유저정보를 저장\r\n userFlist.append([]) #유저 노래 정보 첫번째에 유저이름을 저장하는 리스트를 만듦.\r\n userFlist[len(userFlist)-1].append(str(ctx.message.author.name))\r\n\r\n for i in range(len(userFlist)):\r\n if userFlist[i][0] == str(ctx.message.author.name):\r\n if len(userFlist[i]) >= 2: # 노래가 있다면\r\n try:\r\n del userFlist[i][int(number)]\r\n await ctx.send(\"정상적으로 삭제되었습니다.\")\r\n except:\r\n await ctx.send(\"입력한 숫자가 잘못되었거나 즐겨찾기의 범위를 초과하였습니다.\")\r\n else:\r\n await ctx.send(\"즐겨찾기에 노래가 없어서 지울 수 없어요!\")\r\n\r\n@bot.event\r\nasync def on_reaction_add(reaction, users):\r\n if users.bot == 1:\r\n pass\r\n else:\r\n try:\r\n await Flist.delete()\r\n except:\r\n pass\r\n else:\r\n if str(reaction.emoji) == '\\U0001F4E5':\r\n await reaction.message.channel.send(\"잠시만 기다려주세요. (즐겨찾기 갯수가 많으면 지연될 수 있습니다.)\")\r\n print(users.name)\r\n for i in range(len(userFlist)):\r\n if userFlist[i][0] == str(users.name):\r\n for j in range(1, len(userFlist[i])):\r\n try:\r\n chromedriver_dir = \"D:/다운로드/chromedriver_win32 (1)/chromedriver.exe\"\r\n driver = webdriver.Chrome(chromedriver_dir, options = options)\r\n driver.close()\r\n except:\r\n print(\"NOT CLOSED\")\r\n\r\n user.append(userFlist[i][j])\r\n result, URLTEST = title(userFlist[i][j])\r\n song_queue.append(URLTEST)\r\n await reaction.message.channel.send(userFlist[i][j] + \"를 재생목록에 추가했어요!\")\r\n elif str(reaction.emoji) == '\\U0001F4DD':\r\n await reaction.message.channel.send(\"플레이리스트가 나오면 생길 기능이랍니다. 
추후에 올릴 영상을 기다려주세요!\")\r\n\r\n\r\n\r\nbot.run(\"ODgxNDA0NTI0MDAxMTA3OTc5.YSsV7g.AO8HXFTBsc8ZJYZHAlYIzd3jI8s\")","sub_path":"musicbot.py","file_name":"musicbot.py","file_ext":"py","file_size_in_byte":19861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"383198696","text":"\"\"\"Script for publishing new versions of Selenium to cloud storage.\n\nWhen you run this script, it will use OAuth 2.0 to authenticate with\nGoogle Cloud Storage before attempting to upload any files. This script\nwill fail if the authenticated account does not have write access to the\nindicated bucket.\n\nBy default, this script will use the adjacent client_secrets.json for\nOAuth authentication; this may be changed with the --client_secrets\nflag.\n\nExample usage:\n\npython publish_release.py \\\\\n --client_secrets my_secrets.json \\\\\n --project_id foo:bar \\\\\n --bucket releases \\\\\n --publish_version 3.14.15 \\\\\n --publish path/to/file/one.txt path/to/file/two.txt \\\\\n --acl \"public-read\"\n\nThis will publish\n http://releases.storage.googleapis.com/3.14.15/one.txt\n http://releases.storage.googleapis.com/3.14.15/two.txt\n\"\"\"\n\nimport argparse\nimport logging\nimport mimetypes\nimport os.path\nimport sys\n\n\ntry:\n from google.oauth2.credentials import Credentials\n from google_auth_oauthlib.flow import InstalledAppFlow\nexcept ImportError:\n print ('Could not import the library that provides oauthlib integration for Google Auth\\n'\n + 'Download available at https://github.com/googleapis/google-auth-library-python-oauthlib\\n'\n + 'or run `pip install google-auth-oauthlib`')\n sys.exit(1)\n\ntry:\n from google.cloud import storage\nexcept ImportError:\n print ('Could not import Python Client for Google Cloud Storage\\n'\n + 'Download available at https://github.com/googleapis/python-storage\\n'\n + 'or run `pip install google-cloud-storage`')\n sys.exit(1)\n\nOAUTH_SCOPE = ['https://www.googleapis.com/auth/devstorage.full_control']\n\nmimetypes.add_type(\"application/java-archive\", \".jar\")\n\n\ndef create_args_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--client_secrets',\n default='client_secrets.json',\n help='the OAuth 2.0 client secrets file to use (default: client_secrets.json)')\n parser.add_argument(\n '--save_credentials',\n default=False,\n action='store_true',\n help='should OAuth 2.0 credentials be saved to a local file or not (default: false)')\n parser.add_argument(\n '--credentials_file',\n default='credentials.json',\n help='a file to save OAuth 2.0 credentials to (default: credentials.json)')\n parser.add_argument(\n '--project_id',\n help='the Cloud Storage project id')\n parser.add_argument(\n '--bucket',\n help='the bucket to upload to')\n parser.add_argument(\n '--publish_version',\n help='the version being published (e.g. 
1.23)')\n parser.add_argument(\n '--acl',\n default='private',\n choices=['private', 'public-read', 'authenticated-read'],\n help='the ACLs to assign to the uploaded files')\n parser.add_argument(\n '--publish',\n nargs='+',\n help='files to publish to Cloud Storage')\n parser.add_argument(\n '--logging_level',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n default='INFO',\n help='the level of logging detail')\n return parser\n\n\ndef _authenticate(client_secrets, save_credentials=False, credentials_file=None):\n if os.path.isfile(credentials_file):\n credentials = Credentials.from_authorized_user_file(credentials_file)\n if credentials is not None:\n return credentials\n\n flow = InstalledAppFlow.from_client_secrets_file(client_secrets, scopes=OAUTH_SCOPE)\n flow.run_local_server()\n credentials = flow.credentials\n if save_credentials:\n with open(credentials_file, 'w') as f:\n f.write(credentials.to_json())\n return credentials\n\n\ndef _upload(bucket, file_path, object_name, acl):\n blob = bucket.blob(object_name)\n # blob.delete()\n blob.upload_from_filename(file_path, predefined_acl=acl)\n return blob.public_url\n\n\ndef main():\n parser = create_args_parser()\n args = parser.parse_args()\n\n logging.basicConfig(level=args.logging_level)\n\n def die(message):\n logging.fatal(message)\n sys.exit(2)\n\n if args.client_secrets is None:\n die('You must specify a client secrets file via --client_secrets')\n if args.project_id is None:\n die('You must specify a project ID via --project_id')\n if args.bucket is None:\n die('You must specify a bucket via --bucket')\n if args.publish_version is None:\n die('You must specify a published version identifier via --publish_version')\n\n credentials = _authenticate(args.client_secrets, args.save_credentials, args.credentials_file)\n client = storage.Client(project=args.project_id, credentials=credentials)\n bucket = client.get_bucket(args.bucket)\n\n published = []\n for f in args.publish:\n object_name = '%s/%s' % (args.publish_version, os.path.basename(f))\n logging.info('Publishing %s as %s', f, object_name)\n public_url = _upload(bucket, f, object_name, args.acl)\n published.append(public_url)\n\n if published:\n logging.info('Published:\\n %s' % '\\n '.join(published))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"third_party/py/googlestorage/publish_release_v4.py","file_name":"publish_release_v4.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"196513916","text":"from car_controller.StateModel import StateModel, Signal\nfrom car_controller.NetworkStateSubscriber import NetworkStateSubscriber\nfrom car_controller.NetworkCommandSender import NetworkCommandSender\nfrom car_controller.CANFramesModel import CANFramesModel\nfrom car_controller.CameraReceiver import CameraReceiver\nfrom car_controller.ZmqModel import ZmqModel\nfrom car_controller.SlamDataModel import SlamDataModel\nfrom car_controller.ConfigModel import ConfigModel\nfrom car_controller.LidarScanModel import LidarScanModel\nfrom car_controller.CANStateModel import CANStateModel\n\nfrom car_controller.LidarController import LidarController\nfrom car_controller.SlamController import SlamController\nfrom car_controller.RemoteSlamController import RemoteSlamController\nfrom car_controller.CANStateController import CANStateController\n\nfrom queue import Queue\nimport threading\nimport time\n\n# TODO: dictionary lookup of signal IDs instead of numbers\n\nclass 
CarController:\n    def __init__(self, address):\n        self.signal_queue = Queue()\n\n        self.zmq_model = ZmqModel()\n        self.lidar_scan_model = LidarScanModel()\n        self.config_model = ConfigModel()\n        self.config_model.address = address\n        self.slam_data_model = SlamDataModel()\n        self.can_state_model = CANStateModel()\n        self.state_model = StateModel(44)\n\n        self.camera_receiver = CameraReceiver(self.config_model.address,\n\t\t\t\t \t self.zmq_model,\n\t\t\t\t\t self.config_model)\n\n        self.lidar_controller = LidarController(self.lidar_scan_model,\n                                                self.config_model,\n                                                self.zmq_model.context)\n\n        if self.config_model.use_local_slam:\n            self.slam_controller = SlamController(self.lidar_scan_model,\n                                                  self.slam_data_model)\n        else:\n            self.slam_controller = RemoteSlamController(self.slam_data_model,\n                                                        self.config_model.lidar_server_address)\n\n        self.can_state_controller = CANStateController(self.can_state_model,\n                                                       self.config_model,\n                                                       self.zmq_model.context)\n\n\n        self.network_state_subscriber = NetworkStateSubscriber(self.state_model, self.config_model.address)\n        self.network_command_sender = NetworkCommandSender(self.signal_queue, self.config_model.address)\n\n    def queue_state(self, signal_id, data):\n        self.signal_queue.put(Signal(signal_id, data, time.time()), block=True)\n\n    def arm_motors(self):\n        self.queue_state(44, 3)\n\n    def disarm_motors(self):\n        self.queue_state(44, 0)\n\n    def set_speed(self, speed):\n        self.queue_state(22, speed)\n\n    def set_turnrate(self, turn):\n        self.queue_state(23, turn)\n\n    def get_current(self):\n        return self.state_model.get_signal(5)\n\n    def get_voltage(self):\n        return self.state_model.get_signal(46)\n\n    def get_compass(self):\n        raise NotImplementedError()\n\n    def get_wheel_speeds(self):\n        return(self.state_model.get_signal(x) for x in range(1,5))\n\n    def get_picture(self):\n        return self.camera_receiver.get_picture()\n\n    def get_lidar(self):\n        if time.time() - self.lidar_scan_model.timestamp > 1:\n            print(\"Warning: using lidar data older than 1 second.\")\n        return self.lidar_scan_model.scan_data\n\n    def get_sonar(self, id):\n        raise NotImplementedError()\n\n\n\n    def heartbeat_thread(self):\n        \"\"\"Thread method for sending regular heartbeat.\"\"\"\n        i = 0\n        while True:\n            self.queue_state(35, 1)\n            self.queue_state(34, i)\n            time.sleep(1)\n            i = (i+1)%221\n\n    def start(self):\n        threading.Thread(target=self.heartbeat_thread, daemon=True).start()\n        self.network_state_subscriber.start()\n        self.network_command_sender.start()\n        print(\"Car controller init OK!\")\n","sub_path":"car_controller/car_controller.py","file_name":"car_controller.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"6643300","text":"#!/usr/bin/env python\n#-*- encoding:utf8 -*-\n# File Name:mirror_gen.py\n# Author:ulin\n# Time:2018年02月28日 星期三 09时11分55秒\n\nimport contextlib\n\n@contextlib.contextmanager\ndef looking_glass():\n    msg = ''\n    try:\n        yield 'JABBERWOCKY'\n    except ZeroDivisionError:\n        msg = \"Please DO NOT divide by zero\"\n    finally:\n        if msg:\n            print(msg)\n","sub_path":"context-manager/mirror_gen.py","file_name":"mirror_gen.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"457164869","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport feedparser\nimport roman\nfrom urllib import urlencode\n\nfrom ansicolor import green, red, yellow\n\nfrom parlament.settings import BASE_HOST\nfrom parlament.spiders import BaseSpider\nfrom 
parlament.resources.extractors.statement import (\n RSS_DEBATES,\n RSS_DEBATES_SIMPLE,\n HTML_DEBATE_DETAIL,\n DOCSECTIONS\n)\n\nfrom op_scraper.models import DebateStatement, Debate\nfrom op_scraper.models import Person, LegislativePeriod\n\nimport datetime\n\nimport json\n\n\ndef debatelist_makeurl_rss(llp, debatetype):\n baseurl = \"{}/{}\".format(BASE_HOST, \"PAKT/STPROT/filter.psp\")\n params = {\n 'view': 'RSS',\n 'NRBRBV': debatetype,\n 'GP': llp,\n 'R_PLSO': 'PL',\n 'NUR_VORL': 'N',\n 'FBEZ': 'FP_011',\n 'listeId': '212',\n }\n return baseurl + '?' + urlencode(params)\n\n\n# def debatelist_makeurl_html(llp, debatetype):\n# baseurl = \"{}/{}\".format(BASE_HOST, \"Filter/filter.psp\")\n# params = dict(\n# SUCH='',\n# pageNumber='',\n# R_PLSO='PL',\n# GP=llp,\n# INTRANET='N',\n# STEP='1210',\n# feldRnr='3',\n# STPROT='ALLE',\n# scDesc='DESC',\n# FBEZ='FP_011',\n# view='',\n# NRBRBV=debatetype,\n# BEZ='FP_211',\n# LISTE='',\n# NUR_VORL='N',\n# listeId='212'\n# )\n# return baseurl + '?' + urlencode(params)\n\n\nclass StatementSpider(BaseSpider):\n\n \"\"\"\n Spider to scrape debates and debate statements\n ----------------------------------------------\n\n Start the spider by specifying `llp` and `type` parameters.\n\n First step is to get urls of debate-transcripts (\"stenographische\n protokolle\"), for this, the RSS-Feed at\n `http://www.parlament.gv.at/PAKT/STPROT/` is used.\n We have to do one extra step to get the actual protocol url from an\n intermediate debate-detail page.\n\n Parameters are `type` (NR, BR) and `llp` (number) for type of\n debate and llp respectively::\n\n ./manage.py scrape crawl statement -a llp=24 -a type=NR\n\n To limit the debate list, use `snr` to scrape only debates that\n have 'snr' in the title::\n\n ./manage.py scrape crawl statement -a llp=24 -a type=NR\\\n -a snr=171\n\n \"\"\"\n\n\n BASE_URL = \"{}/{}\".format(BASE_HOST, \"PAKT/STPROT\")\n ALLOWED_LLPS = range(20, 26)\n DEBATETYPES = ['NR', 'BR']\n\n name = \"statement\"\n\n def __init__(self, **kw):\n super(StatementSpider, self).__init__(**kw)\n\n if 'type' in kw and kw['type'] in self.DEBATETYPES:\n self.DEBATETYPES = [kw['type']]\n if 'llp' in kw and kw['llp'] != 'all':\n try:\n self.LLP = [roman.toRoman(int(kw['llp']))]\n except:\n self.LLP = [kw['llp']]\n else:\n self.LLP = [roman.toRoman(llp) for llp in self.ALLOWED_LLPS]\n\n # Sitzungsnummer (further filtering down to just one 'sitzung')\n self.SNR = kw['snr'] if 'snr' in kw else None\n\n # The start url is actually not parsed at all, but we need some\n # url to get the scraping started.\n self.start_urls = [self.BASE_URL]\n\n def parse(self, response):\n \"\"\"\n Starting point - produces urls (requests) of debate items lists (urls\n of RSS feeds).\n\n It builds the list of requests/callbacks (alongsite metadata that\n is known beforenhand) from the set of LLPs and debate types.\n\n The feeds will be parsed in the next step, parse_debatelist.\n \"\"\"\n\n callback_requests = []\n for llp in self.LLP:\n for nrbr in self.DEBATETYPES:\n # Debatelist Url\n feed_url = debatelist_makeurl_rss(llp, nrbr)\n\n # Additional metadata (does a lookup on the LLP)\n llp_item = None\n try:\n llp_item = LegislativePeriod.objects.get(roman_numeral=llp)\n except LegislativePeriod.DoesNotExist:\n self.logger.warning(red(u\"LLP '{}' not found\".format(llp)))\n\n # Add a request and callback\n callback_requests.append(\n scrapy.Request(feed_url,\n callback=self.parse_debatelist,\n meta={'llp': llp_item, 'type': nrbr}))\n\n return callback_requests\n\n def 
parse_debatelist(self, response):\n \"\"\"\n Parse feed of debate items.\n\n Each response is an RSS feed with debate items.\n From each item (=debate), debate metadata and a detail url (not yet\n the debate protocol url) is extracted.\n\n The detail url is parsed in the next step, parse_debate_detail.\n \"\"\"\n\n llp = response.meta['llp'] if 'llp' in response.meta else None\n debate_type = response.meta['type'] if 'type' in response.meta else ''\n\n # debates = RSS_DEBATES.xt(response)\n debates = RSS_DEBATES_SIMPLE.xt(response)\n\n # If SNR is set filter debate list to contain the debate number.\n fetch_debates = filter(lambda r: r['detail_url'] != \"\" and\n (not self.SNR or self.SNR in r['title']),\n debates)\n\n self.logger.info(green(u\"{} of {} debates from {}\".format(\n len(fetch_debates), len(debates), response.url)))\n\n for debate in fetch_debates:\n debate['llp'] = llp\n debate['debate_type'] = debate_type\n yield scrapy.Request(debate['detail_url'],\n callback=self.parse_debate_detail,\n meta={'debate': debate})\n\n\n def parse_debate_detail(self, response):\n \"\"\"\n Process a detail page that contains the url to the debate protocol.\n\n Extract the protocol url, add it to metadata to return the request\n with the callback for the actual content parsing of the debate.\n\n The debate metadata is saved, and the next step is to parse the\n actual debate content, parse_debate\n \"\"\"\n\n # Complete debate metadata and store (insert/update) it\n debate = response.meta['debate']\n debate['protocol_url'] = BASE_HOST + HTML_DEBATE_DETAIL.xt(response)\n debate_item = self.store_debate(debate)\n\n yield scrapy.Request(\n debate['protocol_url'],\n callback=self.parse_debate,\n meta={'debate': debate_item})\n\n def parse_debate(self, response):\n \"\"\"\n Debate-transcript (\"Stenografisches Protokoll\") parser.\n\n Parses the actual debate content.\n \"\"\"\n i = 0\n for i, sect in enumerate(DOCSECTIONS.xt(response)):\n # Lookup + add references to the section data\n sect['debate'] = response.meta['debate']\n if 'speaker_id' in sect and sect['speaker_id'] is not None:\n try:\n sect['person'] = Person.objects.get(\n parl_id=sect['speaker_id'])\n except Person.DoesNotExist:\n self.logger.warning(\n red(u\"Person '{}' not found\".format(sect['speaker_id'])))\n else:\n sect['person'] = None\n\n # Select best timestamps for start and end and make datetime\n start_ts = sect['time_start'] or sect['ref_timestamp']\n end_ts = sect['time_end'] or sect['ref_timestamp']\n try:\n debate_date = sect['debate'].date()\n except:\n # Use some valid date, but recognizable to come from a parse error\n debate_date = datetime.datetime(2057, 1, 1)\n sect['date'] = self._apply_ts(debate_date, start_ts)\n sect['date_end'] = self._apply_ts(debate_date, end_ts)\n\n self.store_statement(sect, i)\n\n self.logger.info(\n green(u\"Saved {} sections from {}\".format(i, response.url)))\n\n def store_debate(self, data):\n \"\"\"\n Save (update or insert) debate to ORM\n \"\"\"\n try:\n debate = Debate.objects.get(llp=data['llp'], nr=data['nr'])\n except Debate.DoesNotExist:\n debate = Debate()\n for (key, value) in data.items():\n setattr(debate, key, value)\n debate.save()\n self.logger.info(green(u\"Debate metadata saved {}\".format(debate)))\n return debate\n\n def store_statement(self, data, index=-1):\n \"\"\"\n Save (update or insert) debate_statement to ORM\n \"\"\"\n data['index'] = int(index)\n self.logger.info(data)\n try:\n debate_statement = DebateStatement.objects.get(\n debate=data['debate'], 
doc_section=data['doc_section'])\n        except DebateStatement.DoesNotExist:\n            debate_statement = DebateStatement()\n        keys = set(data.keys()) &\\\n            set([v.name for v in DebateStatement._meta.get_fields()])\n        for key in keys:\n            setattr(debate_statement, key, data[key])\n        debate_statement.save()\n\n    def _apply_ts(self, date, timeparts):\n        \"\"\"\n        Apply hour, minutes and possibly seconds to a date.\n\n        In the docsections, we scrape only minutes and seconds - but we\n        have the date from the debate metadata.\n        This helper method combines the two to get a full timestamp.\n        \"\"\"\n        if timeparts is not None and len(timeparts) >= 2:\n            ts = {'hour': timeparts[0],\n                  'minute': timeparts[1],\n                  'second': timeparts[2]\n                  if len(timeparts) > 2 else 0}\n            date = date.replace(**ts)\n        return date\n","sub_path":"offenesparlament/op_scraper/scraper/parlament/spiders/statement.py","file_name":"statement.py","file_ext":"py","file_size_in_byte":9643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91761940","text":"import base64\nimport io\nimport json\nimport time\n\nimport cv2\nimport imutils\nimport numpy as np\nimport requests\nfrom PIL import Image\n\nimport face_recognition\nimport imagehash\n\n\ndef imread_buffer(buffer_):\n    image = np.frombuffer(buffer_, dtype='uint8')\n    image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    return image\n\n\ndef read_image_base64(base64string):\n    image = Image.open(io.BytesIO(base64.b64decode(base64string)))\n    image = np.array(image)\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    return image\n\n\ndef get_profile_image_from_layout(id_img):\n    layout_api_address = 'http://35.240.219.152:8989/profile_image'\n    content_type = 'image/jpeg'\n    headers = {'content-type': content_type}\n    _, img_encoded = cv2.imencode('.jpg', id_img)\n    response = requests.post(layout_api_address, data=img_encoded.tostring(),\n                             headers=headers)\n    try:\n        response = json.loads(response.text)\n    except:\n        return error('Crop profile images API failed with this response: {}'.format(response.text))\n    if len(response['prediction']) == 0:\n        return None\n    else:\n        most_conf = max(response['prediction'], key=lambda x: x['confidence'])\n        return read_image_base64(most_conf['cropped'])\n\n\ndef is_same_image(image1, image2):\n    hash1 = imagehash.whash(Image.fromarray(image1))\n    hash2 = imagehash.whash(Image.fromarray(image2))\n    return abs(hash1-hash2) <= 15\n\n\ndef get_the_biggest_face(face_loc):\n    return [max(face_loc, key=lambda x:abs(x[2]-x[0])*abs(x[3]-x[1]))]\n\n\ndef error(message):\n    print('--------------------------------------------------------------------------')\n    return {\n        'success': False,\n        'message': message\n    }\n\n\ndef process(id_img, selfie_img):\n    start_time = time.time()\n    print('Start processing ...')\n    if isinstance(id_img, bytes):\n        id_img = imread_buffer(id_img)\n    if isinstance(selfie_img, bytes):\n        selfie_img = imread_buffer(selfie_img)\n\n    # Call drake's API to get the cropped profile_image\n    print('1. Calling profile image crop API ...')\n    id_img = get_profile_image_from_layout(id_img)\n    if id_img is None:\n        return error(\"Can't find any face in the ID. Please take a new picture of your ID\")\n    print('-> Done. Took {} secs'.format(time.time()-start_time))\n\n    # Check hash if they are the same picture\n    start_time = time.time()\n    print('2. Checking duplicate image ...')\n    if is_same_image(id_img, selfie_img):\n        return error('Found the same images. 
Please take a new picture of you and your id')\n    print('-> Done. Took {} secs'.format(time.time()-start_time))\n\n    # Check number of face in selfie images\n    start_time = time.time()\n    print('3. Cropping face from selfie image ...')\n    for angle in [0, 90, 270, 360]:\n        print('- Trying angle:', angle)\n        rotated_selfie = imutils.rotate(selfie_img, angle=angle)\n        selfie_face_loc = face_recognition.api.face_locations(rotated_selfie, number_of_times_to_upsample=1) # , model='cnn')\n        if len(selfie_face_loc) > 1:\n            print('- Multiple faces have been found. Selecting the biggest ones')\n            selfie_face_loc = get_the_biggest_face(selfie_face_loc)\n        if len(selfie_face_loc) == 1:\n            print('-> Found it!')\n            selfie_img = rotated_selfie\n            break\n\n    if not selfie_face_loc:\n        return error(\"Can't find any face in your selfie. Please take a new picture of you\")\n    print('-> Done. Took {} secs'.format(time.time()-start_time))\n\n    start_time = time.time()\n    print('4. Cropping face from ID image ...')\n    id_face_loc = face_recognition.api.face_locations(id_img, number_of_times_to_upsample=1) # , model='cnn')\n    if not id_face_loc:\n        return error(\"Can't find any face in the ID. Please take a new picture of your ID\")\n    elif len(id_face_loc) > 1:\n        print('- Multiple faces have been found. Selecting the biggest ones')\n        id_face_loc = get_the_biggest_face(id_face_loc)\n    print('-> Done. Took {} secs'.format(time.time()-start_time))\n\n    # Compare distance\n    start_time = time.time()\n    print('5. Dewarp and calculating face embedding ...')\n    face1_encoding = face_recognition.face_encodings(id_img, known_face_locations=id_face_loc)[0]\n    face2_encoding = face_recognition.face_encodings(selfie_img, known_face_locations=selfie_face_loc)[0]\n    print('-> Done. Took {} secs'.format(time.time()-start_time))\n\n    start_time = time.time()\n    print('6. Getting face distance ...')\n    face_distance = face_recognition.face_distance([face1_encoding], face2_encoding)[0]\n    print('-> Done. 
Took {} secs'.format(time.time()-start_time))\n    print('--------------------------------------------------------------------------')\n    return {\n        'distance': face_distance,\n        'matched': 'True' if face_distance < 0.6 else 'False',\n        'matched_strict': 'True' if face_distance < 0.5 else 'False'\n    }\n\n\nif __name__ == \"__main__\":\n    img = cv2.imread('./test_img/1.png')\n    print(get_profile_image_from_layout(img))\n","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"460337354","text":"\n\n#class header\nclass _EARTHY():\n\tdef __init__(self): \n\t\tself.name = \"EARTHY\"\n\t\tself.definitions = [u'referring to sex and the human body in a direct way: ', u'like or relating to earth: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_earthy.py","file_name":"_earthy.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"88976177","text":"'''\r\n\tCMPSC 121 - Final Exam\r\n\tDate: 5/1/18\r\n\t(5.5 Points) - Program runs without errors\r\n'''\r\n\r\n############################\r\n#\tProblem #1\r\n#\tCreate a function definition\r\n#\t-call the function name - binarySearch\r\n#\t-parameter: data - an array of data\r\n#\t-parameter: key - value to search for, set default to 0\r\n#\t(6 Points)\r\n############################\r\ndef binarySearch(data, key):\n \r\n    low = 0\r\n    high = len(data) - 1\r\n    mid = 0\r\n\r\n    ############################\r\n    #\tProblem #2\r\n    #\tCreate a while loop that runs as long\r\n    #\tas low is less than or equal to high\r\n    #\t(3 Points)\r\n    ############################\r\n    while low <= high:\r\n\r\n        ############################\r\n\t# Problem #3\r\n\t# Calculate the mid value\r\n\t# -mid equals (low plus high) divided by 2\r\n\t# (5 Points)\r\n\t############################\r\n        mid = (low + high) // 2\r\n        if data[mid] == key:\r\n            return mid\r\n        elif key > data[mid]:\r\n            low = mid + 1\r\n        else:\r\n            high = mid - 1\r\n\r\n    ############################\r\n    # Problem #5\r\n    #\tReturn None from the function\r\n    #\t(2 Points)\r\n    ############################\r\n    return None\r\n\r\ndef bubbleSort(data):\r\n    swapped = True\r\n    while swapped:\r\n        swapped = False\r\n\r\n\t############################\r\n\t# Problem #6\r\n\t# Create a for loop that runs from 0\r\n\t# to the size minus one\r\n\t# (3 Points)\r\n\t############################\r\n        for i in range(0, len(data)-1):\r\n            if data[i] > data[i + 1]:\r\n                ############################\r\n                # Problem #7\r\n                # Swap data[i] and data[i+1]\r\n                # (6 Points)\r\n                ############################\r\n                data[i], data[i+1] = data[i+1], data[i]\r\n\r\n                swapped = True\r\n\r\n############################\r\n# Problem #8\r\n# Create a function that prints out all of the\r\n# data in the list\r\n# -call the function name - outputData\r\n# -parameter: data - an array of data\r\n# (7.5 Points)\r\n############################\r\ndef outputData(data):\r\n    for i in data:\r\n        print(i)\n\r\n############################\r\n# Main Program\r\n############################\r\nunsortedData = [ 20, 4, -2, 0, -4, 10, 23, 14, 8, 9, -14, 35, 7 
]\r\n\r\nprint(\"Data Unsorted:\")\r\n############################\r\n# Problem #9\r\n# Call the Output Data function\r\n# -pass in the unsortedData\r\n# (2.5 Points)\r\n############################\r\noutputData(unsortedData)\r\n\r\n############################\r\n# Problem #10\r\n# Call the Bubble Sort function\r\n# -pass in the unsortedData\r\n# (2.5 Points)\r\n############################\r\nbubbleSort(unsortedData)\r\n\r\n\r\nprint(\"\\nData Sorted:\")\r\n############################\r\n# Problem #11\r\n# Call the Output Data function\r\n# -pass in the unsortedData\r\n# (2.5 Points)\r\n############################\r\noutputData(unsortedData)\r\n\r\nfor i in range(5):\r\n    key = int(input(\"Enter a number to search the Array for: \"))\r\n\r\n    ############################\r\n    # Problem #12\r\n    # Call the Binary Search function\r\n    # -pass in the unsortedData and key\r\n    # -store the return value in a variable called index\r\n    # (2.5 Points)\r\n    ############################\r\n    index = binarySearch(unsortedData, key)\r\n    if index is None:\r\n        ############################\r\n        # Problem #13\r\n        # Output that the number is not in the array.\r\n        # (2.5 Points)\r\n        ############################\r\n        print(\"The number is not in the array\")\r\n    else:\r\n        print(\"The number is located at index:\", index)\r\n","sub_path":"Playground/cmpsc121_2018/CMPSC 121 - Final Exam - Section 1.py","file_name":"CMPSC 121 - Final Exam - Section 1.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"114599205","text":"from robot import Robot\nfrom servo import Servo\nfrom headturn import dictionary \n\nimport Adafruit_PCA9685\n\nFREQ = 60\njoints = dict()\n\npwm = Adafruit_PCA9685.PCA9685()\npwm.set_pwm_freq(FREQ)\n\n\"\"\"\n#Foot Servo\nFOOT_CHANNEL = 0\nFOOT_MINTIME = \nFOOT_MAXTIME = \n\nfoot = Servo(pwm, FOOT_CHANNEL, FOOT_MINTIME, FOOT_MAXTIME)\n\njoints['foot'] = foot;\n\"\"\"\n\n#Shoulder Servo - HS755HB\nSHOULDER_CHANNEL = 1\nSHOULDER_MINTIME = 600\nSHOULDER_MAXTIME = 2200\n\nshoulder = Servo(pwm, SHOULDER_CHANNEL, SHOULDER_MINTIME, SHOULDER_MAXTIME)\njoints['shoulder'] = shoulder\n\n#Elbow Servo - HS645MG\nELBOW_CHANNEL = 2\nELBOW_MINTIME = 390\nELBOW_MAXTIME = 2050\n\nelbow = Servo(pwm, ELBOW_CHANNEL, ELBOW_MINTIME, ELBOW_MAXTIME)\njoints['elbow'] = elbow\n\n#Wrist Servo - HS425BB \nWRIST_CHANNEL = 3\nWRIST_MINTIME = 553\nWRIST_MAXTIME = 2520\n\nwrist = Servo(pwm, WRIST_CHANNEL, WRIST_MINTIME, WRIST_MAXTIME)\njoints['wrist'] = wrist\n\n#Neck Servo - HS422\nNECK_CHANNEL = 4\nNECK_MINTIME = 610\nNECK_MAXTIME = 2590\n\nneck = Servo(pwm, NECK_CHANNEL, NECK_MINTIME, NECK_MAXTIME)\njoints['neck'] = neck\n\n#Phone Servo - HS311\nPHONE_CHANNEL = 5\nPHONE_MINTIME = 500\nPHONE_MAXTIME = 2520\n\n#phone = Servo(pwm, PHONE_CHANNEL, PHONE_MINTIME, PHONE_MAXTIME)\n#joints['phone'] = phone\n\nif __name__ == '__main__':\n    robit = Robot(joints)\n    robit.setManyAngles()\n    pwm.stop()\n","sub_path":"rasberryCode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"295341766","text":"#!/usr/bin/env python\n# ---------------------------------------------------------------------\n# File Name : binary_min_heap.py\n# Author : Subhakar K S\n# ---------------------------------------------------------------------\n# Description:\n# Implementation of Binary Min-Heap\n# 
---------------------------------------------------------------------\n\nfrom algorithms.binary_heap import binary_heap\n\n\nclass binary_min_heap(binary_heap):\n    def __init__(self, alist):\n        super(binary_min_heap, self).__init__()\n        if len(alist):\n            if len(set([type(x) for x in alist])) > 1:\n                raise Exception('binary_min_heap::multiple data types are not supported')\n\n            self.h_size = len(alist)\n            self.h_list += alist[:]\n            idx = len(alist) // 2\n            while idx > 0:\n                self._shift_down(idx)\n                idx -= 1\n\n    def _shift_up(self, idx):\n        # while item at parent idx is greater than item at current idx,\n        # move current item up and bring down parent item.\n        parent_idx = self.parent_idx(idx)\n        while parent_idx > 0:\n            if self.value_at(parent_idx) > self.value_at(idx):\n                self._swap_two_at(parent_idx, idx)\n                parent_idx = self.parent_idx(parent_idx)\n            else:\n                break\n\n    def _shift_down(self, idx):\n        # while the node at idx has a left child (and right child), do this;\n        # see if any of its children are smaller than itself and if so push\n        # this node and bring-up the smaller child.\n        while self.lchild_idx(idx) <= self.h_size:\n            # check if also has a right child.\n            # if so, consider the minimum of both children\n            lc_idx = self.lchild_idx(idx)\n            rc_idx = self.rchild_idx(idx)\n            if rc_idx <= self.h_size:\n                min_child_idx = lc_idx if self.value_at(lc_idx) < self.value_at(rc_idx) else rc_idx\n            else:\n                min_child_idx = lc_idx\n            if self.value_at(idx) > self.value_at(min_child_idx):\n                self._swap_two_at(idx, min_child_idx)\n                idx = min_child_idx\n            else:\n                break\n\n    def min_heap_get_min(self):\n        return self.value_at(1) if self.h_size else None\n\n    def min_heap_insert(self, key):\n        if self.h_size and not isinstance(key, type(self.h_list[1])):\n            raise Exception('binary_min_heap::multiple data types are not supported')\n        self.h_list.append(key)\n        self.h_size += 1\n        parent_idx = self.parent_idx(self.h_size)\n        if self.value_at(parent_idx) > self.value_at(self.h_size):\n            self._shift_up(self.h_size)\n\n    def min_heap_extract(self):\n        ret_val = self.value_at(1)\n        self.h_list[1] = self.value_at(self.h_size)\n        self.h_size -= 1\n        self.h_list.pop()\n        self._shift_down(1)\n        return ret_val\n\n    def min_heap_sort(self):\n        ret = []\n        for idx in range(1, self.h_size + 1):\n            ret.append(self.min_heap_extract())\n        return ret\n\n","sub_path":"datastructures/binary_min_heap.py","file_name":"binary_min_heap.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"450037248","text":"from django.contrib.auth.models import User\r\nfrom django.shortcuts import render, redirect\r\nfrom django.utils import timezone\r\nfrom fac_plain.models import Course\r\n\r\n\r\ndef index(request):\r\n    courses = Course.objects.filter(endDate__gte=timezone.now())\r\n    pastc = Course.objects.filter(endDate__lt=timezone.now())\r\n    return render(request, 'courses/index.html',\r\n                  {'courses': courses,\r\n                   'pastc': pastc,\r\n                   'page_name': 'All courses'})\r\n\r\n\r\ndef user_courses(request, user_pk):\r\n    user = User.objects.get(pk=user_pk)\r\n    courses = user.course_students.filter(endDate__gte=timezone.now())\r\n    pastc = user.course_students.filter(endDate__lt=timezone.now())\r\n    return render(request, 'courses/index.html',\r\n                  {'courses': courses,\r\n                   'pastc': pastc,\r\n                   'page_name': 'My courses'})\r\n\r\n\r\ndef show(request, course_pk):\r\n    course = Course.objects.get(pk=course_pk)\r\n    return render(request, 'courses/show.html', {'course': course})\r\n\r\n\r\ndef signup(request, 
course_pk):\r\n user = request.user\r\n course = Course.objects.get(pk=course_pk)\r\n course.signup(user)\r\n return redirect('course_info', course_pk=course_pk)","sub_path":"bachelors/year4/semestre1/python_ruby/HomeWorks/python/1/fac_plain/views/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"104712830","text":"import numpy as np\nfrom tqdm import tqdm\n\ndef dds(Xmin, Xmax, fobj, r=0.2, m=100):\n # Passo 1\n Xmin = np.asarray(Xmin)\n Xmax = np.asarray(Xmax)\n X0 = (Xmin + Xmax)/2\n D = len(Xmin)\n ds = [i for i in range(D)]\n dX = Xmax - Xmin\n # Passo 2\n I = np.arange(1, m+1, 1)\n Xbest = X0\n Fbest = fobj(Xbest)\n # Passo 3\n for i in tqdm(I):\n Pi = 1 - np.log(i)/np.log(m)\n P = np.random.rand(len(Xmin))\n N = np.where(P < Pi)[0]\n if N.size == 0:\n N = [np.random.choice(ds)]\n # Passo 4\n Xnew = np.copy(Xbest)\n for j in N:\n Xnew[j] = Xbest[j] + r*dX[j]*np.random.normal(0, 1)\n if Xnew[j] < Xmin[j]:\n Xnew[j] = Xmin[j] + (Xmin[j] - Xnew[j])\n if Xnew[j] > Xmax[j]:\n Xnew[j] = Xmin[j]\n elif Xnew[j] > Xmax[j]:\n Xnew[j] = Xmax[j] - (Xnew[j] - Xmax[j])\n if Xnew[j] < Xmin[j]:\n Xnew[j] = Xmax[j]\n # Passo 5\n Fnew = fobj(Xnew)\n if Fnew <= Fbest:\n Fbest = Fnew\n print('fmin=',Fbest)\n Xbest = np.copy(Xnew)\n # Fim\n return Xbest, Fbest\n","sub_path":"artigo_sispshi/dds.py","file_name":"dds.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"609682731","text":"#! /usr/bin/env python3\n\nimport sys\nfrom collections import deque, defaultdict\n\nfrom station import *\n\nclass MipsMachine():\n def __init__(self, stations, registers=12):\n self.ip = 0\n self.code = []\n self.labels = {}\n self.memory = defaultdict(int)\n\n self.reg_files = {\n '{}{}'.format(t, i) : RegisterFile()\n for t in ('R','F') for i in range(registers+1)\n }\n self.stations = {\n unit : [Station(Operation.NOP)] * stations for unit in [\n 'Load', 'Store', 'FPAdd', 'FPMult', 'ALU', 'Branch'\n ]}\n\n def load_program(self, fname):\n '''Parse assembly code into a list of instructions and a dictionary that\n maps code labels to indices in the array'''\n address = 0\n\n with open(fname, 'r') as assembly:\n for line in assembly:\n # strip out comments\n end = line.find(';')\n if end != -1:\n line = line[:end]\n\n # record jump points\n labelled = line.split(':')\n if len(labelled) == 2:\n label, inst = map(str.strip, labelled)\n self.labels[label.upper()] = address\n else:\n inst = line.strip()\n\n if inst:\n address += 1\n inst = inst.replace('.', '')\n inst = inst.replace(',', ' ')\n self.code.append(inst.upper())\n\n def tomasulo(self):\n instruction_queue = deque(self.code)\n\n while instruction_queue:\n line = instruction_queue.popleft()\n inst = line.split()\n op = Operation[inst[0]]\n print('\\t'.join(inst))\n\n # Find the first available reservation station\n idx = -1\n for i, s in enumerate(self.stations[op.value.unit]):\n if not s.busy:\n idx = i\n break\n\n if idx >= 0:\n # 'Load', 'Store', 'FPAdd', 'FPMult', 'ALU'\n if op.value.unit == 'Load':\n off, reg = inst[2].split('(')\n regstat = self.reg_files[reg[:-1]]\n if regstat.value:\n arg1 = RegisterFile(regstat.value)\n self.stations[op.value.unit][idx].assign_task(op, arg1)\n # self.reg_files[]\n\ndef main():\n if len(sys.argv) > 1:\n mips = MipsMachine(4)\n mips.load_program(sys.argv[1])\n mips.tomasulo()\n\n\nif __name__ == 
'__main__':\n main()\n","sub_path":"tomasulo.py","file_name":"tomasulo.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224842019","text":"from lab3id import IdFromUsername\nfrom lab3frnds import Friends\n\nuClient = IdFromUsername('gmalkov')\nuid = uClient.execute()\nprint(uid)\n\nfriends_client = Friends(uid)\nfriends = friends_client.execute()\n\nfor (age, count) in friends:\n print('{} {}'.format(int(age), '#' * count))\n","sub_path":"vk3main.py","file_name":"vk3main.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211864165","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n# Standard imports\r\n\r\nfrom flask import Flask, jsonify, request\r\nfrom flask import render_template, send_from_directory\r\nimport argparse\r\nimport os\r\nimport re\r\nimport joblib\r\nimport socket\r\nimport json\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\n# Model imports\r\n\r\nfrom model import get_data_dir, model_train, model_load, model_predict\r\nfrom model import MODEL_VERSION, MODEL_VERSION_NOTE\r\n\r\n# Create an instance of the class for our use\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/index/')\r\ndef about():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/train', methods=['GET', 'POST'])\r\ndef train():\r\n \"\"\"\r\n API endpoint to train the model\r\n\r\n 'mode' - can be used to subset data essentially simulating a train, however in the API it used to set the test flag\r\n \"\"\"\r\n\r\n # Check if request contains json formatted data\r\n\r\n if not request.json:\r\n print('ERROR: train API did not receive request data is JSON format')\r\n return(jsonify(False))\r\n\r\n # Set the test flag based on mode value\r\n\r\n test = False\r\n if 'mode' in request.json and request.json['mode'] == 'test':\r\n test = True\r\n\r\n print('... training model')\r\n data_dir = get_data_dir(train=True)\r\n model = model_train(data_dir, test=test)\r\n print('... 
training complete')\r\n return(jsonify(True))\r\n\r\n\r\n@app.route('/predict', methods=['GET', 'POST'])\r\ndef predict():\r\n \"\"\"\r\n API endpoint for predict function\r\n \"\"\"\r\n\r\n # Check if request contains json formatted data\r\n\r\n if not request.json:\r\n print('ERROR: predict API did not receive request data in JSON format')\r\n return(jsonify([]))\r\n\r\n # Check if request contains query field in json formatted data\r\n\r\n if 'query' not in request.json:\r\n print(\"ERROR: predict API received request, but 'query' is missing\")\r\n return(jsonify([]))\r\n\r\n # Check if request contains type field in json formatted data; if not, set to numpy\r\n\r\n if 'type' not in request.json:\r\n print(\"WARNING: predict API received request, but 'type' is missing, set to default value of 'numpy'\")\r\n query_type = 'numpy'\r\n\r\n # Set the test flag based on mode value\r\n\r\n test = False\r\n if 'mode' in request.json and request.json['mode'] == 'test':\r\n test = True\r\n\r\n if request.json['type'] == 'dict':\r\n pass\r\n else:\r\n print('ERROR: predict API only supports dict data types')\r\n return(jsonify([]))\r\n\r\n # Extract query and set predict parameters\r\n\r\n query = request.json['query']\r\n country = query['country']\r\n year = query['year']\r\n month = query['month']\r\n day = query['day']\r\n\r\n _result = model_predict(\r\n country,\r\n year,\r\n month,\r\n day,\r\n all_models=None,\r\n test=test\r\n )\r\n result = {}\r\n\r\n # Get numpy objects and serialize\r\n\r\n for (key, item) in _result.items():\r\n if isinstance(item, np.ndarray):\r\n result[key] = item.tolist()\r\n else:\r\n result[key] = item\r\n\r\n return(jsonify(result))\r\n\r\n\r\n@app.route('/logs/', methods=['GET'])\r\ndef logs(filename):\r\n \"\"\"\r\n API endpoint to fetch logs\r\n \"\"\"\r\n\r\n if not re.search('.log', filename):\r\n print('ERROR: logs API - file requested was not a log file: {}'.format(filename))\r\n return(jsonify([]))\r\n\r\n log_dir = os.path.join('.', 'logs')\r\n if not os.path.isdir(log_dir):\r\n print('ERROR: logs API - cannot find log dir')\r\n return(jsonify([]))\r\n\r\n file_path = os.path.join(log_dir, filename)\r\n if not os.path.exists(file_path):\r\n print('ERROR: logs API - file requested could not be found: {}'.format(filename))\r\n return(jsonify([]))\r\n\r\n return send_from_directory(log_dir, filename, as_attachment=True)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # Parse arguments and set debug mode\r\n\r\n ap = argparse.ArgumentParser()\r\n ap.add_argument('-d', '--debug', action='store_true',\r\n help='debug flask')\r\n args = vars(ap.parse_args())\r\n\r\n if args['debug']:\r\n app.run(debug=True, port=5000)\r\n else:\r\n app.run(host='0.0.0.0', threaded=True, port=5000)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"168016583","text":"import os\n\n\n\"\"\"\n Constants useful for data module\n\"\"\"\nAPPS_TABLE = \"applications\"\nDOCS_TABLE = \"documentations\"\nRATINGS_TABLE = \"ratings\"\nUSERS_TABLE = \"users\"\n\nIMAGE_KEY = \"image\"\nTITLE_KEY = \"title\"\nBY_KEY = \"by\"\nSTATUS_KEY = \"status\"\nRATING_KEY = \"rating\"\nGROUPS_KEY = \"groups\"\nDESCRIPTION_KEY = \"description\"\nDOCUMENTATION_KEY = \"documentation\"\nUPDATED_KEY = \"updated\"\nOWNER_KEY = \"owner\"\n\nEXTERNAL_KEY = \"external\"\n\nUSER_KEY = \"user\"\nAPPLICATION_KEY = \"application\"\nCOMMENT_KEY = \"comment\"\n\nUSERNAME_KEY = 
\"username\"\nEMAIL_KEY = \"email\"\nPASSWORD_KEY = \"password\"\nDISABLED_KEY = \"disabled\"\nJOINED_KEY = \"joined\"\nACTIVE_KEY = \"active\"\nROLE_KEY = \"role\"\n\nAPI_TAGS_METADATA = [\n {\"name\": \"users\", \"description\": \"User data\"},\n {\"name\": \"applications\", \"description\": \"Application listing\"},\n {\"name\": \"documentation\", \"description\": \"More detailed app info\"},\n {\"name\": \"ratings\", \"description\": \"Ratings and user feedback\"},\n {\"name\": \"meta\", \"description\": \"API meta data\"},\n # {\"name\": \"admin\", \"description\": \"Admin only\"},\n]\n\n\"\"\"\n Constants useful for users module\n\"\"\"\n# to get a string like this run:\n# openssl rand -hex 32\nSECRET_KEY = (\n os.getenv(\"SECRET_KEY\")\n or \"09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7\"\n)\nALGORITHM = \"HS256\"\nACCESS_TOKEN_EXPIRE_MINUTES = 60\n","sub_path":"api/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"576510418","text":"# Time: O(1)\n# Space: O(1)\n\n# 1275\n# Tic-tac-toe is played by two players A and B on a 3 x 3 grid.\n#\n# Here are the rules of Tic-Tac-Toe:\n#\n# Players take turns placing characters into empty squares (\" \").\n# The first player A always places \"X\" characters, while the second player B always places \"O\" characters.\n# \"X\" and \"O\" characters are always placed into empty squares, never on filled ones.\n# The game ends when there are 3 of the same (non-empty) character filling any row, column, or diagonal.\n# The game also ends if all squares are non-empty.\n# No more moves can be played if the game is over.\n# Given an array moves where each element is another array of size 2 corresponding to the row and column of the grid\n# where they mark their respective character in the order in which A and B play.\n#\n# Return the winner of the game if it exists (A or B), in case the game ends in a draw return \"Draw\",\n# if there are still movements to play return \"Pending\".\n#\n# You can assume that moves is valid (It follows the rules of Tic-Tac-Toe), the grid is initially empty and A will play first.\n#\nclass Solution(object):\n def tictactoe(self, moves):\n \"\"\"\n :type moves: List[List[int]]\n :rtype: str\n \"\"\"\n def check(x, y, c):\n return board[x][0]==board[x][1]==board[x][2] \\\n or board[0][y]==board[1][y]==board[2][y] \\\n or board[0][0]==board[1][1]==board[2][2]==c \\\n or board[0][2]==board[1][1]==board[2][0]==c\n\n\n board = [['.']*3 for _ in range(3)]\n for i, (x,y) in enumerate(moves):\n c = 'A' if i % 2 == 0 else 'B'\n board[x][y] = c\n if check(x, y, c):\n return c\n return 'Draw' if len(moves) == 9 else 'Pending'\n\n def tictactoe_kamyu(self, moves):\n row, col = [[0]*3 for _ in xrange(2)], [[0]*3 for _ in xrange(2)]\n diag, anti_diag = [0]*2, [0]*2\n p = 0\n for r, c in moves:\n row[p][r] += 1\n col[p][c] += 1\n diag[p] += r == c\n anti_diag[p] += r+c == 2\n if 3 in (row[p][r], col[p][c], diag[p], anti_diag[p]):\n return \"AB\"[p]\n p ^= 1\n return \"Draw\" if len(moves) == 9 else \"Pending\"\n\nprint(Solution().tictactoe([[0,0],[2,0],[1,1],[2,1],[2,2]])) # A\nprint(Solution().tictactoe([[0,0],[1,1],[0,1],[0,2],[1,0],[2,0]])) # B\nprint(Solution().tictactoe([[0,0],[1,1],[2,0],[1,0],[1,2],[2,1],[0,1],[0,2],[2,2]])) # Draw\nprint(Solution().tictactoe([[0,0],[1,1]])) # 
Pending","sub_path":"Python/find-winner-on-a-tic-tac-toe-game.py","file_name":"find-winner-on-a-tic-tac-toe-game.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"371943686","text":"#!/usr/bin/python3\n# SPDX-License-Identifier: Apache-2.0\n\n#\n# Copyright 2017 Wikimedia Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nDig through puppet configs, find and (sometimes) correct proxy records with\nmissing dns and dns records for missing proxies.\n\"\"\"\n\nimport argparse\n\nimport requests\n\nimport mwopenstackclients\nfrom designateclient.v2 import client as designateclientv2\n\nclients = mwopenstackclients.clients()\n\nPROXY_BACKEND_IP = \"185.15.56.49\"\n\n\ndef url_template():\n \"\"\"Get the url template for accessing the proxy service.\"\"\"\n keystone = clients.keystoneclient()\n proxy = keystone.services.list(type=\"proxy\")[0]\n endpoint = keystone.endpoints.list(service=proxy.id, interface=\"public\", enabled=True)[0]\n return endpoint.url\n\n\ndef proxy_client(project):\n proxy_url = url_template().replace(\"$(tenant_id)s\", project)\n session = clients.session(project)\n return proxy_url, session\n\n\ndef all_mappings(project):\n \"\"\"Return a list of proxies for a given project\n \"\"\"\n proxy_url, session = proxy_client(project)\n resp = session.get(f\"{proxy_url}/mapping\", raise_exc=False)\n\n if resp.status_code == 400 and resp.text == \"No such project\":\n return []\n elif not resp:\n raise Exception(\"Proxy service request got status \" + str(resp.status_code))\n else:\n return resp.json()[\"routes\"]\n\n\ndef delete_mapping(project, domain):\n \"\"\"Delete a single proxy\n \"\"\"\n proxy_url, session = proxy_client(project)\n session.delete(f\"{proxy_url}/mapping/{domain}\")\n\n\ndef get_project_dns_zones(project_id):\n session = clients.session(project_id)\n client = designateclientv2.Client(session=session)\n zones = client.zones.list()\n return zones\n\n\ndef get_wmcloud_dns_recordsets(zone):\n session = clients.session(\"cloudinfra\")\n client = designateclientv2.Client(session=session)\n return client.recordsets.list(zone[\"id\"])\n\n\ndef get_wmflabs_dns_recordsets(zone):\n session = clients.session(\"wmflabsdotorg\")\n client = designateclientv2.Client(session=session)\n return client.recordsets.list(zone[\"id\"])\n\n\ndef get_project_dns_recordsets(project_id, zone):\n session = clients.session(project_id)\n client = designateclientv2.Client(session=session)\n domains = client.recordsets.list(zone[\"id\"])\n return domains\n\n\ndef purge_leaks(delete=False):\n proxy_recordsets = {}\n proxyzones = get_project_dns_zones(\"wmflabsdotorg\")\n for zone in proxyzones:\n if zone[\"name\"] == \"wmflabs.org.\":\n for recordset in get_wmflabs_dns_recordsets(zone):\n if recordset[\"records\"][0] == PROXY_BACKEND_IP:\n proxy_recordsets[recordset[\"name\"]] = recordset\n\n proxyzones = get_project_dns_zones(\"cloudinfra\")\n for zone in proxyzones:\n if 
zone[\"name\"] == \"wmcloud.org.\":\n for recordset in get_wmcloud_dns_recordsets(zone):\n if recordset[\"records\"][0] == PROXY_BACKEND_IP:\n proxy_recordsets[recordset[\"name\"]] = recordset\n\n allinstances = clients.allinstances(allregions=True)\n all_nova_ips = []\n for instance in allinstances:\n for network in instance.addresses:\n all_nova_ips.append(instance.addresses[network][0][\"addr\"])\n\n for project in clients.allprojects():\n projectzones = get_project_dns_zones(project.id)\n project_recordsets = {}\n for zone in projectzones:\n for recordset in get_project_dns_recordsets(project.id, zone):\n if recordset[\"records\"][0] == PROXY_BACKEND_IP:\n project_recordsets[recordset[\"name\"]] = recordset\n\n mappings = all_mappings(project.id)\n projectinstances = clients.allinstances(project.id, allregions=True)\n\n all_project_ips = []\n for instance in projectinstances:\n for network in instance.addresses:\n all_project_ips.append(instance.addresses[network][0][\"addr\"])\n\n for mapping in mappings:\n backend_ip = mapping[\"backends\"][0].split(\":\")[1].strip(\"/\")\n if backend_ip not in all_project_ips:\n if backend_ip not in all_nova_ips:\n print(\"%s: possible stray proxy: %s\" % (project.id, mapping))\n if delete:\n delete_mapping(project.id, mapping[\"domain\"])\n else:\n print(\"%s: proxy mapping outside of its project: %s\" % (project.id, mapping))\n\n searchname = mapping[\"domain\"]\n if not searchname.endswith(\".\"):\n searchname += \".\"\n\n proxy_recordsets.pop(searchname, None)\n\n session = clients.session(\"wmflabsdotorg\")\n dotorgclient = designateclientv2.Client(session=session)\n session = clients.session(\"cloudinfra\")\n infraclient = designateclientv2.Client(session=session)\n for domain in proxy_recordsets:\n if domain == \"wmflabs.org.\":\n continue\n if domain == \"*.wmflabs.org.\":\n continue\n if domain == \"wmcloud.org.\":\n continue\n if domain == \"*.wmcloud.org.\":\n continue\n if domain == \"proxy-eqiad1.wmflabs.org.\":\n continue\n if domain == \"proxy-eqiad1.wmcloud.org.\":\n continue\n rset = proxy_recordsets[domain]\n print(\"found record unassociated with a proxy: %s\" % rset)\n # Let's make sure there's really nothing there.\n url = \"https://%s\" % domain.rstrip(\".\")\n resp = requests.get(url, verify=False)\n print(\"%s: %s\" % (resp.status_code, url))\n if resp.status_code != 502 and resp.status_code != 404:\n print(\" ---- We found a weird one, at %s\" % url)\n else:\n if delete:\n if \"wmflabs\" in domain:\n dotorgclient.recordsets.delete(rset[\"zone_id\"], rset[\"id\"])\n if \"wmcloud\" in domain:\n infraclient.recordsets.delete(rset[\"zone_id\"], rset[\"id\"])\n\n\nparser = argparse.ArgumentParser(description=\"Find (and, optionally, remove) leaked proxy entries.\")\nparser.add_argument(\n \"--delete\", dest=\"delete\", help=\"Actually delete leaked records\", action=\"store_true\"\n)\nargs = parser.parse_args()\n\npurge_leaks(args.delete)\n","sub_path":"modules/openstack/files/zed/admin_scripts/wmcs-novastats/wmcs-novastats-proxyleaks.py","file_name":"wmcs-novastats-proxyleaks.py","file_ext":"py","file_size_in_byte":6739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"269840493","text":"import sys\nimport pexpect\nimport logging\nimport random\nimport time\nimport configure\n#---------------------\n# System configuration\n#---------------------\n#BENCHMARK = \"MAT\"\nlogger = None\n#------------------------\n#CUDA-GDB commands\n#------------------------\nCUDA_GDB_PATH = 
\"/usr/local/cuda/bin/cuda-gdb\"\nBREAKPOINT = \"break \"\n#BREAK_LOCATION = \"matrixMul_kernel.cu:38\"\nBREAK_LOCATION = configure.startline\nBREAK_LOCATION_2 = \"bucket_query.cu:274\"\nBREAK_LOCATION_3 = \"sort_scan.cu:120\"\nBREAK_LOCATION_4 = \"bucket_query.cu:116\"\nBREAK_LOCATION_5 = \"bucket_query.cu:970\"\nBREAK_LOCATION_7 = \"bucket_query.cu:968\"\nBREAK_LOCATION_6 = \"bucket_query.cu:138\"\n\n\nSTEPI = \"stepi\"\nNEXT = \"n\"\nPC = \"print $pc\"\nRUN = \"run\"\nARGUMENT= configure.parameter\nCONTINUE = \"continue\"\nCUDA_FUN_INFO = \"cuda kernel block thread\"\nCUDA_THREAD_INFO = \"info cuda threads block (2,0,0) thread (64,0,0)\"\nSWITCH = \"cuda block (2,0,0) thread (64,0,0)\"\n\nDELETE_BREAKPOINT = \"delete breakpoint 1\"\nQUIT= \"quit\"\nKILL = \"kill\"\nENTER = \"\"\nEXIT = \"Program exited normally\"\n#------------------------\n#Expect collection\n#------------------------\n\nCUDA_GDB_EXPECT = \"\\(cuda-gdb\\)\"\nCUDA_SYN_EXPECT = \"__syncthreads\\(\\)\"\nCUDA_SYN_EXPECT_2 = \" __syncthreads\\(\\)\"\nPC_EXPECT = \"=\"\nCUDA_FUN_INFO_EXPECT = \" \"\nTHREAD_CONTINUE_EXPECT = \"---Type \\ to continue, or q \\ to quit---\"\nTHREAD_CONTINUE_EXPECT_WERIED = \"---Type \\ to continue, or q \\ to quit---stepi\"\nNO_FOCUS = \"Focus not set on any active CUDA kernel\"\n\ndef profiler(path,trigger,trial):\n global CUDA_GDB_PATH, BREAKPOINT,BREAK_LOCATION,KILL,QUIT,DELETE_BREAKPOINT,CUDA_FUN_INFO,PC,RUN,CONTINUE,CUDA_THREAD_INFO,ENTER, BREAK_LOCATION_2, NEXT, CUDA_SYN_EXPECT_2\n global CUDA_GDB_EXPECT,PC_EXPECT,CUDA_FUN_INFO_EXPECT,THREAD_CONTINUE_EXPECT,CUDA_SYN_EXPECT,ARGUMENT,EXIT,NO_FOCUS,THREAD_CONTINUE_EXPECT_WERIED\n global logger, SWITCH\n global REAK_LOCATION_3, REAK_LOCATION_4, REAK_LOCATION_5,REAK_LOCATION_6\n cuda_gdb_p = pexpect.spawn(CUDA_GDB_PATH+\" \"+path)\n cuda_gdb_p.maxread = 1000000\n cuda_gdb_p.setecho(False)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT) \n #---------------\n # set breakpoint\n #---------------\n cuda_gdb_p.sendline(BREAKPOINT+\" \"+BREAK_LOCATION)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n #cuda_gdb_p.sendline(BREAKPOINT+\" \"+BREAK_LOCATION_2)\n #cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n #cuda_gdb_p.sendline(BREAKPOINT+\" \"+BREAK_LOCATION_3)\n #cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n #cuda_gdb_p.sendline(BREAKPOINT+\" \"+BREAK_LOCATION_4)\n #cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n #cuda_gdb_p.sendline(BREAKPOINT+\" \"+BREAK_LOCATION_7)\n #cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n #cuda_gdb_p.sendline(BREAKPOINT+\" \"+BREAK_LOCATION_5)\n #cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n #cuda_gdb_p.sendline(BREAKPOINT+\" \"+BREAK_LOCATION_6)\n #cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n #---------------\n # run the program\n #---------------\n wc = cuda_gdb_p.sendline(RUN+ARGUMENT)\n resend = cuda_gdb_p.expect([CUDA_GDB_EXPECT,THREAD_CONTINUE_EXPECT])\n if resend == 1:\n cuda_gdb_p.sendline()\n rawstr = cuda_gdb_p.before\n # for debug\n #while \"Kernel 59\" not in rawstr:\n # cuda_gdb_p.sendline(CONTINUE)\n # cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n # rawstr = cuda_gdb_p.before\n # print rawstr\n lines = rawstr.split(\"\\r\\n\")\n #for line in lines:\n # if \"<<<\" and \">>>\" in line:\n # logger.info(line)\n \n #cuda_gdb_p.sendline(BREAKPOINT+\" \"+BREAK_LOCATION_5)\n #cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n #cuda_gdb_p.sendline(CONTINUE)\n #cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n \n #------------------------------\n # check the current PC\n #------------------------------\n cuda_gdb_p.sendline(CUDA_FUN_INFO)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n logger.info(\"KERNEL INFO: \"+cuda_gdb_p.before)\n 
cuda_gdb_p.sendline(PC)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n value = cuda_gdb_p.before.lstrip().rstrip(\"\\r\\n\").split(PC_EXPECT)\n logger.info(\"PC is \"+value[len(value)-1])\n target = \"\"\n temp = \"\"\n cuda_gdb_p.sendline(SWITCH)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n #while EXIT not in target or EXIT not in temp or \"is not being run\" in target or \"is not being run\" in temp: \n while \"No CUDA\" not in target and \"No CUDA\" not in temp: \n j = -1\n flag_step = 0\n flag_info = 0\n cuda_gdb_p.sendline(STEPI)\n while flag_step == 0:\n j = cuda_gdb_p.expect([CUDA_GDB_EXPECT,CUDA_SYN_EXPECT,THREAD_CONTINUE_EXPECT,THREAD_CONTINUE_EXPECT_WERIED,CUDA_SYN_EXPECT_2,pexpect.TIMEOUT],timeout=60)\n target = cuda_gdb_p.before\n logger.info(\"in stepi \"+target)\n if CUDA_SYN_EXPECT in target or CUDA_SYN_EXPECT_2 in target:\n logger.info(\"Hit the barrier!\")\n cuda_gdb_p.sendline(NEXT)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n break \n if j == 0:\n if NO_FOCUS in target and \"Switching\" not in target:\n logger.info(\"CONTINUE THREADS to hit breakpoint again! -1\")\n cuda_gdb_p.sendline(CONTINUE)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n target = cuda_gdb_p.before\n logger.info(\"target 1 \"+target)\n time.sleep(5)\n if CUDA_SYN_EXPECT in target:\n logger.info(\"Hit the barrier!\")\n cuda_gdb_p.sendline(NEXT)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n flag_step = 1\n elif j == 1:\n logger.info(\"Hit the barrier! - 1\")\n cuda_gdb_p.sendline(NEXT)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n flag_step = 1\n elif j == 2:\n cuda_gdb_p.sendline()\n logger.info(\"Send enter in stepi 1\")\n flag_step = 1\n elif j == 3:\n cuda_gdb_p.sendline()\n logger.info(\"Send enter in stepi 2\")\n elif j == 4:\n logger.info(\"Hit the barrier! - 2\")\n cuda_gdb_p.sendline(NEXT)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n flag_step = 1\n else:\n flag_step = 1\n i = -1\n cuda_gdb_p.sendline(CUDA_THREAD_INFO)\n while flag_info == 0: \n i = cuda_gdb_p.expect([CUDA_GDB_EXPECT,THREAD_CONTINUE_EXPECT,THREAD_CONTINUE_EXPECT_WERIED,CUDA_SYN_EXPECT,pexpect.TIMEOUT])\n temp = cuda_gdb_p.before\n logger.error(\"\\n\"+temp)\n if i == 0:\n if NO_FOCUS in temp and \"Switching\" not in temp:\n logger.info(\"CONTINUE THREADS to hit breakpoint again! -3\")\n cuda_gdb_p.sendline(CONTINUE)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n temp = cuda_gdb_p.before\n logger.info(\"temp 1 \"+temp)\n time.sleep(5)\n if CUDA_SYN_EXPECT in temp:\n logger.info(\"Hit the barrier!\")\n cuda_gdb_p.sendline(NEXT)\n cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n flag_info = 1\n \n elif i == 1:\n \n cuda_gdb_p.sendline()\n logger.info(\"Send enter to continue!\")\n elif i == 2:\n \n cuda_gdb_p.sendline()\n logger.info(\"Send enter to continue!\")\n elif i == 3:\n logger.info(\"Hit the barrier! 
- 4\")\n                cuda_gdb_p.sendline(NEXT)\n                cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n                flag_info = 1 \n            else :\n                flag_info = 1\n    cuda_gdb_p.sendline(QUIT)\n    #cuda_gdb_p.expect(CUDA_GDB_EXPECT)\n    #------------------------\n    # get the target register\n    #------------------------\ndef main():\n    global logger\n    logger = logging.getLogger(configure.benchmark+\"profiler\")\n    hdlr = logging.FileHandler(configure.profile_file)\n    formatter = logging.Formatter(\"%(levelname)s %(message)s\")\n    hdlr.setFormatter(formatter)\n    logger.addHandler(hdlr)\n    logger.setLevel(logging.INFO)\n    for trial in range(1):\n        profiler(configure.binary_path,0,trial)\n    \nmain()\n \n","sub_path":"profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":8326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"84704160","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n#\n# Complete the 'solution' function below.\n#\n# The function is expected to return a STRING_ARRAY.\n# The function accepts following parameters:\n#  1. 2D_STRING_ARRAY items\n#  2. INTEGER orderBy\n#  3. INTEGER orderDirection\n#  4. INTEGER pageSize\n#  5. INTEGER pageNumber\n#\n\ndef solution(items, orderBy, orderDirection, pageSize, pageNumber):\n    # columns: 0 name, 1 relevance, 2 price\n    if orderBy == 0:\n        items.sort(key=lambda x:x[orderBy], reverse=orderDirection)\n    else:\n        items.sort(key=lambda x:int(x[orderBy]), reverse=orderDirection)\n\n    total = len(items) // pageSize \n    start = pageSize * pageNumber\n    end = pageSize * pageNumber + pageSize\n    \n    if end // pageSize == total :\n        page = items[start:]\n    else:\n        page = items[start:end]\n    \n    res = [p[0] for p in page]\n    return res\n","sub_path":"test/k_/commerce_rec/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"70295770","text":"import numpy as np\nimport os\nimport math\n\n# Generate one-hot encodings\ndef dense_to_one_hot(labels_dense, img_index, num_classes=20):\n    \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n    num_labels = labels_dense.shape[0]\n    labels_one_hot = np.zeros((num_labels, num_classes))\n    labels_one_hot[:,img_index] = 1\n    return labels_one_hot\n\n# Preprocess the images and labels\ndef extract_images(filename, labels_index):\n    \"\"\"Extract the images into a 4D uint8 numpy array [index, y, x, depth].\"\"\"\n    print('Extracting', filename)\n    dir_img = \"E:\\\\QuickDrawMini\\\\\";\n    imgs = np.load(dir_img + filename +'.npy')\n    imgs_num = len(imgs) # The number of photos\n    rows = 28\n    cols = 28\n    data = imgs.reshape(imgs_num, rows, cols, 1)\n    file_labels = [filename]*imgs_num\n    file_labels = np.array(file_labels)\n    # Convert to one-hot encoding\n    labels_one_hot = dense_to_one_hot(labels_dense=file_labels,img_index=labels_index)\n    return data, labels_one_hot\n\n# Read all categories from file\ndef getCategories():\n    categories_file = open(\"categories.txt\",\"r\")\n    str = categories_file.read()\n    categories_file.close()\n    categories = str.split('\\n')\n    return np.array(categories)\n\nclass Data_set():\n    def __init__(self, images, img_labels):\n        images = images.astype(np.float32)\n        self._images = images\n        self._img_labels = img_labels\n        for i,img in enumerate(self._images):\n            # Normalize pixel values to [0, 1]\n            img = np.multiply(img, 1.0 / 255.0)\n            self._images[i] = img\n#        for img in self._images:\n#            print(img)\n        self._num = len(images)\n        # Shuffle the data\n        perm = np.arange(self._num)\n        np.random.shuffle(perm)\n        self._images = self._images[perm]\n        self._img_labels = self._img_labels[perm]\n        
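# Epoch bookkeeping consumed by next_batch below when iterating mini-batches.\n        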
self._index_in_epoch = 0\n        self._epochs_completed = 0\n        \n    def next_batch(self, batch_size):\n        \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n        start = self._index_in_epoch\n        self._index_in_epoch += batch_size\n        if self._index_in_epoch > self._num:\n            # Finished epoch\n            self._epochs_completed += 1\n            # Shuffle the data\n            perm = np.arange(self._num)\n            np.random.shuffle(perm)\n            self._images = self._images[perm]\n            self._img_labels = self._img_labels[perm]\n            # Start next epoch\n            start = 0\n            self._index_in_epoch = batch_size\n            assert batch_size <= self._num\n        end = self._index_in_epoch\n        return self._images[start:end], self._img_labels[start:end]\n    @property\n    def images(self):\n        return self._images\n    @property\n    def labels(self):\n        return self._img_labels\n    @property\n    def num_examples(self):\n        return self._num\n\ndef splitData(data, data_labels):\n    num_data = len(data)\n    num_label = len(data_labels)\n    # 70% as training set, 30% as test set\n    train_num = math.floor(num_data*0.7)\n    train_images = data[:train_num,]\n    train_labels = data_labels[:train_num,]\n    test_images = data[train_num:,]\n    test_labels = data_labels[train_num:,]\n    return train_images, train_labels, test_images, test_labels\n\n# Append the data into a list\ndef data_append(dataArr, _data):\n    for datas in dataArr:\n#        print(datas.shape)\n        _data.append(datas)\n    \n    \n\n# Save one batch to disk\ndef save_batch_file(labels_index, _train_imgs, _train_labels, _test_imgs, _test_labels):\n    print(labels_index)\n    # How many classes make up one batch\n    batch_class_size = 5\n    if (labels_index+1)%batch_class_size == 0:\n        labels_index = math.floor( (labels_index+1)/batch_class_size )-1\n    else:\n        labels_index = math.floor( (labels_index+1)/batch_class_size )\n    print(labels_index)\n    _train_imgs = np.array(_train_imgs)\n    _train_labels = np.array(_train_labels)\n    _test_imgs = np.array(_test_imgs)\n    _test_labels = np.array(_test_labels)\n    dir_img = \"E:\\\\QuickDrawMini\\\\data\\\\\"\n    np.save(dir_img + \"_train_imgs_batch_\"+str(labels_index), _train_imgs)\n    np.save(dir_img + \"_train_labels_batch_\"+str(labels_index), _train_labels)\n    np.save(dir_img + \"_test_imgs_batch_\"+str(labels_index), _test_imgs)\n    np.save(dir_img + \"_test_labels_batch_\"+str(labels_index), _test_labels)\n\ndef read_data_sets():\n#    class Data_sets():\n#        pass\n#    data_sets = Data_sets()\n    # dir_img = \"E:\\\\QuickDrawMini\\\\\";\n    _train_imgs = []\n    _train_labels = []\n    _test_imgs = []\n    _test_labels = []\n    # Read all categories from file\n    img_names = getCategories()\n    labels_index = 0  # holds the current label index\n    # How many classes make up one batch\n    batch_class_size = 5\n    for img_name in img_names:\n        data,data_labels = extract_images(img_name, labels_index)\n        train_image, train_label, test_image, test_label = splitData(data, data_labels)\n        data_append(train_image, _train_imgs)\n        data_append(train_label, _train_labels)\n        data_append(test_image, _test_imgs)\n        data_append(test_label, _test_labels)\n        # Save every batch_class_size classes as one batch file, then clear the lists\n        if (labels_index+1)%batch_class_size == 0 or labels_index == len(img_names)-1:\n            save_batch_file(labels_index, _train_imgs, _train_labels, _test_imgs, _test_labels)\n            _train_imgs.clear()\n            _train_labels.clear()\n            _test_imgs.clear()\n            _test_labels.clear()\n        labels_index += 1\n#    _train_imgs = np.array(_train_imgs)\n#    _train_labels = np.array(_train_labels)\n#    _test_imgs = np.array(_test_imgs)\n#    _test_labels = np.array(_test_labels)\n#    data_sets.train = Data_set(_train_imgs, _train_labels)\n#    data_sets.test = Data_set(_test_imgs, _test_labels)\n#    return data_sets\n\n# Read the files of one batch\ndef read_batch_data(batch_index):\n    class Data_sets():\n        pass\n    data_sets = Data_sets()\n    
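# Load the four .npy arrays that save_batch_file wrote for this batch index and wrap them in Data_set objects.\n    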
dir_img = \"E:\\\\QuickDrawMini\\\\data\\\\\";\n _train_imgs = np.load(dir_img+\"_train_imgs_batch_\" + str(batch_index) +\".npy\" )\n _train_labels = np.load(dir_img + \"_train_labels_batch_\"+ str(batch_index) +\".npy\")\n _test_imgs = np.load(dir_img + \"_test_imgs_batch_\"+ str(batch_index) +\".npy\")\n _test_labels = np.load(dir_img + \"_test_labels_batch_\"+ str(batch_index) +\".npy\")\n data_sets.train = Data_set(_train_imgs, _train_labels)\n data_sets.test = Data_set(_test_imgs, _test_labels)\n return data_sets\n\"\"\"\nquick_draw = read_data_sets()\ntrain_imgs = quick_draw.train.images\ntrain_labels = quick_draw.train.labels\ntest_imgs = quick_draw.test.images\ntest_labels = quick_draw.test.labels\nprint(len(train_imgs),\"|\",len(train_labels))\nprint(len(test_imgs),\"|\",len(test_labels))\n\"\"\"","sub_path":"cn/firefriend/quickMini/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456748494","text":"from flask import render_template, current_app, session, g, abort, jsonify, request\n\nfrom info import constants, db\nfrom info.models import News, User, Comment, CommentLike\nfrom info.modules.news import news_blu\nfrom info.utils.common import user_login_data\nfrom info.utils.response_code import RET\n\n@news_blu.route(\"/followed_user\",methods=[\"POST\"])\n@user_login_data\ndef followed_user():\n user=g.user\n if not user:\n return jsonify(errno=RET.SESSIONERR,errmsg=\"未登录\")\n\n user_id=request.json.get(\"user_id\")\n action=request.json.get(\"action\")\n\n if not all([user_id,action]):\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n\n if action not in (\"follow\",\"unfollow\"):\n return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")\n\n try:\n other=User.query.get(user_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR,errmsg=\"数据查询失败\")\n\n if not other:\n return jsonify(errno=RET.NODATA,errmsg=\"未查询到数据\")\n\n if action == \"follow\":\n if other not in user.followed:\n user.followed.append(other)\n else:\n return jsonify(errno=RET.DATAEXIST,errmsg=\"当前用户已被关注\")\n else:\n if other in user.followed:\n user.followed.remove(other)\n else:\n return jsonify(errno=RET.DATAEXIST,errmsg=\"当前用户未被关注\")\n\n return jsonify(errno=RET.OK,errmsg=\"OK\")\n\n@news_blu.route(\"/comment_like\",methods=[\"POST\"])\n@user_login_data\ndef comment_like():\n\n user=g.user\n if not user:\n return jsonify(errno=RET.SESSIONERR,errmsg=\"用户未登录\")\n\n comment_id=request.json.get(\"comment_id\")\n news_id = request.json.get(\"news_id\")\n action = request.json.get(\"action\")\n\n if not all([comment_id,news_id,action]):\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n\n if action not in [\"add\",\"remove\"]:\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n\n try:\n comment_id=int(comment_id)\n news_id=int(news_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n\n try:\n comment=Comment.query.get(comment_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR,errmsg=\"数据查询错误\")\n\n if not comment:\n return jsonify(errno=RET.NODATA,errmsg=\"数据不存在\")\n\n if action==\"add\":\n comment_like_model=CommentLike.query.filter(CommentLike.user_id==user.id,CommentLike.comment_id==comment.id).first()\n if not comment_like_model:\n comment_like_model=CommentLike()\n comment_like_model.user_id=user.id\n 
comment_like_model.comment_id=comment.id\n            db.session.add(comment_like_model)\n            comment.like_count+=1\n    else:\n        comment_like_model=CommentLike.query.filter(CommentLike.user_id==user.id,CommentLike.comment_id==comment.id).first()\n        if comment_like_model:\n            db.session.delete(comment_like_model)\n            comment.like_count -= 1\n    try:\n        db.session.commit()\n    except Exception as e:\n        db.session.rollback()\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR,errmsg=\"数据库操作失败\")\n\n    return jsonify(errno=RET.OK,errmsg=\"OK\")\n\n@news_blu.route(\"/news_comment\",methods=[\"POST\"])\n@user_login_data\ndef comment_news():\n\n    user=g.user\n    if not user:\n        return jsonify(errno=RET.SESSIONERR, errmsg=\"用户未登录\")\n\n    news_id=request.json.get(\"news_id\")\n    comment_content = request.json.get(\"comment\")\n    parent_id = request.json.get(\"parent_id\")\n\n    if not all([news_id,comment_content]):\n        return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")\n\n    try:\n        news_id=int(news_id)\n        if parent_id:\n            parent_id=int(parent_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")\n    try:\n        news=News.query.get(news_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=\"数据查询错误\")\n\n    if not news:\n        return jsonify(errno=RET.NODATA,errmsg=\"未查到数据\")\n\n    comment=Comment()\n    comment.user_id=user.id\n    comment.news_id=news_id\n    comment.content=comment_content\n    if parent_id:\n        comment.parent_id=parent_id\n    try:\n        db.session.add(comment)\n        db.session.commit()\n    except Exception as e:\n        current_app.logger.error(e)\n        db.session.rollback()\n\n    return jsonify(errno=RET.OK, errmsg=\"OK\",data=comment.to_dict())\n\n\n@news_blu.route(\"/news_collect\",methods=[\"POST\"])\n@user_login_data\ndef collect_news():\n\n    user=g.user\n\n    if not user:\n        return jsonify(errno=RET.SESSIONERR,errmsg=\"用户未登录\")\n\n    news_id=request.json.get(\"news_id\")\n    action=request.json.get(\"action\")\n\n    if not all([news_id,action]):\n        return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n    if action not in [\"collect\",\"cancel_collect\"]:\n        return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n    try:\n        news_id=int(news_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n\n    try:\n        news=News.query.get(news_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR,errmsg=\"数据查询错误\")\n\n    if not news:\n        return jsonify(errno=RET.NODATA,errmsg=\"未查到数据\")\n\n    if action==\"cancel_collect\":\n        if news in user.collection_news:\n            user.collection_news.remove(news)\n    else:\n        if news not in user.collection_news:\n            user.collection_news.append(news)\n\n    return jsonify(errno=RET.OK,errmsg=\"操作成功\")\n\n@news_blu.route(\"/<int:news_id>\")\n@user_login_data\ndef news_detail(news_id):\n\n\n    user = g.user\n\n    new_list = []\n    try:\n        new_list = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)\n    except Exception as e:\n        current_app.logger.error(e)\n    news_dict_li = []\n    for news in new_list:\n        news_dict_li.append(news.to_basic_dict())\n\n    news=None\n\n    try:\n        news=News.query.get(news_id)\n    except Exception as e:\n        current_app.logger.error(e)\n\n    if not news:\n        abort(404)\n\n    news.clicks+=1\n\n    is_collected=False\n\n    if user:\n        if news in user.collection_news:\n            is_collected=True\n\n    comments=[]\n\n    try:\n        comments=Comment.query.filter(Comment.news_id==news_id).order_by(Comment.create_time.desc()).all()\n    except Exception as e:\n        current_app.logger.error(e)\n    
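# Collect the ids of comments the current user has liked, so each comment dict below can carry an is_like flag.\n    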
comment_like_ids=[]\n if g.user:\n try:\n comment_ids=[comment.id for comment in comments]\n\n comment_likes=CommentLike.query.filter(CommentLike.comment_id.in_(comment_ids),CommentLike.user_id==g.user.id).all()\n\n comment_like_ids=[comment_like.comment_id for comment_like in comment_likes]\n except Exception as e:\n current_app.logger.error(e)\n comment_dict_li=[]\n for comment in comments:\n comment_dict=comment.to_dict()\n comment_dict['is_like']=False\n if comment.id in comment_like_ids:\n comment_dict['is_like'] = True\n comment_dict_li.append(comment_dict)\n\n is_followed=False\n if news.user and user:\n if news.user in user.followed:\n is_followed=True\n\n data={\n \"user\": user.to_dict() if user else None,\n \"news_dict_li\":news_dict_li,\n \"news\":news.to_dict(),\n \"is_collected\":is_collected,\n \"is_followed\":is_followed,\n \"comments\":comment_dict_li\n }\n return render_template(\"news/detail.html\",data=data)","sub_path":"info/modules/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"213617652","text":"class Solution:\n # def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:\n def __init__(self):\n self.result = []\n\n def wordBreak(self, s, wordDict):\n if not s:\n return [\"\"]\n wordDict = set(wordDict)\n dp = self.dp(s, wordDict)\n self.dfs(s, \"\", 0, wordDict, dp)\n\n return self.result\n\n def dp(self, s, wordDict):\n N = len(s)\n dp = [False] * (N+1)\n dp[0] = True\n for i in range(N):\n for j in range(i, N+1):\n if dp[i] and s[i:j] in wordDict:\n dp[j] = True\n return dp\n\n def dfs(self, s, path, idx, wordDict, dp):\n if dp[idx + len(s)]:\n if not s:\n self.result.append(path.strip())\n\n for i in range(1, len(s)+1):\n if s[:i] in wordDict:\n self.dfs(s[i:], path + \" \" + s[:i], idx + i, wordDict, dp)\n\n","sub_path":"LeetCode/0140_WordBreakII/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"416221370","text":"import cv2,sys\r\nimport numpy as np\r\nclass DenseDetector(object):\r\n def __init__(self, step_size=20, feature_scale=40, img_bound=20):\r\n # Create a dense feature detector\r\n self.detector = cv2.FeatureDetector_create(\"Dense\")\r\n # Initialize it with all the required parameters\r\n self.detector.setInt(\"initXyStep\", step_size)\r\n self.detector.setInt(\"initFeatureScale\", feature_scale)\r\n self.detector.setInt(\"initImgBound\", img_bound)\r\n def detect(self, img):\r\n # Run feature detector on the input image\r\n return self.detector.detect(img)\r\n\r\nif __name__=='__main__':\r\n input_image = cv2.imread(sys.argv[1])\r\n input_image_sift = np.copy(input_image)\r\n # Convert to grayscale\r\n gray_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)\r\n keypoints = DenseDetector(20,20,5).detect(input_image)\r\n # Draw keypoints on top of the input image\r\n input_image = cv2.drawKeypoints(input_image, keypoints, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n # Display the output image\r\n cv2.imshow('Dense feature detector', input_image)\r\n # Initialize SIFT object\r\n sift = cv2.SIFT()\r\n # Detect keypoints using SIFT\r\n keypoints = sift.detect(gray_image, None)\r\n # Draw SIFT keypoints on the input image\r\n input_image_sift = cv2.drawKeypoints(input_image_sift,\r\n keypoints, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n # Display the output 
image\r\n    cv2.imshow('SIFT detector', input_image_sift)\r\n    # Wait until user presses a key\r\n    cv2.waitKey()\r\n","sub_path":"chapter-10.py","file_name":"chapter-10.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"32941064","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport filepath\nimport time\n\ndriver = webdriver.Chrome(filepath.getChromeDriverPath())\ndriver.get(\"https://news.naver.com/main/main.nhn?mode=LSD&mid=sec&sid1=100\")\n\narticle_links_elements = driver.find_elements_by_xpath(\n    '//*[@id=\"section_body\"]/ul/li/dl/dt[2]/a'\n) # find the list of news-link elements by XPath\n\n\nfor article_links_element in article_links_elements:\n    article_links_element.send_keys(Keys.CONTROL + \"\\n\")\n    # for windows, Keys.CONTROL\n    # for mac OS, Keys.COMMAND\n    driver.switch_to_window(driver.window_handles[-1])\n    time.sleep(3)\n    driver.close()\n    driver.switch_to_window(driver.window_handles[0])\n    time.sleep(3)\n\n\ndriver.close()\n","sub_path":"lecture-4/01.complete-news-crawler/03.move-tap.py","file_name":"03.move-tap.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"197718697","text":"import glob\nimport json\nimport argparse\n\n# works like a charm, put in your path and\n# make sure to include the path of the produced json in their merge name\n\n# This function is run on the jsons of output, RHO_MOD, and MAX_OS.\n# It merges all trial dictionaries: since each dictionary has a different key\n# (which indicates the trial used), they are added one by one until a single\n# dictionary covering all trials is built.\n\ndef json_stack_keys(jsons_path, merge_path_name):\n    print(\"in stack_keys.py\")\n    # Include last / at end of path!\n    files = glob.glob(\"{}*.json\".format(jsons_path))\n\n    count = 0\n    for file in files:\n        if count == 0:\n            count += 1\n            with open(file, \"r\") as f:\n                C_dictionary = json.load(f)\n        else:\n            with open(file, \"r\") as f:\n                C_dictionary.update(json.load(f))\n\n    with open('{}.json'.format(merge_path_name), 'w') as f:\n        json.dump(C_dictionary, f, indent=2)\n    \n    print(\"leaving stack_key\")\n#def json_stack_keys(jsons_path, merge_path_name):\n\nif __name__==\"__main__\":\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--jsons_path', type=str)\n    parser.add_argument('--merge_path_name', type=str)\n    args = parser.parse_args()\n\n    json_stack_keys(args.jsons_path,args.merge_path_name)\n","sub_path":"json_stack_keys.py","file_name":"json_stack_keys.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"648420928","text":"import urllib.request\n# Add request headers\n# Method 1\nurl=\"http://blog.csdn.net/lastsweetop/article/details/79025517\"\nkeyAgent=\"User-Agent\"\nvalueAgent=\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36\"\nheaders=(keyAgent,valueAgent)\nopener=urllib.request.build_opener()\nopener.addheaders=[headers]\ndata=opener.open(url).read()\nprint(data)\nfhandle=open(\"D:/Python35/3.html\",'wb')\nfhandle.write(data)\nfhandle.close()\n# Method 2\nreq=urllib.request.Request(url)\nreq.add_header(keyAgent,valueAgent)\ndata=urllib.request.urlopen(req).read()\nprint(data)\n\nfor i in range(0,100):\n    try:\n        file=urllib.request.urlopen(url,timeout=50)\n        data=file.read()\n        print(len(data))\n    except Exception as 
e:\n print(\"error-->\"+str(e))","sub_path":"Spider/2.requestHeader.py","file_name":"2.requestHeader.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"414802056","text":"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom test_imperative_base import new_program_scope\n\nimport paddle\nfrom paddle import fluid\nfrom paddle.fluid import core\nfrom paddle.fluid.dygraph.base import to_variable\nfrom paddle.nn import Linear\n\nSEED = 123123111\n\n\nclass SimpleImgConvPool(paddle.nn.Layer):\n def __init__(\n self,\n num_channels,\n num_filters,\n filter_size,\n pool_size,\n pool_stride,\n pool_padding=0,\n pool_type='max',\n global_pooling=False,\n conv_stride=1,\n conv_padding=0,\n conv_dilation=1,\n conv_groups=1,\n act=None,\n use_cudnn=False,\n param_attr=None,\n bias_attr=None,\n ):\n super().__init__()\n\n self._conv2d = paddle.nn.Conv2D(\n in_channels=num_channels,\n out_channels=num_filters,\n kernel_size=filter_size,\n stride=conv_stride,\n padding=conv_padding,\n dilation=conv_dilation,\n groups=conv_groups,\n weight_attr=None,\n bias_attr=None,\n )\n\n self._pool2d = paddle.nn.MaxPool2D(\n kernel_size=pool_size,\n stride=pool_stride,\n padding=pool_padding,\n )\n\n def forward(self, inputs):\n x = self._conv2d(inputs)\n x = self._pool2d(x)\n return x\n\n\nclass MNIST(paddle.nn.Layer):\n def __init__(self):\n super().__init__()\n\n self._simple_img_conv_pool_1 = SimpleImgConvPool(\n 1, 20, 5, 2, 2, act=\"relu\"\n )\n\n self._simple_img_conv_pool_2 = SimpleImgConvPool(\n 20, 50, 5, 2, 2, act=\"relu\"\n )\n\n self.pool_2_shape = 50 * 4 * 4\n SIZE = 100 # 10\n scale = (2.0 / (self.pool_2_shape**2 * SIZE)) ** 0.5\n self._fc = Linear(\n self.pool_2_shape,\n SIZE,\n weight_attr=paddle.ParamAttr(\n initializer=paddle.nn.initializer.Normal(mean=0.0, std=scale)\n ),\n )\n\n def forward(self, inputs):\n x = self._simple_img_conv_pool_1(inputs)\n x = self._simple_img_conv_pool_2(x)\n x = paddle.reshape(x, shape=[-1, self.pool_2_shape])\n x = self._fc(x)\n x = paddle.nn.functional.softmax(x)\n return x\n\n\nclass TestDygraphMultiForward(unittest.TestCase):\n def test_mnist_forward_float32(self):\n epoch_num = 1\n\n with fluid.dygraph.guard():\n paddle.seed(SEED)\n paddle.framework.random._manual_program_seed(SEED)\n mnist = MNIST()\n sgd = paddle.optimizer.SGD(\n learning_rate=1e-3, parameters=mnist.parameters()\n )\n train_reader = paddle.batch(\n paddle.dataset.mnist.train(), batch_size=128, drop_last=True\n )\n\n dy_param_init_value = {}\n mnist.eval()\n for epoch in range(epoch_num):\n for batch_id, data in enumerate(train_reader()):\n dy_x_data = np.array(\n [x[0].reshape(1, 28, 28) for x in data]\n ).astype('float32')\n y_data = (\n np.array([x[1] for x in data])\n .astype('int64')\n .reshape(128, 1)\n )\n\n img = to_variable(dy_x_data)\n label = to_variable(y_data)\n label.stop_gradient = 
True\n\n cost = mnist(img)\n loss = paddle.nn.functional.cross_entropy(\n cost, label, reduction='none', use_softmax=False\n )\n avg_loss = paddle.mean(loss)\n\n dy_out = avg_loss.numpy()\n\n if epoch == 0 and batch_id == 0:\n for param in mnist.parameters():\n dy_param_init_value[param.name] = param.numpy()\n\n with new_program_scope():\n paddle.seed(SEED)\n paddle.framework.random._manual_program_seed(SEED)\n exe = fluid.Executor(\n fluid.CPUPlace()\n if not core.is_compiled_with_cuda()\n else fluid.CUDAPlace(0)\n )\n\n mnist = MNIST()\n sgd = paddle.optimizer.SGD(learning_rate=1e-3)\n train_reader = paddle.batch(\n paddle.dataset.mnist.train(), batch_size=128, drop_last=True\n )\n\n img = paddle.static.data(\n name='pixel', shape=[-1, 1, 28, 28], dtype='float32'\n )\n label = paddle.static.data(\n name='label', shape=[-1, 1], dtype='int64'\n )\n cost = mnist(img)\n loss = paddle.nn.functional.cross_entropy(\n cost, label, reduction='none', use_softmax=False\n )\n avg_loss = paddle.mean(loss)\n\n # initialize params and fetch them\n static_param_init_value = {}\n static_param_name_list = []\n for param in mnist.parameters():\n static_param_name_list.append(param.name)\n\n out = exe.run(\n fluid.default_startup_program(),\n fetch_list=static_param_name_list,\n )\n\n for i in range(len(static_param_name_list)):\n static_param_init_value[static_param_name_list[i]] = out[i]\n\n for epoch in range(epoch_num):\n for batch_id, data in enumerate(train_reader()):\n static_x_data = np.array(\n [x[0].reshape(1, 28, 28) for x in data]\n ).astype('float32')\n y_data = (\n np.array([x[1] for x in data])\n .astype('int64')\n .reshape([128, 1])\n )\n\n fetch_list = [avg_loss.name]\n out = exe.run(\n fluid.default_main_program(),\n feed={\"pixel\": static_x_data, \"label\": y_data},\n fetch_list=fetch_list,\n )\n\n static_out = out[0]\n\n np.testing.assert_allclose(\n dy_x_data.all(), static_x_data.all(), rtol=1e-05\n )\n\n for key, value in static_param_init_value.items():\n np.testing.assert_allclose(\n value, dy_param_init_value[key], rtol=1e-05\n )\n\n np.testing.assert_allclose(static_out, dy_out, rtol=1e-05)\n\n\nif __name__ == '__main__':\n paddle.enable_static()\n unittest.main()\n","sub_path":"test/legacy_test/test_dygraph_multi_forward.py","file_name":"test_dygraph_multi_forward.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"633360880","text":"class Solution:\n def fourSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype List[List[int]]\n \"\"\"\n res = []\n self.backtrack(sorted(nums), target, 0, 0, res, [])\n return res\n\n def backtrack(self, nums, target, index, count, res, temp):\n if count > 4:\n return \n if count == 4 and sum(temp) == target and temp not in res:\n res.append(list(temp))\n for i in range(index, len(nums)):\n temp.append(nums[i])\n self.backtrack(nums, target, i + 1, count + 1, res, temp)\n temp.pop()\n\n def fourSumV2(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype List[List[int]]\n \"\"\"\n def findNSum(nums, target, N, result, results):\n if len(nums) < N or N < 2 or target < nums[0] * N or target > nums[-1] * N:\n return \n if N == 2:\n l, r = 0, len(nums) - 1\n while l < r:\n s = nums[l] + nums[r]\n if s == target:\n results.append(result + [nums[l], nums[r]])\n l += 1\n while l < r and nums[l] == nums[l - 1]:\n l += 1\n elif s < target:\n l += 1\n else:\n r -= 1\n else:\n for i in range(len(nums) 
- N + 1):\n                if i == 0 or (i > 0 and nums[i - 1] != nums[i]):\n                    findNSum(nums[i+1:], target - nums[i], N - 1, result + [nums[i]], results)\n\n        results = []\n        findNSum(sorted(nums), target, 4, [], results)\n        return results\n\nif __name__ == \"__main__\":\n    solu = Solution()\n    nums = [0, 0, 0, 0, 0]\n    target = 0\n    print(\"Solution1\")\n    print(solu.fourSum(nums, target))\n    print(\"Solution2\")\n    print(solu.fourSumV2(nums, target))","sub_path":"18_4sum/18_4Sum.py","file_name":"18_4Sum.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"391022914","text":"# set the path\nimport os, sys\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom flask_script import Manager, Server\nfrom flask_migrate import Migrate, MigrateCommand\nfrom application import create_app\n\n\ndef run_app():\n    app = create_app()\n    manager = Manager(app)\n\n    # Turn on debugger by default and reloader\n\n    manager.add_command(\"runserver\", Server(\n        use_debugger=True,\n        use_reloader=True,\n        host=os.getenv('FLASK_APP_IP', '0.0.0.0'),\n        port=int(os.getenv('FLASK_APP_PORT', 5000))\n    )\n    )\n    manager.add_command('db', MigrateCommand)\n\n    manager.run()\n\n\nif __name__ == '__main__':\n    run_app()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"368310858","text":"from rest_framework.renderers import JSONRenderer\n\n\nclass CustomRenderer(JSONRenderer):\n    def render(self, data, accepted_media_type=None, renderer_context=None):\n        \"\"\"\n        :param data: the data to be returned\n        :param accepted_media_type: the accepted media type\n        :param renderer_context: the rendering context\n        \"\"\"\n        # If request data came in: similar to the earlier if request.method == \"POST\"\n        if renderer_context:\n            # Check whether the returned data is a dict\n            if isinstance(data, dict) and all(map(lambda x: x in data, ['code', 'result', 'msg', 'data'])):\n                code = data.pop('code', 0)\n                result = data.pop('result', True)\n                msg = data.pop('msg', '请求成功')\n                rp_data = data.pop('data', {})\n            else:\n                code = 0\n                result = True\n                msg = '请求成功'\n                rp_data = data if data is not None else {}\n\n            # Rebuild the format of the response data\n            ret = {\n                'code': code,\n                'result': result,\n                'msg': msg,\n                'data': rp_data\n            }\n            # Return the data through the parent class renderer\n            return super().render(ret, accepted_media_type, renderer_context)\n        else: # If nothing was modified, return the data in its original format\n            return super().render(data, accepted_media_type, renderer_context)","sub_path":"Application/utils/renderer_response.py","file_name":"renderer_response.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"256185889","text":"import socket\n\ntarget_host = \"127.0.0.1\"\ntarget_port = 8888\n\n# Create a socket object\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Connect to the target server\nclient.connect((target_host, target_port))\n\n# Send some data\nclient.send(b'Today is 520\\n')\n\n# Receive some data\nresponse = client.recv(4096)\n\nprint(response)","sub_path":"chapter2/TCP_Client.py","file_name":"TCP_Client.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"623307924","text":"import tkinter as tk\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\n\n\nclass 
Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\n def remove(self):\n pass\n\n\nclass AttitudeMonitor(tk.Frame):\n def __init__(self):\n master = tk.Toplevel()\n super(AttitudeMonitor, self).__init__(master)\n master.title(\"Attitude Monitor\")\n self.pack()\n self.create_widgets()\n\n def create_widgets(self):\n figure = Figure(figsize=(10, 10))\n axis = figure.gca(projection='3d')\n axis.set_xlim(-1, 1)\n axis.set_ylim(-1, 1)\n axis.set_zlim(-1, 1)\n axis.xaxis.set_major_formatter(plt.NullFormatter())\n axis.yaxis.set_major_formatter(plt.NullFormatter())\n axis.zaxis.set_major_formatter(plt.NullFormatter())\n x_arrow = Arrow3D([0, 1], [0, 0], [0, 0], mutation_scale=20, lw=1, arrowstyle=\"-|>\", color=\"blue\")\n y_arrow = Arrow3D([0, 0], [0, 1], [0, 0], mutation_scale=20, lw=1, arrowstyle=\"-|>\", color=\"red\")\n z_arrow = Arrow3D([0, 0], [0, 0], [0, 1], mutation_scale=20, lw=1, arrowstyle=\"-|>\", color=\"yellow\")\n axis.set_aspect('equal')\n axis.add_artist(x_arrow)\n axis.add_artist(y_arrow)\n axis.add_artist(z_arrow)\n canvas = FigureCanvasTkAgg(figure, master=self)\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n def update(self):\n pass\n","sub_path":"attitude.py","file_name":"attitude.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"112640627","text":"import sys\nsys.path.append('./')\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nfrom torchvision import models\nfrom torch.autograd import Variable\nimport argparse\nfrom torchvision import models\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nimport os\n\nfrom src.MultiTaskCNN1 import DetectCNN\nfrom utils.yoloLoss import yoloLoss\nfrom src.yolo.net import vgg16_bn\nfrom src.yolo.resnet_yolo import resnet50\nfrom utils.yoloDataloader import YoloDataset\nfrom visualize import Visualizer\n\nparser = argparse.ArgumentParser(description='object detection using tiny yolov3')\n# train or test:\nparser.add_argument('ACTION', type=str, help=\"'train' or 'test' the detector.\")\nparser.add_argument('--img-dir', dest='img_dir', type=str, default='../data/samples',\n help=\"The path to the folder containing images to be detected or trained.\")\nparser.add_argument('--batch-size', dest='batch_size', type=int, default=4,\n help=\"The number of sample in one batch during training or inference.\")\nparser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument(\"--img-size\", dest='img_size', type=int, default=416,\n help=\"The size of the image for training or inference.\")\nparser.add_argument('--annot-path', dest='annot_path', type=str, default=None,\n help=\"TRAINING ONLY: The path to the file of the annotations for training.\")\nparser.add_argument('--no-augment', dest='data_augment', action='store_false',\n help=\"TRAINING ONLY: use this option to turn off the data augmentation of the dataset.\"\n \"Currently only COCO dataset support data augmentation.\")\nparser.add_argument('--normalize', action='store_true', default=False,\n 
help='whether to normalize loss')\n# YOLOv4 tricks\n# mosaic data augmentation, True or False; in practice mosaic augmentation is not stable, so it defaults to False\nparser.add_argument('--mosaic', action='store_true', default=False,\n                    help='whether to use mosaic trick')\n# Cosine_scheduler: cosine annealing learning rate, True or False\nparser.add_argument('--cosine-lr', action='store_true', default=False,\n                    help='whether to use cosine annealing learning rate')\n# label_smoothing: label smoothing, usually below 0.01, e.g. 0.01 or 0.005\nparser.add_argument('--smooth-label', default=0, type=float,\n                    metavar='SL', help='smooth label, usually 0.01 or 0.005')\nargs = parser.parse_args()\n\n\n# ---------------------------------------------------#\n#   Load the classes and anchor priors\n# ---------------------------------------------------#\ndef get_classes(classes_path):\n    '''loads the classes'''\n    with open(classes_path) as f:\n        class_names = f.readlines()\n    class_names = [c.strip() for c in class_names]\n    return class_names\n\n\ndef get_anchors(anchors_path):\n    '''loads the anchors from a file'''\n    with open(anchors_path) as f:\n        anchors = f.readline()\n    anchors = [float(x) for x in anchors.split(',')]\n    return np.array(anchors).reshape([-1,3,2])[::-1,:,:]\n\n# Split into training and validation sets; val_split controls the ratio, default 0.1, i.e. train:val = 9:1\ndef train_val(annotation_path):\n    val_split = 0.1\n    with open(annotation_path) as f:\n        lines = f.readlines()\n    np.random.seed(10101)\n    np.random.shuffle(lines)\n    np.random.seed(None)\n    num_val = int(len(lines) * val_split)\n    num_train = len(lines) - num_val\n    return lines, num_train\n\n# Input image size\ninput_shape = (448, 448)\n# Whether to normalize the loss, which changes its magnitude; decides whether the final loss is divided by batch_size or by the number of positive samples\nnormalize = False\n# Paths of the classes and anchors files; very important: modify classes_path before training so it matches your own dataset\nanchors_path = './data/VOCdevkit/yolo_anchors.txt'\nclasses_path = './data/VOCdevkit/voc_classes.txt'\nclass_names = get_classes(classes_path)\nanchors = get_anchors(anchors_path)\nnum_classes = len(class_names)\ndef train():\n    # Log data for display in TensorBoard\n    writer_loss = SummaryWriter(os.path.join(args.summary_dir, 'loss'))\n\n    train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)\n\n\n","sub_path":"detect_main.py","file_name":"detect_main.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"158876620","text":"from django.conf.urls import patterns, include, url\nfrom socialnetwork.forms import LoginForm\nfrom django.views.generic.base import TemplateView\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = patterns('',\n    url(r'^$', 'socialnetwork.home_views.home', name='home'),\n    url(r'^followers$', 'socialnetwork.home_views.home_followers', name='home-followers'),\n    url(r'^register$', 'socialnetwork.register_views.register_user', name='register'),\n    url(r'^makepost-page$', 'socialnetwork.post_views.makepost_page', name='makepost-page'),\n    url(r'^make-a-post$', 'socialnetwork.post_views.make_a_post', name='make-a-post'),\n    url(r'^update-post/(?P\\d+)$', 'socialnetwork.post_views.update_posts', name='update-post'),\n    url(r'^follow/(?P\\w+)$', 'socialnetwork.buttons_views.follow', name='follow'),\n    url(r'^unfollow/(?P\\w+)$', 'socialnetwork.buttons_views.unfollow', name='unfollow'),\n    url(r'^edit-profile$', 'socialnetwork.profile_views.edit_profile', name='edit-profile'),\n    url(r'^edit-profile-img$', 'socialnetwork.profile_views.edit_profile_img',\n        name='edit-profile-img'),\n    url(r'^edit-profile-pw$', 'socialnetwork.profile_views.edit_profile_pw', name='edit-profile-pw'),\n    
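# The remaining routes cover profile-info edits, profile viewing, comment AJAX endpoints, and auth.\n    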
url(r'^edit-profile-info$', 'socialnetwork.profile_views.edit_profile_info',\n        name='edit-profile-info'),\n    url(r'^view-profile/(?P\\w+)$', 'socialnetwork.profile_views.view_profile_user',\n        name='view-profile-user'),\n    url(r'^get-comments/(?P\\d+)$', 'socialnetwork.comments_views.get_comments',\n        name='get-comments'),\n    url(r'^add-comment/(?P\\d+)$', 'socialnetwork.comments_views.add_comment',\n        name='add-comment'),\n    url(r'^login$', 'django.contrib.auth.views.login',\n        {'template_name': 'socialnetwork/login.html',\n         'authentication_form': LoginForm},\n        name='login'),\n    url(r'^logout$', 'django.contrib.auth.views.logout_then_login', name='logout'),\n    ) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"hw5/socialnetwork/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"626582050","text":"from rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\nfrom moduls.lists.models import List\nfrom moduls.lists.serializer import SerializarCreateList, GetSerializerList, AuxSerializerCards\nfrom moduls.lists.permissions import ListPermissions\nfrom rest_framework import status\nfrom rest_framework.decorators import action\n# Create your views here.\n\nclass ListModelViewSet(ModelViewSet):\n    queryset = List.objects.all()\n    serializer_class = SerializarCreateList\n    permission_classes = [ListPermissions]\n    \n    def get_queryset(self):\n        data={}\n        for k, v in self.request.query_params.items():\n            data[k] = v\n        return self.queryset.filter(**data)\n    \n    def list(self, request, *args, **kwargs):\n        queryset = self.get_queryset()\n        serialized = GetSerializerList(queryset, many=True)\n        return Response(status=status.HTTP_200_OK, data=serialized.data)\n\n    @action(methods=['GET'], detail=True)\n    def cards(self, request, pk):\n        board = request.data.get('board')\n        if not board:\n            board = request.query_params.get('board')\n        if not board:\n            return Response(status=status.HTTP_400_BAD_REQUEST)\n\n        queryset = self.get_queryset().filter(board=board, id=pk)\n\n        if not queryset.exists():\n            return Response(status=status.HTTP_404_NOT_FOUND)\n        cards = queryset[0].card_set.all()\n        serialized = AuxSerializerCards(cards, many=True)\n        return Response(status=status.HTTP_200_OK, data=serialized.data)\n\n    @action(methods=['PUT'], detail=True)\n    def order(self, request, pk):\n        board = request.data.get('board')\n        new_position = request.data.get('order')\n\n        try:\n            board = int(board)\n            new_position = int(new_position)\n        except:\n            return Response(status=status.HTTP_400_BAD_REQUEST)\n\n        if new_position < 1:\n            return Response(status=status.HTTP_400_BAD_REQUEST)\n\n        queryset = self.get_queryset().filter(board=board)\n\n        if not queryset.exists():\n            return Response(status=status.HTTP_404_NOT_FOUND)\n\n        list_data = queryset.filter(id=pk) \n        if not list_data.exists():\n            return Response(status=status.HTTP_404_NOT_FOUND)\n\n        old_position = list_data[0].position\n\n        if old_position == new_position:\n            return Response(status=status.HTTP_400_BAD_REQUEST)\n\n        if new_position < old_position:\n            for list in queryset:\n                if list.position < new_position or list.position >= old_position:\n                    continue\n                queryset.filter(id=list.id).update(position=list.position + 1)\n            list_data.update(position=new_position)\n\n        if new_position > old_position:\n            for list in queryset:\n                if list.position <= 
old_position or list.position > new_position:\n                    continue\n                queryset.filter(id=list.id).update(position=list.position - 1)\n            list_data.update(position=new_position)\n        return Response(status=status.HTTP_200_OK)\n","sub_path":"moduls/lists/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"115624751","text":"#!/usr/bin/env python3\n\n'''Contains all requests that can be made to the SPAMD service.'''\n\nfrom aiospamc.common import RequestResponseBase\n\n\nclass Request(RequestResponseBase):\n    '''SPAMC request object.\n\n    Attributes\n    ----------\n    verb : :obj:`str`\n        Method name of the request.\n    version : :obj:`str`\n        Protocol version.\n    body : :obj:`str` or :obj:`bytes`\n        String representation of the body. An instance of the\n        :class:`aiospamc.headers.ContentLength` will be automatically added.\n    '''\n\n    def __init__(self, verb, version='1.5', headers=None, body=None):\n        '''Request constructor.\n\n        Parameters\n        ----------\n        verb : :obj:`str`\n            Method name of the request.\n        version: :obj:`str`\n            Version of the protocol.\n        body : :obj:`str` or :obj:`bytes`, optional\n            String representation of the body. An instance of the\n            :class:`aiospamc.headers.ContentLength` will be automatically added.\n        headers : tuple of :class:`aiospamc.headers.Header`, optional\n            Collection of headers to be added. If it contains an instance of\n            :class:`aiospamc.headers.Compress` then the body is automatically\n            compressed.\n        '''\n\n        self.verb = verb\n        self.version = version\n        super().__init__(body, headers)\n\n    def __bytes__(self):\n        if self._compressed_body:\n            body = self._compressed_body\n        elif self.body:\n            body = self.body.encode()\n        else:\n            body = b''\n\n        request = (b'%(verb)b '\n                   b'SPAMC/%(version)b'\n                   b'\\r\\n'\n                   b'%(headers)b\\r\\n'\n                   b'%(body)b')\n\n        return request % {b'verb': self.verb.encode(),\n                          b'version': self.version.encode(),\n                          b'headers': b''.join(map(bytes, self._headers.values())),\n                          b'body': body}\n","sub_path":"aiospamc/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"463956528","text":"print('Enter the two numbers separated by space.')\nnumList = list(map(int, input().split()))\n\ntotal = 0\nfor x in numList:\n    if x > 0:\n        total += 1\n\nif total == 1:\n    print('Yes')\nelse:\n    print('No')","sub_path":"practice/num+ve-ve.py","file_name":"num+ve-ve.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"156488336","text":"from django.shortcuts import render\nfrom backManage import models\nfrom backManage import forms\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect,reverse\nfrom django.views.generic.base import View\n\ndef mylogin(request):\n    if request.method == 'GET':\n        print(\"reviewManage: 我是mylogin的GET\")\n        judge_username = request.COOKIES.get('judge_username', '')\n        return render(request, 'login-review.html', locals())\n    elif request.method == 'POST':\n        # Get the form data\n        print(\"reviewManage: 我是mylogin的POST\")\n        judge_username = request.POST.get('judge_username', '')\n        password = request.POST.get('password', '')\n        print(judge_username, \"--\", password)\n#        # Verify that the username and password are correct\n        try:\n            print(\"我进入mylogin的try里来了\")\n            user = models.Judge.objects.get(judge_username = judge_username,\n                                            password = password)\n            print(user)\n            
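# Login succeeded: record the judge's identity in the session so later views can authorize the user.\n            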
print(\"我存在数据库中\")\n            # Record the current user's info in the session for this connection\n            request.session['userinfo'] = {\n                \"judge_username\": user.judge_username,\n            }\n        except:\n            # login failed\n            print(\"我登录失败了\")\n            return render(request, 'login-review.html', locals())\n        # Handle the COOKIES\n        print(\"我要跳转到login-review.html\")\n        resp = redirect(reverse('review:rmain'))\n        resp.set_cookie('judge_username', judge_username, 5*24*60*60)\n        print(\"我已经设置了cookie\")\n        return resp\n\ndef review_main(request):\n    session_review = request.session.get('userinfo', '')\n    if session_review:\n        print(\"session_review\", session_review)\n        judge_username = session_review['judge_username']\n        user = models.Judge.objects.get(judge_username = judge_username)\n        judge_name = user.judge_name\n        return render(request, \"index-review.html\", locals())\n    else:\n        print(\"我没有session\")\n        return redirect(reverse('review:rlogin'))\n\ndef no_review_work(request):\n    session_review = request.session.get('userinfo', '')\n    judge_username = session_review['judge_username']\n    judge = models.Judge.objects.get(judge_username = judge_username)\n    scores = models.Score.objects.filter(judge = judge, judge_is_review = \"否\")\n    print(scores)\n    if scores:\n        works = [s.work for s in scores]\n        teams = [w.work_id for w in works]\n    else:\n        works = []\n    if request.method == \"GET\":\n        return render(request, 'work-no-review.html', locals())\n    elif request.method == \"POST\":\n        return HttpResponse(\"此界面无POST方法.\")\n\ndef judge_score(request, work_id):\n    session_review = request.session.get('userinfo', '')\n    judge_username = session_review['judge_username']\n    judge = models.Judge.objects.get(judge_username = judge_username)\n    team = models.Team.objects.get(work_id = work_id)\n    work = models.Work.objects.get(work_id = team)\n    score = models.Score.objects.get(work = work, judge = judge)\n    print(score)\n    print(work_id)\n    is_submit_judge = score.judge_is_review\n    if request.method == \"GET\":\n        return render(request, \"judge-score.html\", locals())\n    elif request.method == \"POST\":\n        workScoreForm = forms.WorkScoreForm(request.POST, request.FILES)\n        if workScoreForm.is_valid():\n            score_ponit = request.POST.get(\"score_ponit\", \"\")\n            my_score = request.POST.get(\"score\", \"\")\n            score.judge_score = int(my_score)\n            score.judge_detail = score_ponit\n            score.judge_is_review = \"是\"\n            is_submit_judge = score.judge_is_review\n            \n            score.save()\n            judge_detail = score_ponit\n            judge_score = score\n            return render(request, \"work-no-review.html\", locals())\n        else:\n            file_error = forms.get_errors(workScoreForm)\n            print(workScoreForm.errors.get_json_data())\n            return render(request, 'judge-score.html', locals()) \n    \ndef score_temp(request, work_id):\n    session_review = request.session.get('userinfo', '')\n    judge_username = session_review['judge_username']\n    judge = models.Judge.objects.get(judge_username = judge_username)\n    team = models.Team.objects.get(work_id = work_id)\n    work = models.Work.objects.get(work_id = team)\n    score = models.Score.objects.get(work = work, judge = judge)\n    \n    is_submit_judge = \"否\"\n    \n    if request.method == \"POST\":\n        workScoreForm = forms.WorkScoreForm(request.POST, request.FILES)\n        if workScoreForm.is_valid():\n            score_ponit = request.POST.get(\"score_ponit\", \"\")\n            my_score = request.POST.get(\"score\", \"\")\n            score.judge_score = int(my_score)\n            score.judge_detail = score_ponit\n            score.save()\n            judge_detail = score_ponit\n            judge_score = my_score\n            return render(request, \"work-no-review.html\", locals())\n        else:\n            file_error = forms.get_errors(workScoreForm)\n            
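# Form validation failed: log the errors and re-render the scoring page.\n            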
print(workScoreForm.errors.get_json_data())\n return render(request, 'judge-score.html', locals()) \n\ndef already_review_work(request):\n session_review = request.session.get('userinfo', '')\n judge_username = session_review['judge_username']\n judge = models.Judge.objects.get(judge_username = judge_username)\n scores = models.Score.objects.filter(judge = judge, judge_is_review = \"是\")\n works = [s.work for s in scores]\n teams = [w.work_id for w in works]\n if request.method == \"GET\":\n return render(request, 'work-already-review.html', locals())\n elif request.method == \"POST\":\n return HttpResponse(\"此界面无POST方法.\")\n\n\ndef judge_already_score(request, work_id):\n session_review = request.session.get('userinfo', '')\n judge_username = session_review['judge_username']\n judge = models.Judge.objects.get(judge_username = judge_username)\n team = models.Team.objects.get(work_id = work_id)\n work = models.Work.objects.get(work_id = team)\n score = models.Score.objects.get(work = work, judge = judge)\n score_ponit = score.judge_detail\n score = score.judge_score\n work_score = models.Score.objects.filter(work = work)\n score_list = [s.judge_score for s in work_score]\n score_list_limit = [int(sl) for sl in score_list if sl != \"0\"]\n ave_score = round(sum(score_list_limit)/len(score_list_limit), 2)\n return render(request, 'judge-score-details.html', locals())\n\ndef mylogout(request):\n if 'userinfo' in request.session:\n del request.session['userinfo']\n return redirect(reverse('review:rlogin'))\n ","sub_path":"reviewManage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"577353223","text":"from flask import Flask, request, abort\nimport csv\nimport sys\nimport random\nimport re\nimport pandas as pd\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n)\nimport os\n\napp = Flask(__name__)\n\nLINE_BOT_API = os.environ[\"LINE_BOT_API\"]\nCHANNEL_SECRET = os.environ[\"CHANNEL_SECRET\"]\nMY_USER_ID = os.environ[\"MY_USER_ID\"]\nline_bot_api = LineBotApi(LINE_BOT_API)\nhandler = WebhookHandler(CHANNEL_SECRET)\nmy_user_id = MY_USER_ID\n\ndef main():\n word_data = pd.read_csv(\"word.csv\").values.tolist()\n target_word = random.choice(word_data)\n\n messages = TextSendMessage(text=target_word[0])\n line_bot_api.push_message(my_user_id, messages=messages)\n\n messages = TextSendMessage(text=target_word[1])\n line_bot_api.push_message(my_user_id, messages=messages)\n\n@app.route('/')\ndef hello():\n name = \"Hello World\"\n return name\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n print(\"Invalid signature. 
Please check your channel access token/channel secret.\")\n        abort(400)\n\n    return 'OK'\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"303982433","text":"import codecs\nimport pandas as pd\n\nfile = codecs.open(\"/Users/zhenghaodeng/Desktop/Q3/ALY6140/W5/Capstone/winequality-red.csv\", 'r', 'utf-8')\ndata = file.readlines()\n\nfor i in range(len(data)):\n    if i == 0:\n    \t# strip only removes the specified characters from the two ends of the string\n        data[0] = [item.strip('\"') for item in data[i].split(';')]\n        # note the trailing newline, so strip the last token once more\n        data[0][len(data[0]) - 1] = data[0][len(data[0]) - 1].strip('\"\\n')\n        continue\n    data[i] = [float(item) for item in data[i].split(';')]\n\nwine_df = pd.DataFrame(columns=data[0], data=data[1:])\nwine_df.drop_duplicates(inplace=True)\n\nprint(wine_df.head())\nprint(wine_df.describe())\n\n''' reference code for computing the share of each quality level '''\n# separate features and labels\nfeatures = wine_df.drop('quality', 1)\nlabels = wine_df['quality']\n\nl = len(labels)\nprint('Statistic info of wine quality is as follows:')\nfor i in range(7):\n    print('Quality = %d: %.3f%%' % (i + 3, labels[labels == i + 3].count() / l * 100))\n\nprint('Quality between 5~7: %.3f%%' % (labels[(labels < 8) & (labels > 4)].count() / l * 100))\n\n''' reference code for counting distinct values of each feature '''\ncol = wine_df.columns\n\nunique_value = [len(wine_df[col[i]].unique()) for i in range(len(col))]\nprint(pd.DataFrame(data=unique_value, index=col, columns=['unique value']))\n\n'''dataset split'''\nfrom sklearn.model_selection import train_test_split\n# test set takes 20%; random_state=0 makes the (pseudo-random) split reproducible across runs\nfeatures_train, features_test, labels_train, labels_test = train_test_split(\n    features, labels, test_size=0.2, random_state=0)\n\nprint(features_train.shape)\nprint(features_test.shape)\n\nfrom sklearn.feature_selection import SelectKBest, chi2\n\nsp = SelectKBest(chi2, k=9)\nfeatures_train_selected = sp.fit_transform(features_train, labels_train)\n# if the dataset has not been split before this point, the next line can be omitted, though results may differ slightly\nfeatures_test_selected = sp.transform(features_test)\n\nprint(features_train_selected.shape)\nprint(features_test_selected.shape)\n# the next three lines only show which variables were dropped; here the third and fourth from the end were removed\nprint(sp.scores_)\nprint(sp.get_params())\nprint(sp.get_support())\n\n\n# drop fliers via LOF\ndef dropFliers(features, labels, threshold):\n    from sklearn.neighbors import LocalOutlierFactor as LOF\n\n    lof = LOF(contamination=threshold).fit(features)\n    r_features = features[lof.negative_outlier_factor_ > lof.threshold_]\n    r_labels = labels[lof.negative_outlier_factor_ > lof.threshold_]\n    return r_features, r_labels\n\n\nr_features_train_selected, r_labels_train = dropFliers(features_train_selected, labels_train, 0.1)\nr_features_test_selected, r_labels_test = dropFliers(features_test_selected, labels_test, 0.1)\n\nprint(r_features_train_selected.shape)\nprint(r_features_test_selected.shape)\n\n\n'''training results with default parameters'''\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\ndef clf(estimator, features_train, features_test, labels_train, labels_test):\n    estimator.fit(features_train, labels_train)\n    # accuracy\n    print('Accuracy: %.3f%%' % (estimator.score(features_test, labels_test) * 100))\n\t# confusion matrix\n    cm = metrics.confusion_matrix(labels_test, estimator.predict(features_test))\n    print(cm)\n    # print the full classification report\n    print(metrics.classification_report(labels_test, estimator.predict(features_test)))\n    plt.figure()\n    plt.xlabel('Predicted labels')\n    plt.ylabel('True labels')\n    plt.imshow(cm)\n\nrf = RandomForestClassifier()\nprint('\\nTraining result for RandomForest:')\nclf(rf, r_features_train_selected, r_features_test_selected, r_labels_train, r_labels_test)\nada = AdaBoostClassifier()\nprint('\\nTraining result for AdaBoost:')\nclf(ada, r_features_train_selected, r_features_test_selected, r_labels_train, r_labels_test)\n","sub_path":"whole.py","file_name":"whole.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"129499714","text":"from sklearn.neighbors import KernelDensity\r\nimport numpy as np\r\nimport sklearn.neighbors.kde as kde\r\nimport scipy.integrate as integrate\r\nimport random\r\n\r\nimport itertools\r\nimport math\r\nimport collections\r\n\r\n\r\nclass KD32:\r\n    def __init__(self, \r\n                 window_size=45, \r\n                 window_queue_size=10,\r\n                 accumulative_threshold=0.00001, \r\n                 detection_threshold=0.0001,\r\n                 kernel=\"gaussian\",\r\n                 bandwidth=0.5\r\n                 ):\r\n\r\n        self.kernel = kernel\r\n        self.bandwidth = bandwidth\r\n        self.data_point_sequence = []\r\n        self.data_raw = []\r\n\r\n        self.diff_sequence = [] \r\n        self.diff_sum_sequence = [] \r\n        self.drift_detected_seq_nums = [] \r\n\r\n        self.window_size = window_size\r\n        self.accumulative_threshold = accumulative_threshold\r\n        self.accumulative_threshold_to_detect = detection_threshold\r\n\r\n        self.current_diff = 0 # may not need to be a member\r\n        self.diff_sum = 0\r\n\r\n\r\n\r\n    def calculate_windows_bound(self, window_1, window_2):\r\n        window_1 = window_1+window_2\r\n        #calculate windows bound\r\n        bounds = []\r\n        bounds.append(min(window_1))\r\n        bounds.append(max(window_1))\r\n        return bounds\r\n\r\n\r\n    def add_element(self, data_points):\r\n        # data_points must be an iterable\r\n        #print(data_points);\r\n        self.data_point_sequence.extend([data_points])\r\n        #self.data_raw.extend(data_raw) \r\n        \r\n    def integrate_distance(self, kde1,kde2,bound):\r\n        chk1,e1 = integrate.quad(lambda x: np.exp(kde1.score_samples(np.array([x]).reshape(-1,1)))[0], bound[0], bound[1])\r\n        chk2,e2 = integrate.quad(lambda x: np.exp(kde2.score_samples(np.array([x]).reshape(-1,1)))[0], bound[0], bound[1])\r\n        diff = chk1 - chk2\r\n        # diff = diff * diff\r\n        diff = np.absolute(diff)\r\n        return diff\r\n\r\n    def estimate_kde(self,samples,kernel='gaussian',bandwidth=0.5):\r\n\r\n        kde_estimator = KernelDensity(kernel=kernel, bandwidth=bandwidth)\r\n        kde_estimator.fit(samples)\r\n        return kde_estimator\r\n\r\n\r\n\r\n    # Return tuple (is_drift_detected, current_diff, diff_sum)\r\n    def detected_change(self):\r\n        \r\n        sequence_size = len(self.data_point_sequence)\r\n        \r\n        # Not enough data points for two windows\r\n        if (sequence_size < 2 * self.window_size):\r\n            diff = 0\r\n            self.diff_sequence.append(diff)\r\n            self.diff_sum_sequence.append(self.diff_sum)\r\n            return False\r\n        #check whether data is enough for 1 window??\r\n        if (sequence_size % self.window_size != 0):\r\n            return False\r\n\r\n        #return True\r\n        window_2_left_bound = sequence_size - self.window_size\r\n        window_1_left_bound = sequence_size - 2 * self.window_size\r\n\r\n        window_1 = self.data_point_sequence[window_1_left_bound: window_2_left_bound]\r\n        window_2 = self.data_point_sequence[window_2_left_bound: ]\r\n\r\n        #Calculate the bound from window 1\r\n        bounds = self.calculate_windows_bound(window_1, window_2)\r\n        window_1 = np.reshape(window_1, (len(window_1),-1))\r\n        window_2 = np.reshape(window_2,(len(window_1),-1))\r\n\r\n        kde_window_1 = 
self.estimate_kde(window_1,self.kernel,self.bandwidth)\r\n kde_window_2 = self.estimate_kde(window_2,self.kernel,self.bandwidth)\r\n \r\n \r\n diff = self.integrate_distance(kde_window_1, kde_window_2, bounds)\r\n \r\n\r\n \r\n if (diff > self.accumulative_threshold):\r\n self.diff_sum = self.diff_sum + diff\r\n\r\n self.diff_sequence.append(diff)\r\n \r\n\r\n is_drift_detected = False\r\n if (self.diff_sum >= self.accumulative_threshold_to_detect):\r\n # Check whether this happened when diff was rising (rather than falling)\r\n self.drift_detected_seq_nums.append(sequence_size )\r\n self.diff_sum = 0\r\n is_drift_detected = True\r\n\r\n return is_drift_detected\r\n\r\n\r\n","sub_path":"detector/kd32.py","file_name":"kd32.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160255336","text":"import sys\nsys.path.append('.')\nfrom __init__ import Tr, g0, P_sat_G, omega, lamda, deltaOmega, calcEnergyInOneStepSize\n\ndef G(u):\n \"\"\"\n Gain corresponding to each frequency\n \"\"\"\n energyInOneStepSize = calcEnergyInOneStepSize(u) # energy in one step size (J)\n P_ave = energyInOneStepSize/Tr\n g_ = g0/(1+P_ave/P_sat_G)\n\n # Lorentzian lineshape\n g = g_*(deltaOmega)**2/(omega**2+(deltaOmega)**2)\n\n return g\n","sub_path":"python program/G.py","file_name":"G.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"564360988","text":"#################################################################################\n# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,\n# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,\n# National Renewable Energy Laboratory, and National Energy Technology\n# Laboratory (subject to receipt of any required approvals from the U.S. Dept.\n# of Energy). All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. 
These files are also available online at the URL\n# \"https://github.com/watertap-org/watertap/\"\n#################################################################################\nfrom pyomo.environ import (\n ConcreteModel,\n value,\n TransformationFactory,\n units as pyunits,\n)\nimport idaes.logger as idaeslog\nfrom pyomo.network import Arc, SequentialDecomposition\nfrom pyomo.util.check_units import assert_units_consistent\n\nfrom idaes.core import FlowsheetBlock\nfrom idaes.core.solvers import get_solver\nfrom idaes.models.unit_models import Product\nimport idaes.core.util.scaling as iscale\nfrom idaes.core import UnitModelCostingBlock\n\nfrom watertap.core.util.initialization import assert_degrees_of_freedom, check_solve\n\nfrom watertap.core.wt_database import Database\nimport watertap.core.zero_order_properties as prop_ZO\nfrom watertap.unit_models.zero_order import (\n FeedZO,\n WaterPumpingStationZO,\n CoagulationFlocculationZO,\n SedimentationZO,\n OzoneZO,\n FixedBedZO,\n GACZO,\n UVZO,\n IonExchangeZO,\n ChlorinationZO,\n StorageTankZO,\n)\nfrom watertap.core.zero_order_costing import ZeroOrderCosting\n\n# Set up logger\n_log = idaeslog.getLogger(__name__)\n\n\ndef main():\n m = build()\n\n set_operating_conditions(m)\n assert_degrees_of_freedom(m, 0)\n\n initialize_system(m) # initialization needed for ozone unit\n\n results = solve(m, checkpoint=\"solve flowsheet after initializing system\")\n display_results(m)\n\n add_costing(m)\n initialize_costing(m)\n assert_degrees_of_freedom(m, 0)\n assert_units_consistent(m)\n\n results = solve(m, checkpoint=\"solve flowsheet with costing\")\n display_costing(m)\n return m, results\n\n\ndef build():\n # flowsheet set up\n m = ConcreteModel()\n m.db = Database()\n\n m.fs = FlowsheetBlock(dynamic=False)\n m.fs.prop = prop_ZO.WaterParameterBlock(solute_list=[\"tds\", \"tss\", \"toc\"])\n\n # unit models\n m.fs.feed = FeedZO(property_package=m.fs.prop)\n m.fs.intake_pump = WaterPumpingStationZO(\n property_package=m.fs.prop, database=m.db, process_subtype=\"raw\"\n )\n m.fs.coag_and_floc = CoagulationFlocculationZO(\n property_package=m.fs.prop, database=m.db\n )\n m.fs.sedimentation = SedimentationZO(property_package=m.fs.prop, database=m.db)\n m.fs.ozonation = OzoneZO(property_package=m.fs.prop, database=m.db)\n m.fs.gravity_basin = FixedBedZO(\n property_package=m.fs.prop, database=m.db, process_subtype=\"gravity_basin\"\n )\n m.fs.gac = GACZO(\n property_package=m.fs.prop, database=m.db, process_subtype=\"pressure_vessel\"\n )\n m.fs.backwash_pump = WaterPumpingStationZO(\n property_package=m.fs.prop, database=m.db, process_subtype=\"treated\"\n )\n m.fs.uv = UVZO(property_package=m.fs.prop, database=m.db)\n m.fs.anion_exchange = IonExchangeZO(\n property_package=m.fs.prop, database=m.db, process_subtype=\"anion_exchange\"\n )\n m.fs.chlorination = ChlorinationZO(property_package=m.fs.prop, database=m.db)\n m.fs.storage = StorageTankZO(property_package=m.fs.prop, database=m.db)\n m.fs.recharge_pump = WaterPumpingStationZO(\n property_package=m.fs.prop, database=m.db, process_subtype=\"treated\"\n )\n m.fs.product = Product(property_package=m.fs.prop)\n\n # connections\n m.fs.s01 = Arc(source=m.fs.feed.outlet, destination=m.fs.intake_pump.inlet)\n m.fs.s02 = Arc(source=m.fs.intake_pump.outlet, destination=m.fs.coag_and_floc.inlet)\n m.fs.s03 = Arc(\n source=m.fs.coag_and_floc.outlet, destination=m.fs.sedimentation.inlet\n )\n m.fs.s04 = Arc(source=m.fs.sedimentation.treated, destination=m.fs.ozonation.inlet)\n m.fs.s05 = 
Arc(source=m.fs.ozonation.treated, destination=m.fs.gravity_basin.inlet)\n m.fs.s06 = Arc(source=m.fs.gravity_basin.treated, destination=m.fs.gac.inlet)\n m.fs.s07 = Arc(source=m.fs.gac.treated, destination=m.fs.uv.inlet)\n m.fs.s08 = Arc(source=m.fs.gac.byproduct, destination=m.fs.backwash_pump.inlet)\n m.fs.s09 = Arc(source=m.fs.uv.treated, destination=m.fs.anion_exchange.inlet)\n m.fs.s10 = Arc(\n source=m.fs.anion_exchange.treated, destination=m.fs.chlorination.inlet\n )\n m.fs.s11 = Arc(source=m.fs.chlorination.treated, destination=m.fs.storage.inlet)\n m.fs.s12 = Arc(source=m.fs.storage.outlet, destination=m.fs.recharge_pump.inlet)\n m.fs.s13 = Arc(source=m.fs.recharge_pump.outlet, destination=m.fs.product.inlet)\n TransformationFactory(\"network.expand_arcs\").apply_to(m)\n\n # scaling\n iscale.calculate_scaling_factors(m)\n\n return m\n\n\ndef set_operating_conditions(m):\n # ---specifications---\n # feed\n flow_vol = 0.9224 * pyunits.m**3 / pyunits.s\n conc_mass_tds = 0.63 * pyunits.kg / pyunits.m**3\n conc_mass_tss = 0.006525 * pyunits.kg / pyunits.m**3\n conc_mass_toc = 0.004 * pyunits.kg / pyunits.m**3\n\n m.fs.feed.flow_vol[0].fix(flow_vol)\n m.fs.feed.conc_mass_comp[0, \"tds\"].fix(conc_mass_tds)\n m.fs.feed.conc_mass_comp[0, \"tss\"].fix(conc_mass_tss)\n m.fs.feed.conc_mass_comp[0, \"toc\"].fix(conc_mass_toc)\n solve(m.fs.feed, checkpoint=\"solve feed block\")\n\n # intake pump\n m.fs.intake_pump.load_parameters_from_database()\n m.fs.intake_pump.electricity.fix(93.2)\n\n # coagulation and flocculation\n m.fs.coag_and_floc.load_parameters_from_database(use_default_removal=True)\n\n # sedimentation\n m.fs.sedimentation.load_parameters_from_database(use_default_removal=True)\n\n # # ozonation\n m.fs.ozonation.load_parameters_from_database(use_default_removal=True)\n\n # fixed bed gravity basin\n m.fs.gravity_basin.load_parameters_from_database(use_default_removal=True)\n\n # granular activated carbon\n m.fs.gac.load_parameters_from_database(use_default_removal=True)\n\n # backwash pump\n m.fs.backwash_pump.load_parameters_from_database()\n m.fs.backwash_pump.electricity.fix(37.3)\n\n # uv aop\n m.fs.uv.load_parameters_from_database(use_default_removal=True)\n m.fs.uv.uv_reduced_equivalent_dose.fix(200)\n m.fs.uv.uv_transmittance_in.fix(0.90)\n\n # anion exchange\n m.fs.anion_exchange.load_parameters_from_database(use_default_removal=True)\n m.fs.anion_exchange.removal_frac_mass_comp[0, \"tds\"].fix(0.9)\n\n # chlorination\n m.fs.chlorination.load_parameters_from_database(use_default_removal=True)\n\n # storage\n m.fs.storage.load_parameters_from_database(use_default_removal=True)\n m.fs.storage.storage_time.fix(6)\n\n # recharge pump\n m.fs.recharge_pump.load_parameters_from_database()\n m.fs.recharge_pump.electricity.fix(186.4)\n\n\ndef initialize_system(m):\n seq = SequentialDecomposition()\n seq.options.tear_set = []\n seq.options.iterLim = 1\n seq.run(m, lambda u: u.initialize())\n\n\ndef solve(blk, solver=None, checkpoint=None, tee=False, fail_flag=True):\n if solver is None:\n solver = get_solver()\n results = solver.solve(blk, tee=tee)\n check_solve(results, checkpoint=checkpoint, logger=_log, fail_flag=fail_flag)\n return results\n\n\ndef display_results(m):\n unit_list = [\n \"feed\",\n \"intake_pump\",\n \"coag_and_floc\",\n \"sedimentation\",\n \"ozonation\",\n \"gravity_basin\",\n \"gac\",\n \"backwash_pump\",\n \"uv\",\n \"anion_exchange\",\n \"chlorination\",\n \"storage\",\n \"recharge_pump\",\n \"product\",\n ]\n\n for u in unit_list:\n 
m.fs.component(u).report()\n\n\ndef add_costing(m):\n m.fs.costing = ZeroOrderCosting()\n # typing aid\n costing_kwargs = {\"flowsheet_costing_block\": m.fs.costing}\n m.fs.intake_pump.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.coag_and_floc.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.sedimentation.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.ozonation.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.gravity_basin.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.gac.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.backwash_pump.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.uv.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.anion_exchange.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.chlorination.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.storage.costing = UnitModelCostingBlock(**costing_kwargs)\n m.fs.recharge_pump.costing = UnitModelCostingBlock(**costing_kwargs)\n\n m.fs.costing.cost_process()\n m.fs.costing.add_electricity_intensity(m.fs.product.properties[0].flow_vol)\n m.fs.costing.add_LCOW(m.fs.product.properties[0].flow_vol)\n\n\ndef initialize_costing(m):\n m.fs.costing.initialize()\n\n\ndef display_costing(m):\n\n m.fs.costing.total_capital_cost.display()\n m.fs.costing.total_operating_cost.display()\n m.fs.costing.LCOW.display()\n\n print(\"\\nUnit Capital Costs\\n\")\n for u in m.fs.costing._registered_unit_costing:\n print(\n u.name,\n \" : \",\n value(pyunits.convert(u.capital_cost, to_units=pyunits.USD_2018)),\n )\n\n print(\"\\nUtility Costs\\n\")\n for f in m.fs.costing.used_flows:\n print(\n f,\n \" : \",\n value(\n pyunits.convert(\n m.fs.costing.aggregate_flow_costs[f],\n to_units=pyunits.USD_2018 / pyunits.year,\n )\n ),\n )\n\n print(\"\")\n total_capital_cost = value(\n pyunits.convert(m.fs.costing.total_capital_cost, to_units=pyunits.MUSD_2018)\n )\n print(f\"Total Capital Costs: {total_capital_cost:.2f} M$\")\n total_operating_cost = value(\n pyunits.convert(\n m.fs.costing.total_operating_cost, to_units=pyunits.MUSD_2018 / pyunits.year\n )\n )\n print(f\"Total Operating Costs: {total_operating_cost:.2f} M$/year\")\n electricity_intensity = value(\n pyunits.convert(\n m.fs.costing.electricity_intensity, to_units=pyunits.kWh / pyunits.m**3\n )\n )\n print(f\"Electricity Intensity: {electricity_intensity:.4f} kWh/m^3\")\n LCOW = value(\n pyunits.convert(m.fs.costing.LCOW, to_units=pyunits.USD_2018 / pyunits.m**3)\n )\n print(f\"Levelized Cost of Water: {LCOW:.4f} $/m^3\")\n\n\nif __name__ == \"__main__\":\n m, results = main()\n","sub_path":"watertap/examples/flowsheets/case_studies/municipal_treatment/municipal_treatment.py","file_name":"municipal_treatment.py","file_ext":"py","file_size_in_byte":10747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"621048826","text":"import string\nimport random\n\n\nSTRINGS = string.ascii_letters + string.digits + \"+/.\"\n\n\nclass IdentityService(object):\n \"\"\"Identity service provides a service for querying an identity\n for a user given by a payload from the database or generate a new\n one if not already exists.\n \"\"\"\n\n def __init__(self, redis_conn, ident_size):\n self.redis_conn = redis_conn\n self.ident_size = ident_size\n\n def _get_key(self, **kwargs):\n return \"services.identity:%s\" % (\n (\",\".join(\"%s=%s\" % (k, v) for k, v in sorted(kwargs.items()))),\n )\n\n def identity_for(self, **kwargs):\n \"\"\"Query the identity for user matching 
:param:`kwargs` payload\n or generate a new one if not exists.\n\n :param payload: Payload to identify this rate limit.\n \"\"\"\n key = self._get_key(**kwargs)\n ident = self.redis_conn.get(key)\n\n if ident is not None:\n return ident.decode(\"utf-8\")\n\n ident = \"\".join(random.choice(STRINGS) for x in range(self.ident_size))\n self.redis_conn.setnx(key, ident)\n self.redis_conn.expire(key, 86400)\n return ident\n","sub_path":"fanboi2/services/identity.py","file_name":"identity.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"60530627","text":"#!/usr/bin/env python\n\nimport logging,sys,json\nfrom ambx.ambx import AMBX\nfrom ambx.ambx import PKT_HEADER,Lights,SET_LIGHT_COLOR\nfrom ambx.ByteArray import B,Bhex\n\nlight_names = {\n\"left\": Lights.LEFT,\n\"right\": Lights.RIGHT,\n\"wwleft\": Lights.WWLEFT,\n\"wwcenter\": Lights.WWCENTER,\n\"wwright\": Lights.WWRIGHT,\n}\n\nlogging.basicConfig(level=logging.DEBUG,\n datefmt='%a, %d %b %Y %H:%M:%S',\n format='%(asctime)s %(levelname)-8s %(name)-8s %(message)s',\n stream=sys.stdout)\n\nlogger = logging.getLogger(\"light\")\n\ndev = AMBX(0)\n\ninData = sys.stdin.readline();\n\naction = json.loads(inData);\n\nfor item in action:\n\tdev.set_color_rgb8(light_names[item], action[item])\n\nsys.exit(0)","sub_path":"bin/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"472050823","text":"\nfrom app.models import User\nfrom flask.helpers import flash\nfrom app.users.forms import EditUserForm\nfrom flask import Blueprint\nfrom flask.globals import request\nfrom flask.json import jsonify\nfrom flask.templating import render_template\nfrom flask_login.utils import login_required\nimport app.database.users_db as u_manage\nfrom flask_restful import abort\nusers_bp = Blueprint('users', __name__)\n\n\n@users_bp.route(\"/\")\n@login_required\ndef users():\n u_data = u_manage.get_users()\n u_data = list(u_data.values())\n return render_template(\"users/users.jinja\", user_data=u_data)\n\n\n@users_bp.route(\"/get\", methods=[\"GET\", \"POST\"])\ndef home():\n if request.method == \"GET\":\n return jsonify(u_manage.get_users())\n if request.method == \"POST\":\n u_manage.add_user(request.form)\n return (\"\", 204)\n\n\n@users_bp.route(\"/\", methods=[\"GET\", \"PUT\", \"DELETE\"])\n@login_required\ndef user_info(username):\n user_dict = u_manage.get_user(username)\n if user_dict is None:\n abort(404)\n if request.method == \"PUT\":\n user_dict.update(dict(request.form))\n u_manage.update_user(user_dict)\n if request.method == \"DELETE\":\n u_manage.remove_user(username)\n return (\"\", 204)\n return jsonify(user_dict)\n\n\n@users_bp.route(\"/edit/\", methods=[\"GET\", \"POST\"])\n@login_required\ndef edit_user(user):\n form = EditUserForm()\n just_updated = False\n current_u = u_manage.get_user(user)\n if form.validate_on_submit():\n if current_u is not None: # Check if course still exists\n if form.userName.data != user and u_manage.get_user(form.userName.data) is not None:\n # The name we are changing to already exists! Abort\n flash(f'The username \"{form.userName.data}\" is already taken!')\n else:\n u = User.validate({\"userName\": form.userName.data,\n \"type\": form.role.data, \"password\": form.password.data})\n # We won't be changin the course code... 
for now\n u_manage.update_user(u)\n flash('User edited!')\n just_updated = True\n else:\n flash('The current course has been modified or deleted')\n\n # Just get the initial values\n\n if just_updated:\n form.userName.data = form.userName.data\n form.password.data = form.password.data\n form.role.data = form.role.data\n else:\n form.userName.data = current_u['userName']\n form.password.data = current_u['password']\n form.role.data = current_u['type']\n\n return render_template(\"users/create.jinja\", form=form)\n","sub_path":"app/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"421804116","text":"from ray.rllib.agents.dqn.apex import apex_execution_plan\nfrom ray.rllib.agents.ddpg.ddpg import DDPGTrainer, \\\n DEFAULT_CONFIG as DDPG_CONFIG\n\nAPEX_DDPG_DEFAULT_CONFIG = DDPGTrainer.merge_trainer_configs(\n DDPG_CONFIG, # see also the options in ddpg.py, which are also supported\n {\n \"optimizer\": {\n \"max_weight_sync_delay\": 400,\n \"num_replay_buffer_shards\": 4,\n \"debug\": False\n },\n \"exploration_config\": {\n \"type\": \"PerWorkerOrnsteinUhlenbeckNoise\"\n },\n \"n_step\": 3,\n \"num_gpus\": 0,\n \"num_workers\": 32,\n \"buffer_size\": 2000000,\n \"learning_starts\": 50000,\n \"train_batch_size\": 512,\n \"rollout_fragment_length\": 50,\n \"target_network_update_freq\": 500000,\n \"timesteps_per_iteration\": 25000,\n \"worker_side_prioritization\": True,\n \"min_iter_time_s\": 30,\n # If set, this will fix the ratio of sampled to replayed timesteps.\n # Otherwise, replay will proceed as fast as possible.\n \"training_intensity\": None,\n # Which mode to use in the ParallelRollouts operator used to collect\n # samples. 
For more details check the operator in rollout_ops module.\n        \"parallel_rollouts_mode\": \"async\",\n        # This only applies if async mode is used (above config setting).\n        # Controls the max number of async requests in flight per actor\n        \"parallel_rollouts_num_async\": 2,\n    },\n)\n\nApexDDPGTrainer = DDPGTrainer.with_updates(\n    name=\"APEX_DDPG\",\n    default_config=APEX_DDPG_DEFAULT_CONFIG,\n    execution_plan=apex_execution_plan)\n","sub_path":"rllib/agents/ddpg/apex.py","file_name":"apex.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"563091066","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 17 18:01:16 2020\r\n\r\n@author: conno\r\n\"\"\"\r\n\r\n\r\n\r\nimport sys\r\n\r\nimport json\r\nimport urllib.request\r\nfrom datetime import datetime\r\nfrom time import sleep\r\nfrom io import StringIO\r\nimport pandas as pd\r\nimport boto3\r\n\r\n\r\ntoken ='pk_871cfbde34a04823ba26850dfb9134b8'\r\nbucket='bigolebucket12345'\r\ntag = 'fb'\r\n\r\ndef _write_dataframe_to_csv_on_s3(dataframe, filename):\r\n    \"\"\" Write a dataframe to a CSV on S3 \"\"\"\r\n    print(\"Writing {} records to {}\".format(len(dataframe), filename))\r\n    # Create buffer\r\n    csv_buffer = StringIO()\r\n    # Write dataframe to buffer\r\n    dataframe.to_csv(csv_buffer, sep=\",\", index=False)\r\n    # Create S3 object\r\n    s3_resource = boto3.resource(\"s3\")\r\n    # Write buffer to S3 object\r\n    s3_resource.Object(bucket, filename).put(Body=csv_buffer.getvalue())\r\n\r\n\r\n\r\nn = 0\r\ntimestep=60\r\nopenseconds=(14-7.5)*60*60\r\nrecords_to_be_recorded = openseconds/timestep\r\n\r\n\r\ndata=[]\r\nwhile n<=records_to_be_recorded:\r\n    \r\n    try:\r\n        html = urllib.request.urlopen(\"https://cloud.iexapis.com/stable/tops?token=\"+token+\"&symbols=\"+tag)\r\n        lst = json.loads(html.read().decode('utf-8'))\r\n        symbol = lst[0]['symbol']\r\n        BidPrice = lst[0]['bidPrice']\r\n        BidSize = lst[0]['bidSize']\r\n        AskPrice = lst[0]['askPrice']\r\n        AskSize = lst[0]['askSize']\r\n        lastUpdatedTime = lst[0]['lastUpdated']\r\n        lastSalePrice = lst[0]['lastSalePrice']\r\n        lastSaleSize = lst[0]['lastSaleSize']\r\n        lastSaleTime = lst[0]['lastSaleTime']\r\n        volume = lst[0]['volume']\r\n        x=datetime.now()\r\n        y=lastSalePrice\r\n        \r\n\r\n        n += 1\r\n        data.append([symbol,lastSaleTime,lastSalePrice,volume])\r\n        sleep(timestep)\r\n    except:\r\n        print('No return on iteration ',n)\r\n\r\n\r\n\r\ntodaytag=datetime.today().strftime('%Y-%m-%d')\r\ndf=pd.DataFrame(data,columns=['symbol','lastSaleTime','lastSalePrice','volume'])\r\n_write_dataframe_to_csv_on_s3(df, tag+todaytag+'.csv')\r\n\r\n    ","sub_path":"Brenden's AWS and Bound Forecaster files/AWSrequest.py","file_name":"AWSrequest.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"104285303","text":"# Adapted from VRNN implemented by p0werHu: https://github.com/p0werHu/VRNN\n\n\nimport torch.distributions.normal as Norm\nimport torch.distributions.kl as KL\nimport torch.nn.functional as F\nimport torch\n\n\ndef loss(package, x):\n\n    prior_means, prior_var, decoder_means, decoder_var, x_decoded,_ = package\n    loss = 0.\n    classification_loss = 0\n    kld_loss_total = 0\n    nll_loss_total = 0\n    for i in range(x.shape[1]):\n        # Kld loss\n        norm_dis1 = Norm.Normal(prior_means[i], prior_var[i])\n        norm_dis2 = Norm.Normal(decoder_means[i], decoder_var[i])\n        kld_loss = 
torch.mean(KL.kl_divergence(norm_dis1, norm_dis2))\n #print(kld_loss)\n # reconstruction loss\n nll_loss = torch.mean(F.binary_cross_entropy(x_decoded[i], x[:, i, :], reduction='none'))\n #print(xent_loss)\n loss += nll_loss + kld_loss\n #print(all_classified[i].shape,labels.shape)\n kld_loss_total +=kld_loss\n nll_loss_total += nll_loss\n return loss,nll_loss_total,kld_loss_total","sub_path":"src/VRNN_loss_function.py","file_name":"VRNN_loss_function.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"391859621","text":"import json\nimport logging\nimport logging.config\nimport sys\n\nimport os\nimport pytumblr\nfrom boxmover import delete_queue, reblog_everything, confirm, like_everything, new_oauth, unlike_everything, \\\n get_follow_list\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {'simple': {'format': '%(asctime)s %(levelname)s: %(message)s'}},\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'stream': sys.stdout,\n 'formatter': 'simple'\n },\n 'status_file': {\n 'class': 'logging.FileHandler',\n 'filename': 'status.log',\n 'formatter': 'simple'\n }\n },\n 'loggers': {'root': {'handlers': ['console', 'status_file'], 'level': 'DEBUG'}}\n}\nlogging.config.dictConfig(LOGGING)\n\nlogger = logging.getLogger('root')\n\n\ndef move_posts():\n if confirm('About to empty your queue on {}, are you sure?'.format(new_blog), default='n'):\n delete_queue(client, new_blog)\n reblog_everything(client, old_blog, new_blog, limit=None, offset=None, interactive=False, dry_run=True)\n\n\ndef move_likes():\n like_everything(client, old_blog, dry_run=True)\n\n\ndef remove_likes():\n unlike_everything(client, dry_run=True)\n\n\ndef save_follow_list(save_path):\n with open(save_path, 'w+') as ff:\n json.dump(get_follow_list(client), ff, indent=4)\n\n\ndef load_follow_list(save_path, interactive=False):\n with open(save_path, 'r') as fl:\n follow_list = json.load(fl)\n i = 0\n total = len(follow_list)\n for follow in follow_list:\n i += 1\n logger.info('======= %s/%s: %s (%s)', i, total, follow['name'], follow['url'])\n if interactive and not confirm('Follow?', default='y'):\n continue\n logger.info('Followed %s.', follow['url'])\n # client.unfollow(follow['url'])\n client.follow(follow['url'])\n\n\nif __name__ == '__main__':\n # Load app credentials\n json_path = 'secrets.json'\n tokens = {}\n if not os.path.exists(json_path):\n tokens = new_oauth()\n with open(json_path, 'w+') as f:\n json.dump(tokens, f, indent=4)\n else:\n with open(json_path, 'r') as f:\n tokens = json.load(f)\n\n client = pytumblr.TumblrRestClient(\n tokens['consumer_key'],\n tokens['consumer_secret'],\n tokens['oauth_token'],\n tokens['oauth_token_secret']\n )\n\n # Script settings\n with open('settings.json') as f:\n settings = json.load(f)\n old_blog = settings['old_blog']\n new_blog = settings['new_blog']\n\n # save_follow_list('follows.json')\n load_follow_list('follows.json', interactive=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"25864862","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n\nimport socket\nfrom zlib import adler32\nfrom six.moves._thread import start_new_thread\n\nfrom drove.plugin import Plugin\nfrom drove.data import Data\nfrom drove.util.network import parse_addr\n\n\nMAX_UDP_SIZE = 1024\n\n\nclass NetworkPlugin(Plugin):\n\n def setup(self, config):\n self.listen = config.get(\"plugin.network.listen\", \"\")\n self.server = config.get(\"plugin.network.server\", \"\").split(\",\")\n self.sock = {\n socket.AF_INET: socket.socket(socket.AF_INET, socket.SOCK_DGRAM),\n socket.AF_INET6: socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)\n }\n self._cache = {}\n\n if self.server:\n self.write = self.network_write\n\n if self.listen:\n try:\n host, port, af = parse_addr(self.listen)\n self.s_listen = socket.socket(af, socket.SOCK_DGRAM)\n self.s_listen.bind((host, port))\n self.log.info(\"Plugin network listening: %s:%d\" %\n (host, port,))\n start_new_thread(self._listen, ())\n except ValueError:\n self.log.error(\"Invalid address in 'listen' config: %s\" %\n (self.listen,))\n\n def _listen(self):\n while True:\n data, addr = self.s_listen.recvfrom(MAX_UDP_SIZE)\n try:\n value = Data.from_dump(data.decode(\"utf-8\"))\n except ValueError:\n self.log.debug(\"Plugin network received \" +\n \"invalid data from %s:%d\" % addr)\n else:\n self.emit(value)\n\n def _shard(self, node):\n if \"node\" in self._cache:\n return self._cache[\"node\"]\n\n if len(self.server) == 1:\n server = self.server[0]\n else:\n print(node)\n server = self.server[adler32(node.encode('utf-8')) % len(self.server)]\n\n self._cache[\"node\"] = parse_addr(server)\n return self._cache[\"node\"]\n\n def _send(self, conn, data):\n self.log.debug(\"Sending data: %s to %s:%d\" % (data, conn[0], conn[1],))\n self.sock[conn[2]].sendto(bytes(data, \"utf-8\"), conn[0:2])\n\n def network_write(self, channel):\n for data in channel.receive(\"network\"):\n self._send(self._shard(data.nodename), data.dump())\n","sub_path":"plugin/droveio/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"317189378","text":"#CONSOLIDADOR DE ARCHIVOS EXCEL. 
Total de filas y columnas --- > estáticas\r\n#Colocar este archivo dentro de la carpeta contenedora de los archivos a consolidar (*.xls*)\r\n\r\nimport pandas as pd\r\nfrom glob import glob\r\nfrom os import getcwd\r\n#######################################################################################################\r\n\r\n#cwd = getcwd()\r\nexcel_names = glob('H:\\\\Digital\\\\HUB\\\\Fronts\\\\Media_Planes\\\\2019\\\\Junio\\\\*.xls*') #Conjunto de archivos Excel en la ruta especificada\r\ndf = pd.DataFrame()\r\ncontador = 0\r\n\r\nfor e in excel_names:\r\n\tx = pd.read_excel(e, sheet_name = 'MEDIA PLAN')\r\n\tx = x.loc[10:60,'Unnamed: 1':'Unnamed: 56']\r\n\tdf = df.append(x)\r\n\tcontador += 1\r\n\tprint(f\"Copia de MP {contador} finalizada!\")\r\n\r\nprint(f\"Cantidad total de MP: {contador}\")\r\n\r\n#From df to Excel:\r\nwriter = pd.ExcelWriter('C:\\\\Users\\\\Juan.Elorriaga\\\\Desktop\\\\vieja pc\\\\Proyectos Py\\\\Bases_consolidadas\\\\EXCEL_UNIFICADO.xlsx')\r\ndf.to_excel(writer)\r\nwriter.save()\r\n","sub_path":"Python/merger_excels_groupm.py","file_name":"merger_excels_groupm.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"316169345","text":"from distutils.core import setup, Extension\nimport sys\n\nif sys.version_info < (3,5):\n    macro_list = [ ( \"PYTHON_VERSION_OLDER_THREE_FIVE\", \"1\" ) ]\nelse:\n    macro_list = [ ]\n\nsetup(\n    name = 'PyNormaliz',\n    version = '0.1',\n    description = 'A simple interface to LibNormaliz',\n    author = 'Sebastian Gutsche',\n    author_email = 'sebastian.gutsche@gmail.com',\n    url = 'https://github.com/sebasguts/PyNormaliz',\n    ext_modules= [ Extension( \"PyNormaliz\",\n                              [ \"NormalizModule.cpp\" ],\n                              include_dirs=['/usr/local/include/LibNormaliz', '/usr/local/include/', '/usr/include' ],\n                              library_dirs=['/usr/local/lib', '/usr/lib' ],\n                              extra_link_args=['-lnormaliz', '-lgmp' ],\n                              define_macros = macro_list ) ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"117559445","text":"\r\nimport re\r\n\r\ndef contraseña(contraseña):\r\n    if 0 < len(contraseña) < 26:\r\n        if re.search('[a-z]', contraseña) or re.search('[A-Z]', contraseña):\r\n            return True  \r\n        \r\n    return False\r\n    \r\n\r\nclave = input('Digite contraseña: ')\r\n\r\nprint(contraseña(clave))","sub_path":"Modulo 1/problema2.py","file_name":"problema2.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"586206139","text":"import os\nimport math\nimport torch\nimport torchvision as tv\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport argparse\nimport skimage.data\nimport skimage.io\nimport skimage.transform\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 使用请修改第168行神经网络参数\n\n\n# 卷积核大小\nddd = '5'\n\n# 定义是否使用GPU\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# plt.figure(dpi=150)\n# Load training and testing datasets.\n\n# 定义数据预处理方式(将输入的类似numpy中arrary形式的数据转化为pytorch中的张量(tensor))\ntransform = transforms.ToTensor()\n\n\ndef get_picture(picture_dir, transform):\n    '''\n    该算法实现了读取图片,并将其类型转化为Tensor\n    '''\n    img = skimage.io.imread(picture_dir)\n    img256 = skimage.transform.resize(img, (256, 256))\n    img256 = np.asarray(img256)\n    img256 = 
img256.astype(np.float32)\n\n return transform(img256)\n\n\ndef get_picture_rgb(picture_dir):\n '''\n 该函数实现了显示图片的RGB三通道颜色\n '''\n img = skimage.io.imread(picture_dir)\n img256 = skimage.transform.resize(img, (256, 256))\n # skimage.io.imsave('4.jpg',img256)\n\n # 取单一通道值显示\n for i in range(3):\n img = img256[:,:,i]\n ax = plt.subplot(1, 3, i + 1)\n ax.set_title('Feature {}'.format(i))\n ax.axis('off')\n plt.imshow(img)\n plt.show()\n\n r = img256.copy()\n r[:,:,0:2]=0\n ax = plt.subplot(1, 4, 1)\n ax.set_title('B Channel')\n # ax.axis('off')\n plt.imshow(r)\n\n g = img256.copy()\n g[:,:,0]=0\n g[:,:,2]=0\n ax = plt.subplot(1, 4, 2)\n ax.set_title('G Channel')\n # ax.axis('off')\n plt.imshow(g)\n\n b = img256.copy()\n b[:,:,1:3]=0\n ax = plt.subplot(1, 4, 3)\n ax.set_title('R Channel')\n # ax.axis('off')\n plt.imshow(b)\n\n img = img256.copy()\n ax = plt.subplot(1, 4, 4)\n ax.set_title('image')\n # ax.axis('off')\n plt.imshow(img)\n\n # img = img256.copy()\n # ax = plt.subplot()\n # ax.set_title('image')\n # # ax.axis('off')\n # plt.imshow(img)\n\n plt.show()\n\n\ndef gabor_fn(kernel_size, channel_in, channel_out, sigma, theta, Lambda, psi, gamma):\n sigma_x = sigma # [channel_out]\n sigma_y = sigma.float() / gamma # element-wize division, [channel_out]\n\n # Bounding box\n nstds = 3 # Number of standard deviation sigma\n xmax = kernel_size // 2\n ymax = kernel_size // 2\n xmin = -xmax\n ymin = -ymax\n ksize = xmax - xmin + 1\n y_0 = torch.arange(ymin, ymax+1)\n y = y_0.view(1, -1).repeat(channel_out, channel_in, ksize, 1).float()\n x_0 = torch.arange(xmin, xmax+1)\n x = x_0.view(-1, 1).repeat(channel_out, channel_in, 1, ksize).float() # [channel_out, channelin, kernel, kernel]\n\n # Rotation\n # don't need to expand, use broadcasting, [64, 1, 1, 1] + [64, 3, 7, 7]\n x_theta = x * torch.cos(theta.view(-1, 1, 1, 1)) + y * torch.sin(theta.view(-1, 1, 1, 1))\n y_theta = -x * torch.sin(theta.view(-1, 1, 1, 1)) + y * torch.cos(theta.view(-1, 1, 1, 1))\n\n # [channel_out, channel_in, kernel, kernel]\n gb = torch.exp(-.5 * (x_theta ** 2 / sigma_x.view(-1, 1, 1, 1) ** 2 + y_theta ** 2 / sigma_y.view(-1, 1, 1, 1) ** 2)) \\\n * torch.cos(2 * math.pi / Lambda.view(-1, 1, 1, 1) * x_theta + psi.view(-1, 1, 1, 1))\n\n return gb\n\n\nclass GaborConv2d(nn.Module):\n def __init__(self, channel_in, channel_out, kernel_size, stride=1, padding=0):\n super(GaborConv2d, self).__init__()\n self.kernel_size = kernel_size\n self.channel_in = channel_in\n self.channel_out = channel_out\n self.stride = stride\n self.padding = padding\n\n self.Lambda = nn.Parameter(torch.rand(channel_out), requires_grad=True)\n self.theta = nn.Parameter(torch.randn(channel_out) * 1.0, requires_grad=True)\n self.psi = nn.Parameter(torch.randn(channel_out) * 0.02, requires_grad=True)\n self.sigma = nn.Parameter(torch.randn(channel_out) * 1.0, requires_grad=True)\n self.gamma = nn.Parameter(torch.randn(channel_out) * 0.0, requires_grad=True)\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n theta = self.sigmoid(self.theta) * math.pi * 2.0\n gamma = 1.0 + (self.gamma * 0.5)\n sigma = 0.1 + (self.sigmoid(self.sigma) * 0.4)\n Lambda = 0.001 + (self.sigmoid(self.Lambda) * 0.999)\n psi = self.psi\n\n kernel = gabor_fn(self.kernel_size, self.channel_in, self.channel_out, sigma, theta, Lambda, psi, gamma)\n kernel = kernel.float() # [channel_out, channel_in, kernel, kernel]\n\n out = F.conv2d(x, kernel, stride=self.stride, padding=self.padding)\n\n return out\n\n\nclass LeNet(nn.Module):\n '''\n 该类继承了torch.nn.Modul类\n 构建LeNet神经网络模型\n 
'''\n def __init__(self):\n super(LeNet, self).__init__()\n\n # 第一层神经网络,包括卷积层、线性激活函数、池化层\n # a = torch.randn(20, 16, 50)\n # self.weight = nn.Parameter(torch.tensor([[1., 2., 3.], [4.,5.,6.],[1.,1.,1.]]), requires_grad=True)\n # self.sss = F.conv2d(a,self.weight)\n \n self.conv1 = nn.Sequential(\n # 这里进行设计卷积核大小、步长等参数\n nn.Conv2d(3, 30, int(ddd), 1, 0, groups=1), # input_size=(3*256*256),padding=2\n nn.ReLU(), # input_size=(32*256*256)\n nn.MaxPool2d(kernel_size=2, stride=2), # output_size=(32*128*128)\n )\n\n # 第二层神经网络,包括卷积层、线性激活函数、池化层\n self.conv2 = nn.Sequential(\n nn.Conv2d(32, 64, 3, 1, 0), # input_size=(32*128*128)\n nn.ReLU(), # input_size=(64*128*128)\n nn.MaxPool2d(2, 2) # output_size=(64*64*64)\n )\n\n # 全连接层(将神经网络的神经元的多维输出转化为一维)\n self.fc1 = nn.Sequential(\n nn.Linear(64 * 64 * 64, 128), # 进行线性变换\n nn.ReLU() # 进行ReLu激活\n )\n\n # 输出层(将全连接层的一维输出进行处理)\n self.fc2 = nn.Sequential(\n nn.Linear(128, 84),\n nn.ReLU()\n )\n\n # 将输出层的数据进行分类(输出预测值)\n self.fc3 = nn.Linear(84, 62)\n\n # 定义前向传播过程,输入为x\n def forward(self, x):\n print('hello')\n x = self.conv1(x)\n x = self.conv2(x)\n # nn.Linear()的输入输出都是维度为一的值,所以要把多维度的tensor展平成一维\n x = x.view(x.size()[0], -1)\n x = self.fc1(x)\n x = self.fc2(x)\n x = self.fc3(x)\n return x\nstep = 1\n# 中间特征提取\nclass FeatureExtractor(nn.Module):\n def __init__(self, submodule, extracted_layers):\n super(FeatureExtractor, self).__init__()\n self.submodule = submodule\n self.extracted_layers = extracted_layers\n \n def forward(self, x):\n outputs = []\n print(self.submodule._modules.items())\n try:\n for name, module in self.submodule._modules.items():\n if \"fc\" in name: \n print(name)\n x = x.view(x.size(0), -1)\n global step\n print(step)\n step += 1\n print(module)\n x = module(x)\n print(name)\n if name in self.extracted_layers:\n outputs.append(x)\n print(x)\n except:\n return outputs\n\n\ndef get_feature(pic_dir):\n # 输入数据\n img = get_picture(pic_dir, transform)\n # 插入维度\n img = img.unsqueeze(0)\n img = img.to(device)\n\n # 特征输出\n net = LeNet().to(device)\n # net.load_state_dict(torch.load('./model/net_050.pth'))\n exact_list = [\"conv1\",\"conv2\"]\n # exact_list = [\"conv1\"]\n myexactor = FeatureExtractor(net, exact_list)\n x = myexactor(img)\n print('=========================')\n # 特征输出可视化\n for i in range(30):\n ax = plt.subplot(5, 6, i + 1)\n # ax.set_title('Feature {}'.format(i),fontsize=5)\n ax.axis('off')\n plt.imshow(x[0].data.numpy()[0,i,:,:],cmap='jet')\n plt.savefig('res'+ddd+'_img/'+pic_dir.split('/')[1])\n plt.show()\n\n# 训练\nif __name__ == \"__main__\":\n pic_dir = os.listdir('test_img')\n try:\n for i in range(1,8,2):\n os.mkdir('res'+str(i)+'_img')\n except:\n pass\n for i in pic_dir:\n # get_picture_rgb(pic_dir)\n get_feature('test_img/'+i)\n \n","sub_path":"实验整合_徐征/代码及需要/中间特征可视化(实验一).py","file_name":"中间特征可视化(实验一).py","file_ext":"py","file_size_in_byte":8658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"140457918","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread(r\"C:\\Users\\H.A.R\\Downloads\\1.jpg\",1)\nheightImg = 640\nwidthImg = 480\nimg = cv2.resize(img,(widthImg,heightImg))\n\nimg1 = img.copy()\nimg2 = img.copy()\n\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ngray = cv2.GaussianBlur(gray, (5,5), 0)\nedged = cv2.Canny(gray, 70, 200)\n\nkernal = np.ones((5,5))\ndilate = cv2.dilate(edged, kernal, iterations = 2)\nerode = cv2.erode(dilate, kernal, iterations = 1)\n\ncontours, 
hierarchy = cv2.findContours(erode, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\nbiggest = np.array([])\nmax_area = 0\nfor i in contours:\n area = cv2.contourArea(i)\n if area > 5000:\n peri = cv2.arcLength(i, True)\n approx = cv2.approxPolyDP(i, 0.02 * peri, True)\n if area > max_area and len(approx) == 4:\n biggest = approx\n max_area = area\n\ncv2.drawContours(img1, contours, -1, (0,255,0),10)\ncv2.drawContours(img2, [biggest], -1, (0,255,0), 2)\nprint(biggest)\n\npts1 = np.float32([biggest[0], biggest[3], biggest[1], biggest[2]])\npts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]])\nM = cv2.getPerspectiveTransform(pts1,pts2)\npers = cv2.warpPerspective(img2, M, (widthImg, heightImg))\n\nplt.figure(figsize=[15,15])\nplt.subplot(121);plt.imshow(img2[...,::-1])\nplt.subplot(122);plt.imshow(pers[...,::-1])\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Doc Scanner.py","file_name":"Doc Scanner.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"376297638","text":"from pdf2image import convert_from_path, convert_from_bytes\n\nfrom pdf2image.exceptions import (\n PDFInfoNotInstalledError,\n PDFPageCountError,\n PDFSyntaxError\n)\n\nimport tempfile\n\n# with tempfile.TemporaryDirectory() as path:\nimages_from_path = convert_from_path('E:/test.pptx.pdf', 500)\nfor i, page in enumerate(images_from_path):\n page.save('{}.jpg'.format(i), 'JPEG')\na= 1","sub_path":"ppt2pdf/pdf2img.py","file_name":"pdf2img.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"64814057","text":"import os\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader, random_split\nfrom src.models.hparams import Hyperparams as hp\n\nimport torchvision\nfrom torchvision import transforms\n\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\n\n\n# Dataset to load the WikiArt data\n# Data will come in pairs of (artwork, artist)\nclass WikiArtDataset(Dataset):\n __genre = hp.genre_to_train\n\n def __init__(self, transform, root_dir=hp.wikiart_root_dir, image_loader=Image.open,\n load_artist_indices=False, artist_indices_fname=hp.artist_indices_fname,\n genre=hp.genre_to_train):\n\n self.root_dir = root_dir\n self.image_loader = image_loader\n self.transform = transform\n self.load_artist_indices = load_artist_indices\n self.artist_indices_fname = artist_indices_fname\n\n # this contains the names of the images and the corresponding artist in the\n # following format [(artwork, artist_num), (artwork, artist_num) ... 
]\n self.dataset = list()\n # TODO: remote hard coding of filenames later\n with open(\"src/utils/artists.txt\", \"r\") as f:\n self.artists = set(map(lambda x: x.rstrip(), f.readlines()))\n with open(\"src/utils/genres.txt\", \"r\") as f:\n self.genres = set(map(lambda x: x.rstrip(), f.readlines()))\n\n self.artist_num = dict()\n self.populate_dataset()\n\n if not self.load_artist_indices:\n self.save_dataset()\n\n def enumerate_dataset(self):\n for genre_dir in os.listdir(self.root_dir):\n if genre_dir in self.genres:\n for image_fname in os.listdir(os.path.join(self.root_dir, genre_dir)):\n artist = image_fname.split('_')[0]\n if artist in self.artists:\n image_path = os.path.join(self.root_dir, genre_dir, image_fname)\n yield image_path, artist, genre_dir\n\n def populate_dataset(self):\n if self.load_artist_indices:\n self.populate_artist_indices()\n\n for image_path, artist, genre in self.enumerate_dataset():\n data_point = (image_path, self.get_artist_num(artist))\n self.dataset.append(data_point)\n\n def get_artist_num(self, artist):\n if artist not in self.artist_num:\n print(artist)\n assert self.load_artist_indices == False\n self.artist_num[artist] = len(self.artist_num)\n\n return self.artist_num[artist]\n\n def populate_artist_indices(self):\n with open(self.artist_indices_fname) as f:\n for line in f.readlines():\n index, artist = line.split(' ')\n self.artist_num[artist.strip('\\n')] = int(index)\n\n def save_dataset(self):\n reversed_artist_num = dict()\n for artist in self.artist_num:\n reversed_artist_num[self.artist_num[artist]] = artist\n\n with open(self.artist_indices_fname, 'w') as f:\n for i in range(len(reversed_artist_num)):\n line = str(i) + ' ' + reversed_artist_num[i] + '\\n'\n f.write(line)\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n image_dir, artist_num = self.dataset[index]\n image = self.transform(self.image_loader(image_dir))\n sample = {'image': image, 'artist': artist_num}\n return sample\n\n\ndef visualize_batch(sampled_batch):\n plt.figure()\n image_batch = sampled_batch['image']\n grid = torchvision.utils.make_grid(image_batch)\n plt.imshow(grid.numpy().transpose((1, 2, 0)))\n plt.show()\n\n\nif __name__ == '__main__':\n np.random.seed(hp.random_seed)\n torch.manual_seed(hp.random_seed)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() & hp.train_on_gpu else \"cpu\")\n if torch.cuda.is_available() & hp.train_on_gpu:\n # torch.set_default_tensor_type(\"torch.cuda.FloatTensor\")\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.cuda.manual_seed(hp.random_seed)\n\n transform = transforms.Compose([\n transforms.CenterCrop(500),\n transforms.ToTensor()])\n\n # TODO: Split based on artist\n full_dataset = WikiArtDataset(transform)\n train_set_len = int(hp.train_set_percentage * full_dataset.__len__())\n test_set_len = len(full_dataset) - train_set_len\n\n train_set, test_set = random_split(full_dataset, [train_set_len, test_set_len])\n\n train_loader = DataLoader(train_set, batch_size=2, shuffle=True, num_workers=1)\n test_loader = DataLoader(test_set, batch_size=2, shuffle=True, num_workers=0)\n\n max_visualizations = 1\n for i_batch, sampled_batch in enumerate(train_loader):\n if i_batch == max_visualizations:\n break\n print('batch: {} | X shape: {} | y shape: {}'.format(i_batch, sampled_batch['image'].size(), sampled_batch['artist'].size()))\n visualize_batch(sampled_batch)\n\n # TODO: Use prefetcher to speed up data 
loading\n\n","sub_path":"src/utils/wikiart_dataloader.py","file_name":"wikiart_dataloader.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"520148429","text":"import matplotlib.pyplot as plt\nfrom traffic_discrete import TrafficDiscrete\n\n# Simulate traffic at three different densities and compare\n# the plots.\n\ntr = TrafficDiscrete(density=0.15, p_slowdown=0.4)\nprint (tr.evolve(tr.road_len))\ntr.make_plot(filename='2_spacetime_d015.pdf')\ntr = TrafficDiscrete(vmax=3.0, density=0.5, p_slowdown=0.4)\ntr.evolve(tr.road_len)\n'''\ntr.make_plot(filename='2_spacetime_d050.pdf')\ntr = TrafficDiscrete(vmax=3.0, density=0.7, p_slowdown=0.4)\ntr.evolve(tr.road_len)\ntr.make_plot(filename='2_spacetime_d070.pdf')\n\n# Determine the velocity-density plot for purely deterministic\n# traffic...\n\ntr = TrafficDiscrete(vmax=1.0, p_slowdown=0.0)\ntr.average_velocity_density_plot(4000, filename='2_average_velocity_deterministic.pdf')\n\n# ... and for traffic with random slowdown.\n\ntr = TrafficDiscrete(vmax=4.0, p_slowdown=0.3)\ntr.average_velocity_density_plot(8000, filename='2_average_velocity_randomised.pdf')\n\n# Generate the flow-density plot for deterministic behaviour.\n\ntr = TrafficDiscrete(vmax=3.0, density=0.15, p_slowdown=0)\ntr.flow_density_comparison_plot(4000, vmaxs=[1.0, 2.0, 3.0], filename='2_flow_density_deterministic.pdf')\n\n# Generate the flow-density plot for three values of the\n# maximal legal velocity.\n\ntr = TrafficDiscrete(vmax=3.0, density=0.15, p_slowdown=0.4)\ntr.flow_density_comparison_plot(4000, vmaxs=[1.0, 2.0, 3.0], filename='2_flow_density_randomised.pdf')\n'''","sub_path":"MathModelling/TrafficJams/traffic_discrete_test.py","file_name":"traffic_discrete_test.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"637146432","text":"from create import create_by_pre_order\nfrom order import pre_order, in_order, end_order\nfrom create import create_by_pre_in_order\n\nvalues = [1, 2, 4, 0, 7, 0, 0, 0, 3, 5, 0, 0, 6, 8, 0, 0, 0]\n\nroot = create_by_pre_order(values)\n\npre_order(root)\n\nprint()\n\nin_order(root)\n\nprint()\n\nend_order(root)\n\nprint()\n\nvalues1 = [1, 2, 4, 7, 3, 5, 6, 8]\nvalues2 = [4, 7, 2, 1, 5, 3, 8, 6]\nroot = create_by_pre_in_order(values1, values2)\n\npre_order(root)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"147178584","text":"import cv2 as cv\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nimg=cv.imread(\"Ch3-images/modelinveil.jpg\",0)\r\nclahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\r\ncl1 = clahe.apply(img)\r\ncl2=clahe.apply(cl1)\r\ncl3=clahe.apply(cl2)\r\nimgs=np.hstack((img,cl1,cl2,cl3))\r\ncv.imshow(\"Adaptive local histogram Equalization. 
A)Image B)Single CLAHE C)Double CLAHE D)Triple CLAHE\",imgs)\r\ncv.waitKey(0)\r\n","sub_path":"Programs/Chapter3/P-3-23-LocalAdaptiveHistogramEqualization.py","file_name":"P-3-23-LocalAdaptiveHistogramEqualization.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"328289859","text":"from django.shortcuts import render, redirect, get_object_or_404, get_list_or_404\n\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\n\nfrom prof_types.views import init\nfrom questions.models import ProfessionQuestion\nfrom .question import Question\nfrom .models import Answer, Rate, Profession\n\n\ntheQuestion = Question()\n\n\ndef get_detail_profession(question, check=True):\n    question_position = theQuestion.get_position(ProfessionQuestion)\n    if check:\n        context = {'question': question,\n                   'end_question': question_position['all'],\n                   'cur_question': question_position['current'],\n                   'avrg_question': question_position['avrg']}\n    else:\n        context = {'question': question,\n                   'end_question': question_position['all'],\n                   'cur_question': question_position['current'],\n                   'avrg_question': question_position['avrg'],\n                   'error_message': \"You did not select any of the options!\",}\n\n    return context\n\n\ndef index(request):\n    return render(request, 'professions/index.html')\n\n\ndef start(request):\n    profession_qlist = ProfessionQuestion.objects.all()\n    profession_qfirst = profession_qlist.first()\n    theQuestion.all_question = 0\n    theQuestion.current_question = 1\n    theQuestion.average = 0\n    theQuestion.user_profession_rate = Rate.objects.create()\n    return HttpResponseRedirect(reverse('professions:detail', args=(profession_qfirst.slug,)))\n\ndef detail(request, question_slug):\n    question = get_object_or_404(ProfessionQuestion, slug=question_slug)\n    # check=True: build the context without an error message\n    context = get_detail_profession(question, check=True)\n    return render(request, 'professions/detail.html', context)\n\n\ndef result(request, question_slug):\n    question = get_object_or_404(ProfessionQuestion, slug=question_slug)\n    return render(request, 'professions/result.html', {'question': question})\n\n\ndef vote(request, question_slug):\n    question = get_object_or_404(ProfessionQuestion, slug=question_slug)\n    if request.method == \"POST\":\n        try:\n            selected_choice = question.answer_set.get(pk=request.POST['answer'])\n        except (KeyError, Answer.DoesNotExist):\n            # check=False: redisplay the question voting form with an error message.\n            context = get_detail_profession(question, check=False)\n            return render(request, 'professions/detail.html', context)\n        else:\n            theQuestion.set_profession_rate(selected_choice.rate)\n            selected_choice.save()\n            try:\n                question = theQuestion.next_question(ProfessionQuestion, question.id)\n            except ProfessionQuestion.DoesNotExist:\n                theQuestion.user_profession_rate.delete()\n                professions = get_list_or_404(Profession)\n                your_profession = theQuestion.list_profession(professions)\n                init(your_profession)\n                return HttpResponseRedirect(reverse('types:index', args=()))\n            else:\n                return HttpResponseRedirect(reverse('professions:detail', args=(question.slug,)))\n    else:\n        return HttpResponseRedirect(reverse('professions:detail', args=(question.slug,)))\n","sub_path":"professions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"479247207","text":"import matplotlib.pyplot as plt\nimport skrf as rf\nfrom skrf import Network\n\nplt.rcParams[\"font.family\"] = \"Century Gothic\"\nplt.rcParams[\"font.size\"] = \"14\"\n\nSP = Network('C:/Users/SH/Desktop/homework_V2/Рабочий стол/Утюг/Микрополосковая линия/S2P/Four_line_Solaris.s8p')\n\n\nplt.figure()\n\n
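# NOTE (added): skrf's plot_s_db takes zero-based port indices m and n, hence\n# the -1 offsets from the one-based port numbers in the trace labels.\n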
SP.plot_s_db(m=2-1, n=1-1, label='S21', linewidth ='3')\nSP.plot_s_db(m=4-1, n=3-1, label='S43', linewidth ='3')\nSP.plot_s_db(m=6-1, n=5-1, label='S65', linewidth ='3')\nSP.plot_s_db(m=8-1, n=7-1, label='S87', linewidth ='3')\nplt.xlabel('F, Hz')\nplt.ylabel('SP, dB')\nplt.grid()\n\nplt.figure()\n\nSP.plot_s_db(m=1-1, n=1-1, label='S11', linewidth ='3')\nSP.plot_s_db(m=3-1, n=3-1, label='S33', linewidth ='3')\nSP.plot_s_db(m=5-1, n=5-1, label='S55', linewidth ='3')\nSP.plot_s_db(m=7-1, n=7-1, label='S77', linewidth ='3')\nplt.xlabel('F, Hz')\nplt.ylabel('SP, dB')\nplt.grid()\n\n\nplt.show()\n\n","sub_path":"SP_Silikat.py","file_name":"SP_Silikat.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"406549625","text":"import csv\nimport random\nfrom functools import partial\nfrom typing import Callable, Optional\nfrom pdb import set_trace as st\nimport os\nimport random\nimport pandas as pd\nfrom typing import Any, Callable, Dict, Iterable, List, Tuple, Union\nimport datetime\nimport shutil\nimport copy\n\nimport numpy as np\nimport tensorflow as tf\nfrom foolbox.attacks import (\n    FGSM,\n    DeepFoolAttack,\n    DeepFoolLinfinityAttack,\n    DeepFoolL2Attack,\n    IterativeGradientSignAttack,\n    SaliencyMapAttack,\n    RandomPGD,\n    CarliniWagnerL2Attack,\n    ADefAttack,\n    SinglePixelAttack,\n    LocalSearchAttack,\n    ApproximateLBFGSAttack,\n    BoundaryAttack,\n    SpatialAttack,\n    PointwiseAttack,\n    GaussianBlurAttack,\n)\n\n# from foolbox.criteria import TargetClass\n# from foolbox.models import TensorFlowModel\nfrom tensorflow.python.training import saver\nfrom tensorflow.python.training.session_manager import SessionManager\nimport tensorflow as tf\nimport numpy as np\nimport pickle\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\nplt.switch_backend('Agg')\n\nfrom model.config import LENET\nfrom model import LeNet\nimport nninst_mode as mode\nfrom dataset import mnist\nfrom dataset.config import MNIST_TRAIN, MNIST_PATH, CIFAR10_PATH\nfrom dataset.cifar10_main import input_fn_for_adversarial_examples\nfrom dataset.mnist_transforms import *\nfrom trace.lenet_mnist_class_trace_v2 import (\n    data_config,\n)\nfrom trace.common import (\n    class_trace,\n)\nfrom tf_utils import new_session_config\nfrom nninst_statistics import calc_trace_side_overlap\nfrom nninst_trace import TraceKey\nfrom nninst_utils.numpy import arg_approx, arg_abs_approx\nfrom nninst_utils.ray import ray_init\nfrom nninst_utils.fs import (ensure_dir, IOAction, \n                             CsvIOAction, abspath, IOBatchAction,\n                             IOObjAction)\nfrom model.resnet10cifar10 import ResNet10Cifar10\nfrom model.resnet10cifar10_feature import ResNet10Cifar10_Feature\n\nfrom .common import get_overlay_summary, clean_overlap_ratio, \\\n    translation_overlap_ratio, attack_overlap_ratio, \\\n    resnet10_cifar10_example\nfrom .cw_attack import cw_generate_adversarial_example\nfrom .eval_mnist import foolbox_generate_adversarial_example\nfrom .cw_attacks import CarliniL2\nfrom nninst_graph import AttrMap, Graph, GraphAttrKey\nfrom nninst_utils.ray import ray_iter\nfrom tf_graph import (\n    MaskWeightWithTraceHook,\n    model_fn_with_fetch_hook,\n)\nfrom trace.common import (\n    get_predicted_value,\n    
get_rank,\n predict,\n predict_batch,\n reconstruct_class_trace_from_tf,\n reconstruct_trace_from_tf,\n reconstruct_trace_from_tf_to_trace,\n reconstruct_trace_from_tf_brute_force,\n)\nfrom .analyse_class_trace import reconstruct_edge\nfrom eval.lenetmnist_save_traces import ClassTraceIOAction\n\n# Model config\nmodel_label = \"dropout\"\nmodel_dir = f\"result/resnet10cifar10/model_{model_label}\"\n\nthreshold = 0.5\n\ndataset_mode = \"test\"\nresult_dir = f\"{model_dir}/nninst_mu_posneg/forward_feature\"\n# result_dir = f\"result/lenet/test\"\nclass_num = 10\nimages_per_class = 100\nchunksize = 1\nbatch_size = 1000\n\n \nattacks = {\n \"original\": [FGSM],\n \"FGSM_1\": [FGSM],\n \"FGSM_2\": [FGSM],\n \"FGSM_4\": [FGSM],\n \"FGSM_8\": [FGSM],\n \n \"DeepFoolLinf\": [DeepFoolLinfinityAttack],\n \"DeepFoolL2\": [DeepFoolL2Attack],\n \n \"JSMA\": [SaliencyMapAttack],\n \n \"BIM_2\": [IterativeGradientSignAttack],\n \"BIM_4\": [IterativeGradientSignAttack],\n \"BIM_8\": [IterativeGradientSignAttack],\n \n \"RPGD_B\": [RandomPGD],\n \"RPGD_2\": [RandomPGD],\n \"RPGD_4\": [RandomPGD],\n \"RPGD_8\": [RandomPGD],\n \n \"CWL2\": [CarliniWagnerL2Attack],\n \"ADef\": [ADefAttack],\n\n \"SinglePixel\": [SinglePixelAttack],\n \"LocalSearch\": [LocalSearchAttack],\n \n \"Boundary\": [BoundaryAttack],\n \"Spatial\": [SpatialAttack],\n \"Pointwise\": [PointwiseAttack],\n \"GaussianBlur\": [GaussianBlurAttack],\n}\n\n# DeepFool will shutdown when num_gpu<0.2\nnum_gpus = 0.5\n\ndef forward_propagate_batch_feature(\n create_model,\n input_fn,\n model_dir: str,\n forward_fn: Callable[[tf.Tensor], tf.Tensor] = lambda logits: tf.argmax(logits, axis=1),\n data_format: str = \"channels_first\",\n parallel: int = 1,\n prediction_hooks = None,\n) -> Union[int, float]:\n\n def model_fn(features, labels, mode, params):\n image = features\n if isinstance(image, dict):\n image = features[\"image\"]\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n logits, feature = create_model()(image, training=False)\n predictions = {\n \"classes\": forward_fn(logits),\n \"feature\": feature,\n }\n return tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=predictions,\n prediction_hooks=prediction_hooks,\n export_outputs={\n \"classify\": tf.estimator.export.PredictOutput(predictions)\n },\n )\n\n model_dir = abspath(model_dir)\n model_function = model_fn\n if data_format is None:\n data_format = (\n \"channels_first\" if tf.test.is_built_with_cuda() else \"channels_last\"\n )\n estimator_config = tf.estimator.RunConfig(\n session_config=new_session_config(parallel=parallel)\n )\n if not os.path.exists(model_dir):\n raise RuntimeError(f\"model directory {model_dir} is not existed\")\n classifier = tf.estimator.Estimator(\n model_fn=model_function,\n model_dir=model_dir,\n params={\"data_format\": data_format},\n config=estimator_config,\n )\n\n result = list(classifier.predict(input_fn=input_fn))\n prediction = np.array([v[\"classes\"] for v in result])\n feature = np.array([v[\"feature\"] for v in result])\n return prediction, feature\n\n# Compute the mean overlap ratio of attacked image\ndef save_training_feature_batch(\n image_id_index,\n batch_size,\n class_id,\n model_dir = model_dir,\n transforms = None,\n transform_name = \"noop\",\n graph_dir = \"result/test\",\n dataset_mode = dataset_mode,\n images_per_class = 1,\n **kwargs,\n):\n \n # mode.check(False)\n data_dir = abspath(CIFAR10_PATH)\n model_dir = abspath(model_dir)\n ckpt_dir = f\"{model_dir}/ckpts\"\n create_model = lambda: partial(\n 
ResNet10Cifar10_Feature(),\n training = False,\n )\n graph = ResNet10Cifar10_Feature.graph().load()\n \n batch_size = min(batch_size, images_per_class - image_id_index)\n \n prediction, feature = forward_propagate_batch_feature(\n create_model=create_model,\n input_fn=lambda: (\n input_fn_for_adversarial_examples(\n is_training= (dataset_mode == \"train\"),\n data_dir=data_dir,\n num_parallel_batches=1,\n is_shuffle=False,\n transform_fn=None,\n )\n .filter(\n lambda image, label: tf.equal(\n tf.convert_to_tensor(class_id, dtype=tf.int32), label\n )\n )\n .skip(image_id_index)\n .take(batch_size)\n .batch(batch_size)\n .make_one_shot_iterator()\n .get_next()[0]\n ),\n model_dir=ckpt_dir,\n )\n \n label = np.repeat([class_id], batch_size)\n return feature, label, prediction\n \n \ndef save_training_features(\n images_per_class,\n batch_size,\n dataset_mode,\n):\n \n features, labels, preds = [], [], []\n for class_id in range(class_num):\n print(f\"Saving training features of class {class_id}\")\n for image_id_index in range(0, images_per_class, batch_size):\n feature, label, pred = save_training_feature_batch(\n image_id_index=image_id_index,\n batch_size=batch_size,\n class_id=class_id,\n model_dir=model_dir,\n dataset_mode = dataset_mode,\n images_per_class=images_per_class,\n )\n features.append(feature)\n labels.append(label)\n preds.append(pred)\n\n features = np.concatenate(features)\n labels = np.concatenate(labels)\n preds = np.concatenate(preds)\n data = {\n \"features\": features,\n \"labels\": labels,\n \"preds\": preds,\n }\n \n path = os.path.join(result_dir, \"train.pkl\")\n with open(path, \"wb\") as f:\n pickle.dump(data, f)\n \n \n\n# Compute the mean overlap ratio of attacked image\ndef save_adversarial_feature_batch(\n attack_name,\n image_id_index,\n batch_size,\n class_id,\n model_dir = model_dir,\n graph_dir = \"result/test\",\n dataset_mode = dataset_mode,\n images_per_class = 1,\n adversarial_dir = \"result/test\",\n **kwargs,\n):\n \n # mode.check(False)\n data_dir = abspath(CIFAR10_PATH)\n model_dir = abspath(model_dir)\n ckpt_dir = f\"{model_dir}/ckpts\"\n create_model = lambda: partial(\n ResNet10Cifar10_Feature(),\n training = False,\n )\n graph = ResNet10Cifar10_Feature.graph().load()\n \n batch_size = min(batch_size, images_per_class - image_id_index)\n \n adversarial_examples = [\n resnet10_cifar10_example(\n attack_name=attack_name,\n attack_fn=None,\n generate_adversarial_fn=None,\n class_id=class_id,\n image_id=image_id,\n # model_dir not ckpt_dir\n model_dir=model_dir,\n transforms = None,\n transform_name = \"noop\",\n dataset_mode = dataset_mode,\n ).load()\n for image_id in range(image_id_index, image_id_index + batch_size)\n ]\n adversarial_examples = [v for v in adversarial_examples if v is not None]\n adversarial_examples = np.concatenate(adversarial_examples)\n \n adversarial_prediction, feature = forward_propagate_batch_feature(\n create_model=create_model,\n input_fn=lambda: tf.data.Dataset.from_tensors(\n adversarial_examples\n ),\n model_dir=ckpt_dir,\n )\n \n label = np.repeat([class_id], adversarial_examples.shape[0])\n \n return feature, label, adversarial_prediction\n \ndef save_adversarial_features(\n images_per_class,\n batch_size,\n dataset_mode,\n \n):\n adversarial_dir = os.path.join(model_dir, \"attack\", \"test\")\n for attack_name in [\n # \"original\",\n \"FGSM_2\", \"FGSM_4\", \"FGSM_8\",\n \"DeepFoolLinf\", \"DeepFoolL2\",\n \"JSMA\",\n \"RPGD_2\", \"RPGD_4\", \"RPGD_8\",\n \"CWL2\", \"ADef\",\n \"SinglePixel\", 
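\n        # NOTE (added): this hard-coded list is a subset of the keys of the\n        # attacks dict defined at the top of this file; keep the two in sync.\n        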
\"LocalSearch\",\n \"Boundary\", \"Spatial\", \"Pointwise\", \"GaussianBlur\",\n ]:\n features, labels, preds = [], [], []\n for class_id in range(class_num):\n print(f\"Saving training features of attack {attack_name} class {class_id}\")\n for image_id_index in range(0, images_per_class, batch_size):\n feature, label, pred = save_adversarial_feature_batch(\n attack_name=attack_name,\n image_id_index=image_id_index,\n batch_size=batch_size,\n class_id=class_id,\n model_dir=model_dir,\n dataset_mode = dataset_mode,\n images_per_class=images_per_class,\n adversarial_dir=adversarial_dir,\n )\n features.append(feature)\n labels.append(label)\n preds.append(pred)\n\n features = np.concatenate(features)\n labels = np.concatenate(labels)\n preds = np.concatenate(preds)\n data = {\n \"features\": features,\n \"labels\": labels,\n \"preds\": preds,\n }\n \n path = os.path.join(result_dir, f\"{attack_name}.pkl\")\n with open(path, \"wb\") as f:\n pickle.dump(data, f)\n\n\ndef save_class_avg_trace():\n \n cpu_chunksize = 1\n batch_size = 100\n \n for attack_name in [\n \"original\",\n # \"FGSM_1\", \"FGSM_2\", \"FGSM_4\", \"FGSM_8\",\n # \"DeepFoolLinf\", \"DeepFoolL2\",\n # \"JSMA\",\n # \"BIM_2\", \"BIM_4\", \"BIM_8\",\n # \"RPGD_2\", \"RPGD_4\", \"RPGD_8\",\n # \"CWL2\", \"ADef\",\n # \"SinglePixel\", \"LocalSearch\",\n # \"Boundary\", \"Spatial\", \"Pointwise\", \"GaussianBlur\",\n ]:\n save_training_trace(\n attack_name = attack_name,\n images_per_class=images_per_class,\n batch_size=batch_size,\n dataset_mode=dataset_mode,\n cpu_chunksize=cpu_chunksize,\n )\n\n\nif __name__==\"__main__\":\n tf.set_random_seed(3)\n np.random.seed(3)\n random.seed(3)\n\n # dataset_mode, images_per_class = \"train\", 1000\n # batch_size = 200\n # save_training_features(\n # images_per_class,\n # batch_size,\n # dataset_mode,\n # )\n \n dataset_mode, images_per_class = \"test\", 100\n batch_size = 100\n save_adversarial_features(\n images_per_class,\n batch_size,\n dataset_mode,\n )\n","sub_path":"submissions/available/NNSlicer/NNSlicer/eval/resnet10cifar10_save_features.py","file_name":"resnet10cifar10_save_features.py","file_ext":"py","file_size_in_byte":13017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"132515107","text":"import os\nimport sys\nsys.path.append(os.getcwd())\nimport torch\nimport torchvision\nimport argparse\nimport utils\nimport models\n\nparser = argparse.ArgumentParser(description='FastDepth evaluation')\nparser.add_argument('-m', '--model', type=str, required=True, help=\"Path to model.\")\nparser.add_argument('--resnet18', action='store_true')\nparser.add_argument('--save-gpu', action='store_true')\nparser.add_argument('--nyu', action='store_true')\nargs = parser.parse_args()\n\nmodel_path = args.model\n\nif args.nyu:\n checkpoint = torch.load(args.model)\n model = checkpoint['model']\nelse:\n model_state_dict, _, _ = utils.load_checkpoint(args.model)\n model_state_dict = utils.convert_state_dict_from_gpu(model_state_dict)\n if args.resnet18:\n model = models.ResNetSkipAdd(layers=18, output_size=(224, 224), pretrained=True)\n else:\n model = models.MobileNetSkipAdd(output_size=(224, 224), pretrained=True)\n if model_state_dict:\n model.load_state_dict(model_state_dict)\n\nif args.save_gpu:\n print(\"Saving model on GPU\")\n model.to(torch.device(\"cuda:0\"))\nelse:\n print(\"Saving model on CPU\")\n model.to(torch.device(\"cpu\"))\n\nmodel_dir = os.path.join(*model_path.split('/')[:-1])\nmodel_name = 
model_path.split('/')[-1]\ndevice_ext = \"gpu\" if args.save_gpu else \"cpu\"\n\nsave_path = os.path.join(model_dir, 'full_model_' + model_name[:-4] + \"_\" + device_ext + \".pth\")\ntorch.save(model, save_path)\nprint(\"Saved to \", save_path)\n","sub_path":"fastdepth/scripts/save_full_model.py","file_name":"save_full_model.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"645890548","text":"import pygame\n\npygame.init() # initializes the module and associated classes\n\ndisplay_width = 800\ndisplay_height = 600\nwin_dimensions = (display_width, display_height)\n\nblack = (0, 0, 0) # this tuple has no color, all 0 means no color\nwhite = (255, 255, 255) # each value is an rgb maximum\n\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\n\ncar_width = 1333//31\ncar_height = 2400//24\n\ngameDisplay = pygame.display.set_mode(win_dimensions) # this only accepts a tuple for the window size\npygame.display.set_caption('A bit Racey') # gives the python window a caption\nclock = pygame.time.Clock() # initializes the timer\n\ncarImg = pygame.image.load('CAR.png') # loads an image into pygame\ncarImg = pygame.transform.scale(carImg, (car_width, car_height)) # scales the image to the (car_width, car_height) tuple\n\n\ndef car(x, y):\n    gameDisplay.blit(carImg, (x, y)) # draws an image to a surface, x and y MUST be a TUPLE\n\n\ndef message_display(text):\n    largeText = pygame.font.Font('freesansbold.ttf', 115)\n    TextSurf, TextRect = text_objects(text, largeText)\n    TextRect.center = ((display_width/2), (display_height/2))\n    gameDisplay.blit(TextSurf, TextRect)\n\n    pygame.display.update() # show the message before pausing\n    pygame.time.wait(2000) # pygame.time has no sleep(); wait() takes milliseconds\n    game_loop()\n\n\ndef text_objects(text, font):\n    textSurface = font.render(text, True, black)\n    return textSurface, textSurface.get_rect()\n\n\ndef crash():\n    message_display('You Crashed!')\n\n\ndef game_loop():\n    x = (display_width * 0.43) # display_width COULD change to different size so only multiplication\n    y = (display_height * 0.8)\n\n    car_speed = 5 # how fast the car can move in any direction\n    x_change = 0 # how much x changes\n    y_change = 0 # how much y changes\n\n    gameExit = False # the car in the game hasn't crashed\n\n    while not gameExit: # This will run at the start but not if the car crashes\n\n        # Event Handling Loop\n        for event in pygame.event.get(): # gets all the events (clicking, moving, pressing) that are happening PER FRAME\n            if event.type == pygame.QUIT: # pygame.QUIT is the upper right red x in any window\n                gameExit = True # not the best loop break\n\n            \"\"\" OLD WAY OF MOVEMENT: (gets stuck easily if both keys are pressed)\n            if event.type == pygame.KEYDOWN: # if any key is held down\n                left_keys = event.key == pygame.K_LEFT or event.key == pygame.K_a\n                right_keys = event.key == pygame.K_RIGHT or event.key == pygame.K_d\n                if left_keys: # if the left arrow or a key is down\n                    x_change = -5\n                elif right_keys: # same as left, but right\n                    x_change = 5\n            \n            if event.type == pygame.KEYUP: # if a key has been released:\n                left_keys = event.key == pygame.K_LEFT or event.key == pygame.K_a\n                right_keys = event.key == pygame.K_RIGHT or event.key == pygame.K_d\n                if left_keys or right_keys: # identify released keys\n                    x_change = 0 # when the key is released, we no longer move so x does not change\n            \"\"\"\n\n        # Movement:\n        # Init:\n        keys = pygame.key.get_pressed() # returns a list of bool values for all keys\n        left_keys = keys[pygame.K_LEFT] or keys[pygame.K_a] # define all keys meaning left (a 
bool value)\n right_keys = keys[pygame.K_RIGHT] or keys[pygame.K_d] # define all keys meaning right (a bool value)\n up_keys = keys[pygame.K_UP] or keys[pygame.K_w]\n down_keys = keys[pygame.K_DOWN] or keys[pygame.K_s]\n\n # HORIZONTAL:\n if left_keys: # if any left key has a value, change the x\n x_change = -car_speed\n elif right_keys: # if any right key has a value, change the x\n x_change = car_speed\n\n # Checks for both presses or no press at all\n if left_keys and right_keys: # if both keys are pressed, they negate the other's effect and no movement\n x_change = 0\n elif not left_keys and not right_keys: # if neither the left or right keys are pressed, no movement\n x_change = 0\n\n # VERTICAL:\n if down_keys:\n y_change = car_speed\n elif up_keys:\n y_change = -car_speed\n\n if down_keys and up_keys:\n y_change = 0\n elif not down_keys and not up_keys:\n y_change = 0\n\n x += x_change\n y += y_change\n\n gameDisplay.fill(white) # fills window with white color\n car(x, y) # refer to the method def\n\n if x > display_width - car_width or x < 0:\n crash()\n\n pygame.display.update() # refreshes the entire window\n # pygame.display.flip() # this updates a single part of a program instead of the whole screen\n clock.tick(60) # the arg is the window's FPS\n\n\ngame_loop()\npygame.quit() # closes out of pygame\nquit() # generic python closing line for getting out of a program\n","sub_path":"PygameExperiment.py","file_name":"PygameExperiment.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"141325278","text":"import os\nimport json\n\nfrom requests import Request, Session\n\nAUTH_ENDPOINT = 'https://hh.ru/oauth/token'\nCREDENTIAL_DIR = 'credentials\\\\'\n\n\nclass AppAuth:\n \"\"\"AppAuth class wraps app authorization. 
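A minimal usage sketch\n    (illustrative; added here, not from the original file):\n\n        auth = AppAuth()\n        if not auth.has_token():\n            auth.get_new_token()\n        token = auth.access_token\n\n    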
It encloses the app credentials and acquires an access token for the app\n    specified in the app_credentials file.\"\"\"\n\n    def __init__(self, with_file: bool = True):\n        # client id and secret stored in file or passed through env vars\n        if with_file:\n            cred_path = os.path.join(CREDENTIAL_DIR, 'app_credentials')\n            with open(cred_path) as cred_file:\n                app_credentials = json.loads(cred_file.read())\n        else:\n            app_credentials = os.environ\n        self.client_id = app_credentials['hh_app_client_id']\n        self.client_secret = app_credentials['hh_app_client_secret']\n\n        # access_token stored in file only; default to None so has_token()\n        # works even when the file is missing or too short\n        self.access_token = None\n        if 'app_access_token' in os.listdir(CREDENTIAL_DIR):\n            with open(os.path.join(CREDENTIAL_DIR, 'app_access_token')) as token_file:\n                access_token = token_file.read()\n            if len(access_token) > 3:\n                self.access_token = access_token\n\n    def has_token(self) -> bool:\n        \"\"\"has_token indicates whether an app access token is present or not\"\"\"\n        return bool(self.access_token)\n\n    def write_token(self):\n        \"\"\"write_token writes the token to a separate file in the credentials dir\"\"\"\n\n        with open(os.path.join(CREDENTIAL_DIR, 'app_access_token'), 'w') as token_file:\n            token_file.write(self.access_token)\n\n    def get_new_token(self):\n        \"\"\"get_new_token acquires a new app access token (the existing token will be revoked)\"\"\"\n\n        request_body = '&'.join(['grant_type=client_credentials',\n                                 f'client_id={self.client_id}',\n                                 f'client_secret={self.client_secret}'])\n        headers = {'Content-Type': 'application/x-www-form-urlencoded',\n                   'Content-Length': f'{len(request_body)}'}\n\n        session = Session()\n        request = Request('POST', url=AUTH_ENDPOINT, headers=headers)\n        prepared_request = request.prepare()\n        prepared_request.body = request_body\n        response = session.send(prepared_request)\n\n        if response.status_code == 400:\n            print('400 Bad request')\n        elif response.status_code == 403:\n            print('403 Forbidden - access token has been issued in the last 5 min.')\n\n        response_dict = json.loads(response.content)\n        # the OAuth token response uses the key 'access_token'\n        access_token = response_dict.get('access_token')\n        if access_token is not None:\n            self.access_token = access_token\n            self.write_token()\n","sub_path":"headpy/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"163867750","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Rename quotation templates. In the process, we move raw passage text that follows the template inside it. For example,\n# if the --direcfile specifies 'RQ:Browne Errors ||| RQ:Browne Pseudodoxia Epidemica', we replace\n#\n# #* {{RQ:Browne Errors}}\n# #*: Preventive physic [...] preventeth sickness in the healthy, or the '''recourse''' thereof in the valetudinary.\n#\n# with:\n#\n# #* {{RQ:Browne Pseudodoxia Epidemica|passage=Preventive physic [...] preventeth sickness in the healthy, or the '''recourse''' thereof in the valetudinary.}}\n#\n# If 'RQ:Browne Errors' occurs without raw passage text following, we just replace with 'RQ:Browne Pseudodoxia Epidemica'.\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, set_template_name, msg, errmsg, errandmsg, site, tname, pname\n\ndef add_params_to_template(t, params, seen_from_params, pagemsg):\n  pn = None\n  for param in t.params:\n    pn = pname(param)\n    break\n  for param, value in params:\n    if re.search(\"^%[0-9]+$\", value): # %1, %2, ... for placeholder\n      if value in seen_from_params:\n        value = seen_from_params[value]\n      else:\n        pagemsg(\"WARNING: Unmatched placeholder %s in replacement params\" % value)\n        return False\n    t.add(param, value, before=pn)\n  return True\n\ndef process_text_on_page(index, pagename, text):\n  def pagemsg(txt):\n    msg(\"Page %s %s: %s\" % (index, pagename, txt))\n  def errandpagemsg(txt):\n    errandmsg(\"Page %s %s: %s\" % (index, pagename, txt))\n\n  pagemsg(\"Processing\")\n\n  notes = []\n\n  curtext = text + \"\\n\"\n\n  def reformat_template(t, from_params, totemp, to_params, text_to_incorporate=None):\n    # If from-template params given, make sure they all match.\n    seen_from_params = {}\n    for param, value in from_params:\n      if re.search(\"^%[0-9]+$\", value): # %1, %2, ... for placeholder\n        curval = getparam(t, param).strip()\n        if not curval:\n          pagemsg(\"Skipping template because expected param %s=%s doesn't have a value: %s\" % (\n            param, value, str(t)))\n          return False\n        seen_from_params[value] = curval\n      elif getparam(t, param).strip() != value.strip():\n        pagemsg(\"Skipping template because expected param %s=%s doesn't match: %s\" % (\n          param, value, str(t)))\n        return False\n    if text_to_incorporate is not None:\n      for existing_param in [\"passage\", \"text\"]:\n        if getparam(t, existing_param):\n          pagemsg(\"WARNING: Can't incorporate raw passage text into {{%s}} because already has %s=: %s\" %\n            (fromtemp, existing_param, str(t)))\n          return False\n      text_to_incorporate = re.sub(r\"\\s*<br */?>\\s*\", \" / \", text_to_incorporate)\n      text_to_incorporate = re.sub(r\"^''(.*)''$\", r\"\\1\", text_to_incorporate)\n      t.add(\"passage\", text_to_incorporate)\n    blib.set_template_name(t, totemp)\n    for param, value in from_params:\n      rmparam(t, param)\n    if to_params:\n      if not add_params_to_template(t, to_params, seen_from_params, pagemsg):\n        return False\n    if text_to_incorporate is not None:\n      msg_template = \"reformat {{%s%s}} into {{%s%s}}, incorporating following raw passage text into passage=\"\n    else:\n      msg_template = \"rename {{%s%s}} to {{%s%s}}\"\n    notes.append(msg_template %\n      (fromtemp, \"\".join(\"|%s=%s\" % (param, value) for param, value in from_params),\n       totemp, \"\".join(\"|%s=%s\" % (param, value) for param, value in to_params)))\n    return True\n\n  for (fromtemp, from_params), (totemp, to_params) in templates_to_rename:\n    def do_reformat_template(m):\n      template, text = m.groups()\n      parsed = blib.parse_text(template)\n      t = list(parsed.filter_templates())[0]\n      origtext = m.group(0)\n      if tname(t) != fromtemp:\n        return origtext\n      if not reformat_template(t, from_params, totemp, to_params, text_to_incorporate=text):\n        return origtext\n      return str(t) + \"\\n\"\n\n    curtext = re.sub(r\"(\\{\\{%s.*?\\}\\})\\n#+\\*:\\s*(.*?)\\n\" % re.escape(fromtemp),\n        do_reformat_template, curtext)\n\n  parsed = blib.parse_text(curtext)\n  for t in parsed.filter_templates():\n    tn = tname(t)\n    for (fromtemp, from_params), (totemp, to_params) in templates_to_rename:\n      if tn != fromtemp:\n        continue\n      if not reformat_template(t, from_params, totemp, to_params):\n        continue\n  curtext = str(parsed)\n\n  return curtext.rstrip(\"\\n\"), notes\n\nparser = blib.create_argparser(\"Rename and reformat quotation templates for [[User:Sgconlaw]]\",\n  include_pagefile=True, include_stdin=True)\nparser.add_argument(\"--direcfile\", help=\"File containing pairs of templates to rename (without the Template: prefix), separated by ' ||| '.\",\n  required=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\ntemplates_to_rename = []\nfor lineno, line in blib.iter_items_from_file(args.direcfile):\n  if \" ||| \" not in line:\n    msg(\"Line %s: WARNING: Saw bad line in --direcfile: %s\" % (lineno, line))\n    continue\n  fromtemp, totemp = line.split(\" ||| \")\n  if \"|\" in fromtemp:\n    fromtemp, combined_params = fromtemp.split(\"|\", 1)\n    combined_params = combined_params.split(\"|\")\n    from_params = []\n    for combined_param in combined_params:\n      if \"=\" not in combined_param:\n        raise ValueError(\"Param %s doesn't have an = sign\" % combined_param)\n      param, value = combined_param.split(\"=\")\n      from_params.append((param, value))\n  else:\n    from_params = []\n  if \"|\" in totemp:\n    totemp, combined_params = totemp.split(\"|\", 1)\n    combined_params = combined_params.split(\"|\")\n    to_params = []\n    for combined_param in combined_params:\n      if \"=\" not in combined_param:\n        raise ValueError(\"Param %s doesn't have an = sign\" % combined_param)\n      param, value = combined_param.split(\"=\")\n      to_params.append((param, value))\n  else:\n    to_params = []\n  templates_to_rename.append(((fromtemp, from_params), (totemp, to_params)))\nblib.do_pagefile_cats_refs(args, start, end, process_text_on_page,\n  default_refs=[\"Template:%s\" % fromtemp for (fromtemp, from_params), (totemp, to_params) in templates_to_rename],\n  edit=True, stdin=True, 
skip_ignorable_pages=True)\n","sub_path":"fix_sgconlaw_reformat_quotation_templates.py","file_name":"fix_sgconlaw_reformat_quotation_templates.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"363060756","text":"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 16 12:56:37 2020\n\n@author: jraidal\n\"\"\"\n\n\nfrom datetime import datetime\nimport itertools\nimport numpy\nfrom numpy import arange\nfrom numpy import vstack\nfrom numpy import argmax\nfrom numpy import asarray\nfrom numpy.random import normal\nfrom numpy.random import uniform\nfrom scipy.stats import norm\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom warnings import catch_warnings\nfrom warnings import simplefilter\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nfrom sklearn.gaussian_process.kernels import Matern, ConstantKernel, RBF, ExpSineSquared, RationalQuadratic, WhiteKernel\nimport sys\n\n\n# Define objective function\n\ndef functions(x, y, function):\n '''\n Defines and evaluates functions\n\n Parameters\n ----------\n x : float\n x-coordinate.\n y : float\n y-coordinate.\n function : str\n Either the name of one of the built in functions or a custom 2D function.\n\n Returns\n -------\n TYPE float\n Returns function value based on input x and y \\.\n\n '''\n if function == 'Beale':\n #Beale\n return (1.5-x+x*y)**2+(2.25-x+x*y**2)**2+(2.625-x+x*y**3)**2\n if function == 'Goldstein-Price':\n #Goldstein-Price\n return (1+(x+y+1)**2 * (19-14*x+3*x**2-14*y+6*x*y+3*y**2))*(30+(2*x-3*y)**2 * (18-32*x+12*x**2+48*y-36*x*y+27*y**2))\n if function == 'Rosenbrock':\n #Rosenbrock\n return ((1 - x)**2 + 100*(y - x**2)**2)\n if function == 'Ackley':\n #Ackley\n return -20*numpy.exp(-0.2*numpy.sqrt(0.5*(x**2 + y**2)))-numpy.exp(0.5*(numpy.cos(2*numpy.pi*x) + numpy.cos(2*numpy.pi*y)))+numpy.exp(1)+20\n else:\n return eval(function)\n\n\n# Surrogate model\ndef surrogate(model, XY): \n '''\n Predicts the mean and standard deviation of points using Gaussian processes\n\n Parameters\n ----------\n model : sklearn.gaussian_process\n Some Gaussian process model.\n XY : numpy array\n Array of x and y coordinates.\n\n Returns\n -------\n array, array\n Returns mean and standard deviation arrays for evaluated points.\n\n '''\n return model.predict(XY, return_std=True)\n\n# Maximum probability of improvement acquisition function\ndef acquisition(XY, x_bounds, y_bounds, e, model, max_min):\n '''\n Creates sample points and finds the one most likely to improve the function when\n evaluating.\n\n Parameters\n ----------\n XY : numpy array\n Array of all points evaluated so far.\n x_bounds : list\n Two element list of x-axis boundaries for the function.\n y_bounds : list\n Two element list of y-axis boundaries for the function.\n e : float\n Exploration parameter.\n model : sklearn.gaussian_process\n Some Gaussian process model.\n max_min : str\n Specifies whether the algorithm is searching for maxima or minima.\n\n Returns\n -------\n X_best : float\n x-coordinate of point with maximum probability of improvement.\n Y_best : float\n y-coordinate of point with maximum probability of improvement.\n\n '''\n # Unpack bounds\n x1, x2 = x_bounds\n y1, y2 = y_bounds\n \n # Find the best surrogate mean found so far\n z_surrogate, _ = surrogate(model, XY)\n if max_min == 'maximum':\n best = numpy.max(z_surrogate)\n if max_min == 'minimum':\n best = numpy.min(z_surrogate)\n \n # Create 
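candidate points at random.\n    # NOTE (added): numpy.random.uniform accepts a size argument, so the loop\n    # below could be vectorized as Xsamples = uniform(x1, x2, 100) and\n    # Ysamples = uniform(y1, y2, 100).\n    # Create 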
random sample points\n Xsamples = ([])\n Ysamples = ([])\n for i in range(100):\n a = uniform(x1,x2)\n Xsamples.append(a)\n b = uniform(y1,y2)\n Ysamples.append(b)\n Xsamples = numpy.array(Xsamples)\n Ysamples = numpy.array(Ysamples)\n XYsamples=numpy.vstack((Xsamples, Ysamples)).T\n \n # Find the mean and standard deviation of the sample points\n mu, std = surrogate(model, XYsamples)\n \n # Calculate the maximum probability of improvement\n r=(mu-best)\n c=(r)/(std+1e-9)\n with catch_warnings():\n # Ignore scaling warnings (not true)\n simplefilter(\"ignore\")\n c= preprocessing.scale(c) \n scores=norm.cdf(c - e)\n \n # Find point with best score\n if max_min == 'maximum':\n index_max = (numpy.argwhere(scores == numpy.max(scores)))\n if max_min == 'minimum':\n index_max = (numpy.argwhere(scores == numpy.min(scores)))\n \n ix_max = index_max[0,0]\n X_max, Y_max = XYsamples[ix_max]\n X_best = float(X_max)\n Y_best = float(Y_max)\n \n return X_best, Y_best\n\n# plot real observations\ndef plot(plot_func, x_bounds, y_bounds):\n '''\n Plots the function which is optimized\n\n Parameters\n ----------\n plot_func : function\n The function to be plotted.\n x_bounds : list\n Two element list of x-axis boundaries for the function.\n y_bounds : list\n Two element list of y-axis boundaries for the function.\n\n Returns\n -------\n None.\n\n '''\n #Unpack bounds\n x1, x2 = x_bounds\n y1, y2 = y_bounds\n \n #Plot the function that is optimized\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n Xsamples = numpy.linspace(x1, x2, 500)\n Ysamples = numpy.linspace(y1, y2, 500)\n X, Y = numpy.meshgrid(Xsamples, Ysamples)\n Z = plot_func(X, Y)\n ax.plot_surface(X,Y,Z, cmap = 'jet' )\n ax.view_init(45, 45)\n plt.show()\n\n\n# Set domain and evaluations per cycle\ndef initial_points(starting_n, opt_func, x_bounds, y_bounds):\n '''\n Picks and evaluates random initial points from the function\n\n Parameters\n ----------\n starting_n : int\n Number of initial points picked.\n opt_func : function\n The function from which the points are picked.\n x_bounds : list\n Two element list of x-axis boundaries for the function.\n y_bounds : list\n Two element list of y-axis boundaries for the function.\n\n Returns\n -------\n XY : numpy array\n x and y coordinates for the inital points.\n z : numpy array\n Function values for inital points.\n\n '''\n #Unpack bounds\n x1, x2 = x_bounds\n y1, y2 = y_bounds\n \n #Pick random points within bounds\n X = ([])\n for i in range(0, starting_n):\n a = uniform(x1, x2)\n X.append(a)\n Y = ([])\n for i in range(0, starting_n):\n b = uniform(y1, y2)\n Y.append(b)\n X=numpy.array(X)\n Y=numpy.array(Y)\n XY=numpy.vstack((X, Y)).T\n z= opt_func(X, Y)\n \n return XY, z\n\n \ndef fit_model(model, data_input, data_output):\n '''\n Fits new data to the model\n\n Parameters\n ----------\n model : sklearn.gaussian_process\n Some Gaussian process model.\n data_input : numpy array\n x and y coordinates to be fitted.\n data_output : numpy array\n Corresponding function values to the x and y coordinates.\n\n Returns\n -------\n None.\n\n '''\n model.fit(data_input, data_output)\n\n \ndef optimize(opt_func, aquisition_func, starting_n, x_bounds, y_bounds, iterations, e, model, max_min):\n '''\n \n\n Parameters\n ----------\n opt_func : function\n Function that is optimized.\n aquisition_func : function\n Function used to pick points to evaluate.\n starting_n : int\n Initial number of random points evaluated.\n x_bounds : list\n Two element list of x-axis boundaries for the 
function.\n y_bounds : list\n Two element list of y-axis boundaries for the function.\n iterations : int\n Number of times optimization is run.\n e : float\n Exploration parameter.\n model : sklearn.gaussian_process\n Some Gaussian process model.\n max_min : str\n Specifies whether the algorithm is searching for maxima or minima.\n\n Returns\n -------\n XY : numpy array\n Array of all points evaluated.\n z : numpy array\n Value of all points evaluated.\n\n '''\n #Unpack inital points\n XY, z = initial_points(starting_n, opt_func, x_bounds, y_bounds)\n # Perform the optimization process\n \n for i in range(iterations):\n # Select the next point to sample\n x, y, *rest= aquisition_func(XY, x_bounds, y_bounds, e, model, max_min)\n XYmingi = numpy.array(([x, y]))\n XYmingi = XYmingi.reshape(1, -1)\n \n # Sample the point\n actual = opt_func(x, y)\n \n # Show process\n print(f'{i+1}/{iterations} completed')\n print(f'Currently evaluating x=%.3f y=%.3f with value of z=%.4f' % (x, y, actual))\n \n # Add the data to the dataset\n XYnew_element = numpy.array(([x, y]))\n XY=numpy.vstack((XY, XYnew_element))\n z = list(z)\n z.append(actual)\n z = numpy.array(z)\n \n #Show current best result\n if max_min == 'maximum':\n z_best = numpy.max(z)\n if max_min == 'minimum':\n z_best = numpy.min(z)\n print(f'Current {max_min} found is z = {z_best}')\n \n # Update the model with new data\n fit_model(model, XY, z)\n \n return XY, z\n \ndef results(opt_function, acquisition, starting_n, x_bounds, y_bounds, iterations, e, model, max_min): \n '''\n Returns the results of the optimization process\n\n Parameters\n ----------\n opt_function : function\n Function that is optimized.\n aquisition_func : function\n Function used to pick points to evaluate.\n starting_n : int\n Initial number of random points evaluated.\n x_bounds : list\n Two element list of x-axis boundaries for the function.\n y_bounds : list\n Two element list of y-axis boundaries for the function.\n iterations : int\n Number of times optimization is run.\n e : float\n Exploration parameter.\n model : sklearn.gaussian_process\n Some Gaussian process model.\n max_min : str\n Specifies whether the algorithm is searching for maxima or minima.\n\n Returns\n -------\n x : float\n x-coordinate of minimum/maximum found.\n y : float\n y-coordinate of minimum/maximum found.\n z_best : float\n Value of the function at x, y coordinates found.\n\n '''\n #Start timer\n startTime = datetime.now()\n #Optimize and unpack XY and corresponding z\n XY, z = optimize(opt_function, acquisition, starting_n, x_bounds, y_bounds, iterations, e, model, max_min)\n # Find best result\n if max_min == 'maximum':\n index = (numpy.argwhere(z == numpy.max(z)))\n z_best = numpy.max(z)\n ix = index[0, 0]\n if max_min == 'minimum':\n index = (numpy.argwhere(z == numpy.min(z)))\n z_best = numpy.min(z)\n ix = index[0,0]\n x, y = XY[ix]\n \n #Unpack all XY and z evaluated and plot as scatter plot\n Xfinal=([])\n Yfinal=([])\n for x, y in XY:\n Xfinal.append(x)\n Yfinal.append(y)\n Xfinal=numpy.array(Xfinal)\n Yfinal=numpy.array(Yfinal)\n Z = z\n ax = plt.axes(projection='3d')\n ax.scatter(Xfinal, Yfinal, Z, linewidth=0.5)\n ax.view_init(45, 45)\n plt.show()\n \n #Print results and time taken\n print('')\n print('The %s found is at x=%f, y=%f with a value of z=%f' % (max_min, x, y, z_best))\n print('Time elapsed', datetime.now() - startTime)\n print('Time per iteration', (datetime.now() - startTime)/iterations)\n return x, y, z_best\n\ndef Bayesian2D(x_bounds, y_bounds, starting_n, 
iterations, max_min, exploration, function = 'Rosenbrock' ):\n '''\n Combines all the functions in the package to find the maximum/minimum of any 2D\n function specified.\n\n Parameters\n ----------\n x_bounds : list\n Two element list of x-axis boundaries for the function.\n y_bounds : list\n Two element list of y-axis boundaries for the function.\n starting_n : int\n Initial number of random points evaluated.\n iterations : int\n Number of times optimization is run.\n max_min : str\n Specifies whether the algorithm is searching for maxima or minima.\n exploration : float\n Exploration parameter.\n function : str, optional\n Either the name of one of the built in functions or a custom 2D function. \n The default is 'Rosenbrock'.\n\n Returns\n -------\n function\n Result function.\n\n '''\n def objective(x, y):\n '''\n Returns only the function to be optimized so that it needn't be \n specified each time'\n \n Parameters\n ----------\n x : float\n x-coordinate.\n y : float\n y-coordinate.\n \n Returns\n -------\n function\n The function to be optimized.\n \n '''\n return functions(x, y, function)\n #Randomize seed\n numpy.random.seed()\n \n #Set parameters for surrogate model and aquisition function\n e=exploration\n model = GaussianProcessRegressor(kernel= Matern(), alpha = 1e-10)\n #Create initial random set of points\n XY_initial, z_initial = initial_points(starting_n, objective, x_bounds, y_bounds)\n \n #fit initial points to model\n fit_model(model, XY_initial, z_initial)\n \n #Plot function before optimization\n plot(objective, x_bounds, y_bounds)\n \n #Find results\n \n return results(objective, acquisition, starting_n, x_bounds, y_bounds, iterations, e, model, max_min)\n\n","sub_path":"Bayesian2D/scripts/Bayesian2D_scr.py","file_name":"Bayesian2D_scr.py","file_ext":"py","file_size_in_byte":13285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"64210283","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nfrom glob import glob\nfrom pathlib import Path\nimport ipyvuetify as v\nfrom traitlets import HasTraits, Unicode, List, observe, link\n\nfrom functools import partial\nfrom .styles.styles import *\n\n\nclass SepalWidget(v.VuetifyWidget):\n \n def __init__(self, **kwargs):\n \n super().__init__(**kwargs)\n self.viz = True\n \n def toggle_viz(self):\n \"\"\"toogle the visibility of the widget\"\"\"\n if self.viz:\n self.hide()\n else:\n self.show()\n \n return self\n \n def hide(self):\n \"\"\"add the d-none html class to the widget\"\"\"\n if not 'd-none' in str(self.class_):\n self.class_ = str(self.class_).strip() + ' d-none'\n self.viz = False\n \n return self\n \n def show(self):\n \"\"\" remove the d-none html class to the widget\"\"\"\n if 'd-none' in str(self.class_):\n self.class_ = str(self.class_).replace('d-none', '')\n self.viz = True\n \n return self\n\nclass Alert(v.Alert, SepalWidget):\n \n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n \n self.children = ['']\n self.type = 'info'\n self.text = True\n self.clear()\n \n def add_msg(self, msg, type_='info'):\n self.show()\n self.type = type_\n self.children = [msg]\n \n def clear(self):\n self.hide()\n self.children = ['']\n\n\nclass Btn(v.Btn, SepalWidget):\n \"\"\"\n Creates a process button filled with the provided text\n \n Returns: \n btn (v.Btn) :\n \"\"\"\n \n\n def __init__(self, text='Button', icon=None, visible=True, **kwargs):\n super().__init__(**kwargs)\n self.color='primary'\n \n if icon:\n 
self.children=[self.set_icon(icon), text]\n else:\n self.children=[text]\n\n if not visible:\n self.hide()\n\n\n\n def set_icon(self, icon):\n\n common_icons = {\n 'default' : 'mdi-adjust',\n 'download' : 'mdi-download'\n }\n \n if icon in common_icons.keys():\n icon = common_icons[icon]\n \n return v.Icon(left=True, children=[icon]) \n\n def disable(self):\n self.disabled = True\n \n def activate(self):\n self.loading = False\n self.disabled = False\n \n def on_loading(self):\n self.loading = True\n\n\nclass VueDataFrame(v.VuetifyTemplate):\n \"\"\"\n Vuetify DataTable rendering of a pandas DataFrame\n \n Args:\n data (DataFrame) - the data to render\n title (str) - optional title\n \"\"\"\n\n from pandas import DataFrame\n headers = List([]).tag(sync=True, allow_null=True)\n items = List([]).tag(sync=True, allow_null=True)\n search = Unicode('').tag(sync=True)\n title = Unicode('DataFrame').tag(sync=True)\n index_col = Unicode('').tag(sync=True)\n template = Unicode('''\n \n ''').tag(sync=True)\n \n def __init__(self, *args, \n data=DataFrame(), \n title=None,\n **kwargs):\n super().__init__(*args, **kwargs)\n \n from json import loads\n data = data.reset_index()\n self.index_col = data.columns[0]\n headers = [{\n \"text\": col,\n \"value\": col\n } for col in data.columns]\n headers[0].update({'align': 'left', 'sortable': True})\n self.headers = headers\n self.items = loads(\n data.to_json(orient='records'))\n if title is not None:\n self.title = title\n\nclass FileInput(v.Flex, SepalWidget, HasTraits):\n\n file = Unicode('')\n \n def __init__(self, extentions = [], folder=os.path.expanduser('~'), label='search file', v_model = None, **kwargs):\n\n self.extentions = extentions\n self.folder = folder\n \n self.selected_file = v.TextField(\n readonly = True,\n label = 'Selected file', \n class_ = 'ml-5 mt-5',\n v_model = self.file\n )\n\n self.loading = v.ProgressLinear(\n indeterminate = False, \n background_color = 'grey darken-3',\n color = COMPONENTS['PROGRESS_BAR']['color']\n )\n \n self.file_list = v.List(\n dense = True, \n color = 'grey darken-3',\n flat = True,\n max_height = '300px',\n style_ = 'overflow: auto; border-radius: 0 0 0 0;',\n children = [ \n v.ListItemGroup(\n children = self._get_items(),\n v_model = ''\n )\n ]\n )\n\n self.file_menu = v.Menu(\n min_width = 300,\n children = [self.loading, self.file_list], \n close_on_content_click = False,\n v_slots = [{\n 'name': 'activator',\n 'variable': 'x',\n 'children': Btn(icon='mdi-file-search', v_model=False, v_on='x.on', text=label)\n }])\n \n self.reload = v.Btn(\n icon = True,\n color = 'primary',\n children = [v.Icon(children=['mdi-cached'])]\n )\n \n super().__init__(\n row = True,\n class_ = 'd-flex align-center mb-2',\n align_center = True,\n children = [\n self.reload,\n self.file_menu,\n self.selected_file,\n ],\n **kwargs\n )\n \n link((self.selected_file, 'v_model'), (self, 'file'))\n link((self.selected_file, 'v_model'), (self, 'v_model'))\n\n self.file_list.children[0].observe(self._on_file_select, 'v_model')\n self.reload.on_event('click', self._on_reload)\n \n def _on_file_select(self, change):\n new_value = change['new']\n if new_value:\n if os.path.isdir(new_value):\n self.folder = new_value\n self._change_folder()\n \n elif os.path.isfile(new_value):\n self.file = new_value\n \n return\n \n def _change_folder(self):\n \"\"\"change the target folder\"\"\"\n #reset files\n self.file_list.children[0].children = self._get_items()\n \n\n def _get_items(self):\n \"\"\"return the list of items inside the 
folder\"\"\"\n\n        self.loading.indeterminate = not self.loading.indeterminate\n        \n        folder = Path(self.folder)\n\n        list_dir = [el for el in folder.glob('*/') if not el.name.startswith('.')]\n\n        if self.extentions:\n            list_dir = [el for el in list_dir if el.is_dir() or el.suffix in self.extentions]\n\n        folder_list = []\n        file_list = []\n\n        for el in list_dir:\n            \n            if el.suffix in ICON_TYPES.keys():\n                icon = ICON_TYPES[el.suffix]['icon']\n                color = ICON_TYPES[el.suffix]['color']\n            else:\n                icon = ICON_TYPES['DEFAULT']['icon']\n                color = ICON_TYPES['DEFAULT']['color']\n            \n            children = [\n                v.ListItemAction(children=[v.Icon(color= color,children=[icon])]),\n                v.ListItemContent(children=[v.ListItemTitle(children=[el.stem + el.suffix])]),\n            ]    \n\n            if el.is_dir():\n                folder_list.append(v.ListItem(value=str(el), children=children))\n            else:\n                file_size = str(round(Path(el).stat().st_size/(1024*1024),2)) + ' MB'\n                children.append(v.ListItemActionText(children=[file_size]))\n                file_list.append(v.ListItem(value=str(el), children=children))\n\n        folder_list = sorted(folder_list, key=lambda x: x.value)\n        file_list = sorted(file_list, key=lambda x: x.value)\n\n        parent_path = str(folder.parent)\n        parent_item = v.ListItem(\n            value=parent_path, \n            children=[\n                v.ListItemAction(children=[v.Icon(color=ICON_TYPES['PARENT']['color'], children=[ICON_TYPES['PARENT']['icon']])]),\n                v.ListItemContent(children=[v.ListItemTitle(children=[f'..{parent_path}'])]),\n            ]\n        )\n\n        folder_list.extend(file_list)\n        folder_list.insert(0,parent_item)\n\n        self.loading.indeterminate = not self.loading.indeterminate\n        \n        return folder_list\n    \n    def _on_reload(self, widget, event, data):\n        \n        # force the update of the current folder\n        self._change_folder()\n        \n        return","sub_path":"sepal_ui/sepalwidgets.py","file_name":"sepalwidgets.py","file_ext":"py","file_size_in_byte":10194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"331848110","text":"HANGMANPICS = ['''\n  +---+\n  |   |\n      |\n      |\n      |\n      |\n=========''', '''\n  +---+\n  |   |\n  O   |\n      |\n      |\n      |\n=========''', '''\n  +---+\n  |   |\n  O   |\n  |   |\n      |\n      |\n=========''', '''\n  +---+\n  |   |\n  O   |\n /|   |\n      |\n      |\n=========''', '''\n  +---+\n  |   |\n  O   |\n /|\\ |\n      |\n      |\n=========''', '''\n  +---+\n  |   |\n  O   |\n /|\\ |\n /    |\n      |\n=========''', '''\n  +---+\n  |   |\n  O   |\n /|\\ |\n / \\ |\n      |\n=========''']\n\ndef display_board(missed_letters, correct_letters, secret_word):\n    print(HANGMANPICS[len(missed_letters)])\n    print()\n    print('Missed letters:', end=' ')\n    for letter in missed_letters:\n        print(letter, end=' ')\n    print()\n    blanks = '_' * len(secret_word)\n    for i in range(len(secret_word)): # replace blanks with correctly guessed letters\n        if secret_word[i] in correct_letters:\n            blanks = blanks[:i] + secret_word[i] + blanks[i + 1:]\n    for letter in blanks: # show the secret word with spaces in between each letter\n        print(letter, end=' ')\n    print()\n","sub_path":"graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"168669650","text":"# -*- coding: utf-8 -*-\n\n# # Restoring a TensorFlow model\n\nimport tensorflow as tf\n\n# 1. Rebuild the graph (network)\n# - restore it from the .meta file\n# - use the tf.train.import_meta_graph() function\n# - it is safest to clear any existing graph first with\n#   tf.reset_default_graph() ************\ntf.reset_default_graph()\n\nsaver = tf.train.import_meta_graph('../save/sessModel.meta')\n\n# 2. Restore the parameters\n# - restore all saved parameters with tf.train.Saver()\nwith tf.Session() as sess: \n    saver.restore(sess, \n                  tf.train.latest_checkpoint('../save/'))\n    print(sess.run('w1:0'))\n    print(sess.run('w2:0'))\n\n\n\n\n\n","sub_path":"dev/20190502_ml_teacher/3_tensorflow/3_save_restore/tf_29_restore_01.py","file_name":"tf_29_restore_01.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"79975485","text":"import configparser\r\nimport requests\r\nimport json\r\ndef getkey():\r\n\tconfig = configparser.ConfigParser()\r\n\tconfig.read(\"config.ini\")\r\n\treturn config[\"openweathermap\"][\"api\"]\r\ndef getdata(apikey):\r\n\turl = \"http://api.openweathermap.org/data/2.5/weather?q=London,gb&mode=json&units=metric&appid=\"+str(apikey)\r\n\tdata = requests.get(url)\r\n\tobj1 = data.json()\r\n\turl = \"http://api.openweathermap.org/data/2.5/weather?q=Chennai,in&mode=json&units=metric&appid=\"+str(apikey)\r\n\tdata = requests.get(url)\r\n\tobj2 = data.json()\r\n\tprint('Temperature difference between '+str(obj2.get('name'))+ ' and ' + str(obj1.get('name'))+':'+str(obj2.get('main').get('temp')-obj1.get('main').get('temp')))\r\n\t\r\napikey = getkey()\r\ngetdata(apikey)\r\n","sub_path":"December-16/py_ajaykrishnan23.py","file_name":"py_ajaykrishnan23.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"390298893","text":"from django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.postgres.fields import JSONField\n\n\nclass Musician(models.Model):\n    \"\"\"\n    Represent Musician.\n    \"\"\"\n\n    BEGINNER, INTERMEDIATE, ADVANCED = 'beginner', 'intermediate', 'advanced'\n\n    DEGREES = (\n        (BEGINNER, _('Beginner')),\n        (INTERMEDIATE, _('Intermediate')),\n        (ADVANCED, _('Advanced'))\n    )\n\n    # It's redundant to set null=True on a CharField;\n    # an empty string will be saved instead.\n    # blank=True affects form validation only.\n    first_name = models.CharField(max_length=30)\n    last_name = models.CharField(max_length=30)\n    # One case where null=True is required together with blank=True:\n    # to avoid unique-constraint violations on empty values.\n    pseudo = models.CharField(max_length=30, unique=True, blank=True, null=True)\n    # If default is not specified an empty string will be saved.\n    # The readable label is also available through the get_degree_display() method.\n    degree = models.CharField(max_length=20, choices=DEGREES)\n    # We can build a custom index for searching.\n    # Selectivity is the ratio of filtered rows to all rows in the table:\n    # low selectivity -> 1, high selectivity -> 0.\n    # If selectivity is high, building a DB index is an appropriate optimization.\n
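    # For example (added illustration; not part of the original model):\n    #\n    #     class Meta:\n    #         indexes = [models.Index(fields=['last_name'])]\n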
    # For a JSONField default, pass a callable (dict itself, or a function that\n    # returns a dict); a plain {} or dict() is evaluated once and shared.\n    json_data = JSONField(default=dict)\n    # auto_now_add=True: set once when the object is created.\n    created_at = models.DateField(auto_now_add=True)\n    # auto_now=True: updated on every save.\n    modified_at = models.DateField(auto_now=True)\n\n\n# Many-to-One relationship (with Foreignkey)\n# M2M\n\nclass Reporter(models.Model):\n    first_name = models.CharField(max_length=30)\n    last_name = models.CharField(max_length=30)\n    email = models.EmailField()\n\n    def __str__(self):\n        return \"%s %s\" % (self.first_name, self.last_name)\n\n\nclass Article(models.Model):\n    headline = models.CharField(max_length=100)\n    pub_date = models.DateField()\n    reporter = models.ForeignKey('Reporter', on_delete=models.CASCADE)\n    publications = models.ManyToManyField('Publication', related_name='articles')\n\n    def __str__(self):\n        return self.headline\n\n    class Meta:\n        ordering = ('headline',)\n\n    # We can retrieve all publications - (self.publications.all())\n\n\nclass Publication(models.Model):\n    title = models.CharField(max_length=30)\n\n    def __str__(self):\n        return self.title\n\n    class Meta:\n        ordering = ('title',)\n\n    # We can retrieve all articles by - (self.articles.all())\n","sub_path":"apps/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"105445474","text":"def handle(self, request):\n    user = auth.get_pending_2fa_user(request)\n    if (user is None):\n        return HttpResponseRedirect(auth.get_login_url())\n    interfaces = Authenticator.objects.all_interfaces_for_user(user)\n    if (not interfaces):\n        return self.perform_signin(request, user)\n    challenge = activation = None\n    interface = self.negotiate_interface(request, interfaces)\n    if ((request.method == 'POST') and ratelimiter.is_limited('auth-2fa:user:{}'.format(user.id), limit=5, window=60)):\n        return HttpResponse('You have made too many 2FA attempts. 
Please try again later.', content_type='text/plain', status=429)\n if (request.method == 'GET'):\n activation = interface.activate(request)\n if ((activation is not None) and (activation.type == 'challenge')):\n challenge = activation.challenge\n elif ('challenge' in request.POST):\n challenge = json.loads(request.POST['challenge'])\n form = TwoFactorForm()\n otp = request.POST.get('otp')\n if otp:\n used_interface = self.validate_otp(otp, interface, interfaces)\n if (used_interface is not None):\n return self.perform_signin(request, user, used_interface)\n self.fail_signin(request, user, form)\n if challenge:\n response = request.POST.get('response')\n if response:\n response = json.loads(response)\n if interface.validate_response(request, challenge, response):\n return self.perform_signin(request, user, interface)\n self.fail_signin(request, user, form)\n return render_to_response([('sentry/twofactor_%s.html' % interface.interface_id), 'sentry/twofactor.html'], {\n 'form': form,\n 'interface': interface,\n 'other_interfaces': self.get_other_interfaces(interface, interfaces),\n 'activation': activation,\n }, request, status=200)","sub_path":"Data Set/bug-fixing-5/f401cf91b885e22fc36a74d2dba4ce4a6b5a6197--fix.py","file_name":"f401cf91b885e22fc36a74d2dba4ce4a6b5a6197--fix.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"9647751","text":"\n# coding: utf-8\n\n# # Import Libraries\n# \n\n# In[12]:\n\n\nfrom os import listdir\nfrom os.path import isfile , join\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom pathlib import Path\nimport glob\nimport cv2\n\n\n# # A function to show RGB images\n# \n# \n\n# In[13]:\n\n\ndef view( image ):\n plt.figure(figsize=(10,20))\n plt.imshow( image )\n\n\n# ## 1. Load Images and show them\n\n# In[14]:\n\n\nimages_files = [ join(\"./images\" , f) for f in listdir(\"images\") if isfile(join(\"images\" , f)) ]\n\nimages = [ mpimg.imread( f ) for f in images_files ]\ncolor = ('b','g','r')\nimageList = list(images)\n[view (x) for x in imageList ]\n\n\n# ## 2. 
Make a histogram of all three color channels for each image\n\n# In[15]:\n\n\nfor x in range(len(imageList)):\n    for i,col in enumerate(color): \n        histr = cv2.calcHist([imageList[x]],[i],None,[256],[0,256])\n        plt.plot(histr,color = col)\n        plt.xlim([0,256]) \n    \n    plt.show()\n\n\n# # Make a function for mouse events\n\n# In[18]:\n\n\ndef mouse_event(event, x, y, flags, param):\n    # React only to left-button clicks.\n    if event != cv2.EVENT_LBUTTONDOWN:\n        return\n    # Redraw on a fresh copy of the image so earlier overlays are cleared.\n    img = cv2.imread(path,-1)\n    win = cv2.rectangle(img,(x-13,y-13),(x+13,y+13),(100,50,255),0)\n    position = \"x,y: (\"+str(x)+\",\"+str(y)+\")\"\n    RGB = \"RGB: \"+str(img[y,x])\n    avg = \"mean: \"+str(np.mean(win))\n    var = \"variance: \"+str(np.std(win))\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    cv2.putText(img,position, (x-20,y-40),font, 0.6, (230,55,150), 1, cv2.LINE_AA)\n    cv2.putText(img,RGB, (x-20,y-20),font, 0.6, (255,25,0), 1, cv2.LINE_AA)\n    cv2.putText(img,avg, (x-20,y-60),font, 0.6, (50,0,50), 1, cv2.LINE_AA)\n    cv2.putText(img,var, (x-20,y-80),font, 0.6, (50,0,50), 1, cv2.LINE_AA)\n    cv2.imshow('original', img)\n\n\n# ## 1. Load the some-pigeon image to try the mouse event\n\n# In[19]:\n\n\npath='./images/some-pigeon.jpg'\nimg = cv2.imread(path)\ncv2.imshow('original', img)\ncv2.setMouseCallback(\"original\", mouse_event)\n\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n\n\n# ### Homogeneous region: the variance values change only slightly\n\n# # Gradient\n\n# ## 1. Gradient using for loops\n\n# In[26]:\n\n\npath='./images/some-pigeon.jpg'\nimg = cv2.imread(path)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nfor i in range(img.shape[0]):\n    for j in range(img.shape[1]):\n        # Cast to int so the uint8 subtraction cannot wrap around;\n        # index -1 wraps to the last row/column, matching the np.roll version below.\n        gy = int(img[i][j]) - int(img[i][j-1])\n        gx = int(img[i][j]) - int(img[i-1][j])\n        gx2 = np.power(gx, 2)\n        gy2 = np.power(gy, 2)\n        g = np.sqrt(gx2+gy2)\n        print(g)\n\n\n# ## 2. Gradient without for loops\n\n# ### Function for gray scale images\n\n# In[20]:\n\n\ndef rgb2gray(rgb_image):\n    return np.dot(rgb_image[...,:3], [0.299, 0.587, 0.114])\n\n\n# ### Function for viewing multiple images\n\n# In[21]:\n\n\ndef multi_view( images ):\n    images_count = len( images )\n    fig = plt.figure(figsize=(10,20))\n    for row in range( images_count ):\n        ax1 = fig.add_subplot( images_count , 1 , row + 1)\n        ax1.imshow( images[ row ] )\n\n\n# In[24]:\n\n\npath='./images/Pyramids2.jpg'\nimg = cv2.imread(path)\ngray_image = rgb2gray( img )\ngray_image_v = np.roll( gray_image , 1 , 0 )\ngray_image_h = np.roll( gray_image , 1 , 1 )\n\ngray_image_gv = np.abs( gray_image - gray_image_v )\ngray_image_gh = np.abs( gray_image - gray_image_h )\n\ngray_image_gv2 = np.power (gray_image_gv, 2 )\ngray_image_gh2 = np.power (gray_image_gh, 2 )\n\ngradient = np.sqrt( gray_image_gv2 + gray_image_gh2 )\n\ncomb = tuple( (gray_image , gray_image_gv, gray_image_gh, gradient) )\n\nmulti_view( comb )\n\n\n# ### The for-loop version takes far longer to execute than the vectorized one\n","sub_path":"Part1.py","file_name":"Part1.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"321708106","text":"# coding: utf-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport random\nimport time\nfrom .config import *\nfrom .utils import *\nfrom .uer.utils.tokenizer import BertTokenizer\nfrom .uer.utils.vocab import Vocab\nfrom .uer.model_builder import build_model\nfrom .uer.utils.optimizers import AdamW, WarmupLinearSchedule\nfrom .uer.layers.multi_headed_attn import 
MultiHeadedAttention\nfrom .uer.model_saver import save_model\nfrom .uer.model_loader import load_model\nfrom .fastbert import FastBERT, FastBERT_S2, BertMiniClassifier\nfrom .fastgpt import GptMiniClassifier\n\n\ndef clip_gradient(model,\n clip_value):\n params = list(filter(lambda p: p.grad is not None, model.parameters()))\n for p in params:\n p.grad.data.clamp_(-clip_value, clip_value)\n\n\nclass GcnnMiniClassifier(BertMiniClassifier):\n\n def __init__(self,\n args,\n input_size,\n labels_num):\n super(GcnnMiniClassifier, self).__init__(args, input_size, labels_num)\n\n\nclass FastGCNN(FastBERT):\n\n MiniClassifier = GcnnMiniClassifier\n\n def __init__(self,\n kernel_name,\n labels,\n **kwargs):\n super(FastGCNN, self).__init__(kernel_name, labels, **kwargs)\n assert self.args.encoder == 'gatedcnn', 'encoder in args must be gatedcnn.'\n\n def _forward_for_loss(self,\n sentences_batch,\n labels_batch=None):\n\n self.train()\n ids_batch, masks_batch = [], []\n for sentence in sentences_batch:\n ids, masks = self._convert_to_id_and_mask(sentence)\n ids_batch.append(ids)\n masks_batch.append(masks)\n ids_batch = torch.tensor(ids_batch, dtype=torch.int64, device=self.args.device) # batch_size x seq_length\n masks_batch = torch.tensor(masks_batch, dtype=torch.int64, device=self.args.device) # batch_size x seq_length\n\n # embedding layer\n embs_batch = self.kernel.embedding(ids_batch, masks_batch) # batch_size x seq_length x emb_size\n batch_size, seq_length, emb_size = embs_batch.size()\n masks_batch = self._mask_transfer(masks_batch, embs_batch) # batch_size x seq_length x seq_length\n\n # gcnn encoder layer\n res_input_batch = torch.transpose(embs_batch.unsqueeze(3), 1, 2)\n padding_batch = torch.zeros([batch_size, self.args.kernel_size-1, \\\n emb_size]).to(embs_batch.device)\n embs_batch = torch.cat([padding_batch, embs_batch], dim=1).unsqueeze(1) # batch_size, 1, seq_length+width-1, emb_size\n\n hidden_batch = self.kernel.encoder.conv_1(embs_batch)\n gate_batch = self.kernel.encoder.gate_1(embs_batch)\n hidden_batch = hidden_batch * torch.sigmoid(gate_batch)\n\n padding_batch = torch.zeros([batch_size, self.args.hidden_size, \\\n self.args.kernel_size-1, 1]).to(embs_batch.device)\n hidden_batch = torch.cat([padding_batch, hidden_batch], dim=2)\n\n teacher_idx = self.kernel.encoder.layers_num - 2\n if labels_batch is not None:\n # training backbone of fastgcnn\n\n label_ids_batch = [self.label_map[label] for label in labels_batch]\n label_ids_batch = torch.tensor(label_ids_batch, dtype=torch.int64,\n device=self.args.device)\n\n for i in range(self.kernel.encoder.layers_num - 1):\n hidden_tmp_batch = self.kernel.encoder.conv[i](hidden_batch)\n gate_batch = self.kernel.encoder.gate[i](hidden_batch)\n hidden_batch = hidden_tmp_batch * torch.sigmoid(gate_batch)\n if (i+1) % self.args.block_size:\n if i != 0: # different to UER\n hidden_batch = hidden_batch + res_input_batch\n res_input_batch = hidden_batch\n hidden_batch = torch.cat([padding_batch, hidden_batch], dim=2)\n\n hidden_batch = hidden_batch[:,:,self.args.kernel_size-1:,:]\n output_batch = hidden_batch.transpose(1,2).contiguous().\\\n view(batch_size, seq_length, self.args.hidden_size)\n logits_batch = self.classifiers[teacher_idx](output_batch, masks_batch)\n loss = self.criterion(\n self.softmax(logits_batch.view(-1, self.labels_num)),\n label_ids_batch.view(-1))\n\n return loss\n\n else:\n\n # distilating the student classifiers\n hidden_batch_list = []\n with torch.no_grad():\n for i in range(self.kernel.encoder.layers_num - 
1):\n hidden_tmp_batch = self.kernel.encoder.conv[i](hidden_batch)\n gate_batch = self.kernel.encoder.gate[i](hidden_batch)\n hidden_batch = hidden_tmp_batch * torch.sigmoid(gate_batch)\n if (i+1) % self.args.block_size:\n if i != 0:\n hidden_batch = hidden_batch + res_input_batch\n res_input_batch = hidden_batch\n hidden_batch = torch.cat([padding_batch, hidden_batch], dim=2)\n\n output_batch = hidden_batch[:,:,self.args.kernel_size-1:,:]\n output_batch = output_batch.transpose(1, 2).contiguous().\\\n view(batch_size, seq_length, self.args.hidden_size)\n hidden_batch_list.append(output_batch)\n\n teacher_logits = self.classifiers[teacher_idx](\n hidden_batch_list[teacher_idx], masks_batch\n ).view(-1, self.labels_num)\n teacher_probs = F.softmax(teacher_logits, dim=1)\n\n loss = 0\n for i in range(self.kernel.encoder.layers_num - 2):\n student_logits = self.classifiers[i](\n hidden_batch_list[i], masks_batch\n ).view(-1, self.labels_num)\n loss += self.soft_criterion(\n self.softmax(student_logits), teacher_probs)\n return loss\n\n def _fast_infer(self,\n sentence,\n speed):\n\n ids, mask = self._convert_to_id_and_mask(sentence)\n\n self.eval()\n with torch.no_grad():\n ids = torch.tensor([ids], dtype=torch.int64, device=self.args.device) # batch_size x seq_length\n mask = torch.tensor([mask], dtype=torch.int64, device=self.args.device) # batch_size x seq_length\n\n # embedding layer\n emb = self.kernel.embedding(ids, mask) # batch_size x seq_length x emb_size\n batch_size, seq_length, emb_size = emb.size()\n mask = self._mask_transfer(mask, emb) # batch_size x seq_length x seq_length\n\n # gcnn encoder layer\n res_input = torch.transpose(emb.unsqueeze(3), 1, 2)\n padding = torch.zeros([batch_size, self.args.kernel_size-1, \\\n emb_size]).to(emb.device)\n emb = torch.cat([padding, emb], dim=1).unsqueeze(1)\n\n hidden = self.kernel.encoder.conv_1(emb)\n gate = self.kernel.encoder.gate_1(emb)\n hidden = hidden * torch.sigmoid(gate)\n\n padding = torch.zeros([batch_size, self.args.hidden_size, \\\n self.args.kernel_size-1, 1]).to(emb.device)\n hidden = torch.cat([padding, hidden], dim=2)\n\n teacher_idx = self.kernel.encoder.layers_num - 2 \n exec_layer_num = self.kernel.encoder.layers_num\n for i in range(self.kernel.encoder.layers_num - 1):\n hidden_tmp = self.kernel.encoder.conv[i](hidden)\n gate = self.kernel.encoder.gate[i](hidden)\n hidden = hidden_tmp * torch.sigmoid(gate)\n if (i+1) % self.args.block_size:\n if i != 0:\n hidden = hidden + res_input\n res_input = hidden\n hidden = torch.cat([padding, hidden], dim=2)\n\n output = hidden[:, :, self.args.kernel_size-1:, :]\n output = output.transpose(1, 2).contiguous().\\\n view(batch_size, seq_length, self.args.hidden_size)\n student_logits = self.classifiers[i](\n output, mask).view(-1, self.labels_num)\n student_probs = F.softmax(student_logits, dim=1)\n uncertainty = calc_uncertainty(student_probs, labels_num=self.labels_num).item()\n \n if uncertainty < speed:\n exec_layer_num = i + 2 # not i + 1\n break\n\n label_id = torch.argmax(student_probs, dim=1).item()\n return label_id, exec_layer_num\n\n def _fine_tuning_backbone(self,\n sentences_train,\n labels_train,\n sentences_dev,\n labels_dev,\n batch_size,\n learning_rate,\n epochs_num,\n warmup,\n report_steps,\n model_saving_path,\n training_sample_rate,\n verbose=True):\n\n if verbose:\n self._print(\"Fine-tuning the backbone for {} epochs using {}.\". 
\\\n format(epochs_num, self.args.device))\n\n instances_num = len(sentences_train)\n dev_num = len(sentences_dev)\n train_steps = int(instances_num * epochs_num / batch_size) + 1\n steps_num = instances_num // batch_size\n\n # create optimizer\n param_optimizer = list(self.named_parameters())\n no_decay = ['bias', 'gamma', 'beta']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer \\\n if not any(nd in n for nd in no_decay)], \\\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer \\\n if any(nd in n for nd in no_decay)], \\\n 'weight_decay_rate': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, \\\n correct_bias=False)\n # optimizer = torch.optim.SGD(optimizer_grouped_parameters, \\\n # lr=learning_rate, momentum=0.99, nesterov=True)\n # optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=learning_rate)\n scheduler = WarmupLinearSchedule(optimizer, \\\n warmup_steps=train_steps*warmup, t_total=train_steps)\n\n # fine-tuning\n best_acc = 0.0\n for epoch in range(epochs_num):\n sentences_train, labels_train = shuffle_pairs(\n sentences_train, labels_train)\n report_loss = 0.\n for step in range(steps_num):\n optimizer.zero_grad()\n sentences_batch = sentences_train[step*batch_size : (step+1)*batch_size]\n labels_batch = labels_train[step*batch_size : (step+1)*batch_size]\n loss = self._forward_for_loss(sentences_batch, labels_batch)\n\n report_loss += loss.item()\n if (step+1) % report_steps == 0:\n ave_loss = report_loss / report_steps\n report_loss = 0.\n if verbose:\n self._print(\"Fine-tuning epoch {}/{}\".\\\n format(epoch+1, epochs_num),\n \"step {}/{}: loss = {:.3f}\". \\\n format(step+1, steps_num, ave_loss))\n\n loss.backward()\n # clip_gradient(self, 1e-1) # prevent gradient explosion\n optimizer.step()\n scheduler.step()\n\n dev_acc, _ = self._evaluate(sentences_dev, labels_dev, speed=0.0) \\\n if dev_num > 0 else (0.0, 0.0)\n train_acc, _ = self._evaluate(sentences_train, labels_train,\n speed=0.0, sample_rate=training_sample_rate)\n if verbose:\n self._print(\"Evaluating at fine-tuning epoch {}/{}\".\\\n format(epoch+1, epochs_num),\n \": train_acc = {:.3f}, dev_acc = {:.3f}\". \\\n format(train_acc, dev_acc))\n\n if dev_num > 0:\n if dev_acc >= best_acc:\n # saving model\n if verbose:\n self._print(\"dev_acc ({}) > best_acc ({}),\".\\\n format(dev_acc, best_acc),\n \"saving model to {}.\".\\\n format(model_saving_path))\n save_model(self, model_saving_path)\n best_acc = dev_acc\n else:\n if train_acc >= best_acc:\n if verbose:\n self._print(\"train_acc ({}) > best_acc ({}),\".\\\n format(train_acc, best_acc),\n \"saving model to {}.\".\\\n format(model_saving_path))\n save_model(self, model_saving_path)\n best_acc = train_acc\n\n # loading the best model\n if verbose:\n self._print(\"Finish fine-tuning. 
Loading the best model from {}\".\\\n format(model_saving_path))\n load_model(self, model_saving_path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pypi/fastbert/fastgcnn.py","file_name":"fastgcnn.py","file_ext":"py","file_size_in_byte":13511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160308822","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pandas\nimport numpy as np\n\n\nfile = 'titanic.csv'\nfile_col = 'PassengerId'\nglobal count_of_victims\n\n\ndef parse_date(file, file_col):\n data = pandas.read_csv(file, index_col=file_col)\n return data\n\n\ndef parse_name(row):\n if '(' in row:\n name = row.split('(')[1]\n if 'Mrs.' in row.split()[2] or 'Miss.' in row.split()[2]:\n name = row.split()[1]\n name = name.split()[0]\n name = name.strip('\"()')\n\n # print(name)\n elif 'Mrs.' in row.split()[2] or 'Miss.' in row.split()[2]:\n name = row.split()[3]\n elif 'Mr.' in row.split()[2]:\n name = row.split()[3]\n else:\n name = row.split()[2]\n name = name.strip('\"()')\n return name\n\n\ndef count_of_genders(col):\n count_of_man, count_of_women = 0, 0\n for i in col:\n if i == 'male':\n count_of_man += 1\n else:\n count_of_women += 1\n print(count_of_man, count_of_women)\n # print('count of man:', count_of_man, '\\ncount of women:', count_of_women)\n global count_of_victims\n count_of_victims = count_of_man + count_of_women\n\n\ndef part_of_survived(col):\n count_of_survs = 0\n for i in col:\n if i == 1:\n count_of_survs += 1\n # print('part of survived:', round(\n # count_of_survs / count_of_victims * 100, 2), '%')\n print(round(\n count_of_survs / count_of_victims * 100, 2))\n\n\ndef part_of_firstclass(col):\n count_of_firstclass = 0\n for i in col:\n if i == 1:\n count_of_firstclass = +1\n # print('part of passengers of first class:', round(\n # count_of_firstclass / count_of_victims * 100, 2), '%')\n print(round(\n count_of_firstclass / count_of_victims * 100, 2))\n\n\ndef counts_of_age(col):\n total_age = 0\n count_nan = 0\n for i in col:\n if pandas.isnull(i):\n total_age += 0\n count_nan = +1\n else:\n total_age += i\n age_lst = col.dropna().tolist()\n age_lst.sort()\n # print('average age:', round(total_age / (count_of_victims - count_nan)))\n if (len(age_lst) % 2) == 0:\n median = (age_lst[int(len(age_lst) / 2)] +\n age_lst[int(len(age_lst) / 2) + 1]) / 2\n else:\n median = age_lst[int(len(age_lst) / 2)]\n # print('median of age', median)\n print(round(total_age / (count_of_victims - count_nan)), median)\n\n\ndef correlatton_of_pierse(col1, col2):\n total_x, total_y, total_dx2, total_dy2, total_mul_dx_dy = 0, 0, 0, 0, 0\n d_x, d_y, d_x_2, d_y_2 = [], [], [], []\n for i in col1:\n total_x += i\n for i in col2:\n total_y += i\n mx = total_x / len(col1)\n my = total_y / len(col2)\n for i in col1:\n d_x.append(i - mx)\n d_x_2.append((i - mx)**2)\n total_dx2 += ((i - mx)**2)\n for i in col2:\n d_y.append(i - my)\n d_y_2.append((i - my)**2)\n total_dy2 += ((i - my)**2)\n for i in range(len(d_x)):\n total_mul_dx_dy += (d_x[i] * d_y[i])\n rxy = total_mul_dx_dy / (total_dx2 * total_dy2)**0.5\n # print('correlation of pierse', round(rxy, 2))\n print(round(rxy, 2))\n\n\ndef dic_sort(dic):\n for key in sorted(dic, key=lambda x: dic[x][1]):\n print('key', key, 'value', dic[key])\n\n\ndef get_key(dic, value):\n for k, v in dic.items():\n if v == value:\n return k\n\n\ndef adding_to_dic(dic, name):\n if name not in dic:\n dic.update({name: 1})\n else:\n count = dic.get(name)\n count += 1\n dic.update({name: count})\n\n\ndef 
max_value_of_dic(dic):\n max_value = 1\n for i in dic:\n if max_value < dic[i]:\n max_value = dic[i]\n return max_value\n\n\ndef what_is_popular_name(col1, col2):\n dic_of_women_names = {}\n count = 0\n for k, i in enumerate(col1):\n count += 1\n if col2[count] == 'female':\n current_name = parse_name(i)\n adding_to_dic(dic_of_women_names, current_name)\n print('popular women names:')\n for i in range(1, 4):\n max_value = max_value_of_dic(dic_of_women_names)\n top = get_key(dic_of_women_names, max_value)\n print(top, dic_of_women_names[top])\n dic_of_women_names.pop(top)\n\n\ndef main():\n data = parse_date(file, file_col)\n data = data[['Pclass', 'Fare', 'Age', 'Sex']]\n print(data[:10])\n # data.apply((lambda x: x if not np.isnan(x)))\n data.dropna(data['Age'])\n print(data[:10])\n # print(round(len((data['Pclass'] == 1) & (data['Survived'] == 1)), 2))\n # print(data['Pclass'] == 1)\n # print(len((data[data['Pclass'] == 1])) / len(data))\n # print(data['Age'].mean())\n # print(data['Age'].median())\n # analyze_em(data)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"week1/tree/titanic_tree.py","file_name":"titanic_tree.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"151959017","text":"#encoding=utf-8\n\nimport re\n\nret = re.match(\"\\w*\",\"HHAHawaswHAHHAHaaaaaakkkkkk\")\nif ret != None:\n print(ret.group())\n\n\n\nret = re.match(\"[a-zA-Z0-9_]{6,100}\",\"wdwdwdwadw121213131313\")\nif ret != None:\n print(ret.group())\n\n\nret = re.match(\"[a-zA-Z_]+[\\w]*\",\"naennennen_2_wdwa\")\nif ret != None:\n print(ret.group())\n\n\n\nret = re.match(\"[1-9]?[0-9]\",\"120\")\nif ret != None:\n print(ret.group())\n\n\nret = re.match(\"[\\w]{4,20}@163\\.com$\",\"wudezhi@163.com\")\nif ret != None:\n print(ret.group())\n\n\nret = re.match(\"\\w{4,20}@(163|126|qq)\\.com\",\"494712390@qq.com\")\nif ret != None:\n print(ret.group())\n\nret = re.match(\"<[a-zA-Z]*>\\w*<[a-zA-Z]*>\",\"hahhah\")\nif ret != None:\n print(ret.group())\n\n\nret = re.findall(r\"\\d+\",\"python = 9999, c = 7890, c++ = 12345\")\nif ret != None:\n print(ret)\n print(ret[0])\n\n\nret = re.sub(r\"\\d+\",\"mySico\",\"python === 123\")\nif ret != None:\n print(ret)\n\n\ns = \"
\" \\\n \"

Job responsibilities:\" \\\n    \"Complete server-side work such as recommendation algorithms, data statistics, interfaces, and other back-end services\" \\\n    \"Must-have requirements:\" \\\n    \"Strong self-drive and professionalism; proactive and results-oriented\" \\\n    \"Technical requirements:\" \\\n    \"1. One or more years of Python development experience; command of object-oriented analysis and design; familiarity with design patterns\" \\\n    \"2. Command of the HTTP protocol; familiarity with MVC, MVVM and the related WEB development frameworks\" \\\n    \"3. Command of relational database design and SQL; proficient with one of MySQL/PostgreSQL\" \\\n    \"4. Command of NoSQL and MQ; proficient with the corresponding technical solutions\" \\\n    \"5. Familiar with Javascript/CSS/HTML5, JQuery, React, Vue.js\" \\\n    \"Bonus points: big data, mathematical statistics, machine learning, sklearn, high performance, high concurrency.
\"\n\n\nprint(\"s === %s\"%s)\n\nprint(\"===================\\n\")\n\nret = re.sub(\"\",\"\",s)\nprint(ret )\n\n\nret = re.split(r\":| \",\"info:dezhi 27 hubei\")\nif ret != None:\n print(ret)\n ","sub_path":"code/正则表达式/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"521731838","text":"\n# заготовка для домашней работы\n# прочитайте про glob.glob\n# https://docs.python.org/3/library/glob.html\n\n# Задание\n# мне нужно отыскать файл среди десятков других\n# я знаю некоторые части этого файла (на память или из другого источника)\n# я ищу только среди .sql файлов\n# 1. программа ожидает строку, которую будет искать (input())\n# после того, как строка введена, программа ищет её во всех файлах\n# выводит список найденных файлов построчно\n# выводит количество найденных файлов\n# 2. снова ожидает ввод\n# поиск происходит только среди найденных на этапе 1\n# 3. снова ожидает ввод\n# ...\n# Выход из программы программировать не нужно.\n# Достаточно принудительно остановить, для этого можете нажать Ctrl + C\n\n# Пример на настоящих данных\n\n# python3 find_procedure.py\n# Введите строку: INSERT\n# ... большой список файлов ...\n# Всего: 301\n# Введите строку: APPLICATION_SETUP\n# ... большой список файлов ...\n# Всего: 26\n# Введите строку: A400M\n# ... большой список файлов ...\n# Всего: 17\n# Введите строку: 0.0\n# Migrations/000_PSE_Application_setup.sql\n# Migrations/100_1-32_PSE_Application_setup.sql\n# Всего: 2\n# Введите строку: 2.0\n# Migrations/000_PSE_Application_setup.sql\n# Всего: 1\n\n# не забываем организовывать собственный код в функции\n# на зачёт с отличием, использовать папку 'Advanced Migrations'\n\nimport glob\nimport os.path\n\ndef chdir():\n\tos.chdir(os.path.abspath(os.path.dirname(__file__)))\n\ndef get_file_list(name_dir):\n\tmigrations = name_dir\n\tfiles = glob.glob(os.path.join(migrations, \"*.sql\"))\n\treturn files\n\ndef finder (name_dir='Migrations'):\n\tfile_list = get_file_list(name_dir)\n\twhile True:\n\t\tuser_input = input('Введите текст(q для выхода): ').lower()\n\t\tfiles_with_user_text = []\n\t\tif user_input == 'q':\n\t\t\tbreak\n\t\telse:\n\t\t\tfor file_with_text in file_list:\t\n\t\t\t\twith open(file_with_text) as opened_sql_file:\n\t\t\t\t\tif user_input.lower() in opened_sql_file.read().lower():\n\t\t\t\t\t\tfiles_with_user_text.append(file_with_text)\n\t\t\t\t\t\t#print(file_with_text)\n\t\t\tif len(files_with_user_text) == 0:\n\t\t\t\tprint('Список файлов: {0}{1}Кол-во найденных результатов: {2}{1}Совпадений с {3} не найдено! 
Попробуйте снова!'.format('\\n'.join(file_list), '\\n', len(file_list), user_input))\n\t\t\telse:\n\t\t\t\tfile_list = files_with_user_text\n\t\t\t\tprint('Список файлов: {0}{1}Кол-во найденных результатов: {2}'.format('\\n'.join(file_list), '\\n', len(file_list)))\n\nchdir()\nfinder('Advanced Migrations')\n\n\n","sub_path":"homework/2.3-paths/find_procedure.py","file_name":"find_procedure.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"276633618","text":"path = \"/home/wqt/char_model/data/MSRA/msra_train_bioes\"\npath2 = \"/home/wqt/char_model/data/MSRA/msra_train_bioes_2\"\nimport codecs\ntotal = [0]*100\nwith codecs.open(path,'r','utf-8') as fin:\n #with codecs.open(path2,'w','utf-8') as fout:\n sent_len = 0\n lines = fin.readlines()\n for i,line in enumerate(lines):\n if line not in ['\\n', '\\r\\n']:\n sent_len += 1\n # word_label = line.strip().split()\n # if len(word_label) >= 2:\n # fout.write('\\t'.join(word_label))\n # fout.write('\\n')\n # if sent_len >= 240 and lines[i+1] not in ['\\n', '\\r\\n']:\n # fout.write(\"\\n\")\n # sent_len = 0\n elif line in ['\\n', '\\r\\n']:\n #fout.write('\\n')\n total[sent_len//50] += 1\n sent_len = 0\n print(total)","sub_path":"data/MSRA/static2.py","file_name":"static2.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"306808282","text":"import time\nfrom threading import Thread\n\ntry:\n from transacao import Transacao\n from banco_helper import BancoHelper\nexcept ImportError:\n if __package__ == \"banco\":\n from banco.transacao import Transacao\n from banco.banco_helper import BancoHelper\n\nclass Conta:\n\n def __init__(self, numero, agencia, titular):\n self._tipo = \"\"\n self._numero = numero\n self._num_str = BancoHelper.codigo_to_string(self._numero)\n self._agencia = agencia\n self._ag_str = BancoHelper.codigo_to_string(self._agencia)\n self._titular = titular\n self._saldo = 0\n self._limite = None\n self._extrato = []\n self._poupanca = None\n\n def __repr__(self):\n tipo_formatado = self._tipo.lower().capitalize()\n retorno = f\"\\tConta {tipo_formatado}\\n\"\n retorno += f\"\\tNumero: {self._num_str}\\n\\tAgencia: {self._ag_str}\\n\\tTitular: {self._titular.get_nome()}\\n\"\n return retorno\n\n def info(self):\n return f\"{self._num_str}/{self._ag_str}\"\n\n def deposita(self, deposito):\n operacao = \"DEPOSITO\"\n novo_saldo = self._saldo + deposito\n saldo_antigo = self._saldo\n self._saldo = novo_saldo\n return self._salva_transacao(operacao, saldo_antigo, deposito, novo_saldo)\n\n def saca(self, saque):\n if self._saldo - saque < 0:\n raise Exception(f\"Operacao de saque invalida. Voce tem {self._saldo} maximo para sacar\")\n\n operacao = \"SAQUE\"\n novo_saldo = self._saldo - saque\n saldo_antigo = self._saldo\n self._saldo = novo_saldo\n return self._salva_transacao(operacao, saldo_antigo, saque, novo_saldo)\n\n def transfere(self, destino_info, valor):\n if self._saldo - valor < 0:\n raise Exception(f\"Operacao de transferencia invalida. 
Voce tem {self._saldo} maximo para transferir\")\n\n operacao = \"TRANSFERENCIA\"\n novo_saldo = self._saldo - valor\n saldo_antigo = self._saldo\n self._saldo = novo_saldo\n info = \"DESTINO.\" + destino_info\n return self._salva_transacao(operacao, saldo_antigo, valor, novo_saldo, info)\n\n def recebe_transferencia(self, origem_info, valor):\n operacao = \"TRANSFERENCIA RECEBIDA\"\n novo_saldo = self._saldo + valor\n saldo_antigo = self._saldo\n self._saldo = novo_saldo\n info = \"ORIGEM.\" + origem_info\n return self._salva_transacao(operacao, saldo_antigo, valor, novo_saldo, info)\n\n def _salva_transacao(self, operacao, saldo, valor, total, transferencia_info=None):\n transacao = Transacao(operacao, saldo, valor, total, transferencia_info)\n self._extrato.append(transacao)\n return transacao.to_dictionary()\n\n def get_extrato(self):\n retorno = f\"{self}\\n\"\n retorno += \"\\t========== EXTRATO =========\\n\"\n retorno += f\"\\tSaldo atual: {self._saldo}\\n\"\n for transacao in reversed(self._extrato):\n retorno += transacao.__repr__()\n retorno += \"\\t====== FIM === EXTRATO ====\\n\"\n return retorno\n\n def get_codigo(self):\n return self._numero\n\n def get_titular(self):\n return self._titular\n\n\nclass Corrente(Conta):\n \n def __init__(self, numero, agencia, titular, limite=1000.0, poupanca=None):\n Conta.__init__(self, numero, agencia, titular)\n self._tipo = \"CORRENTE\"\n self._limite = limite\n self._poupanca = poupanca\n\n def saca(self, saque):\n if self._limite and saque > self._limite:\n raise Exception(f\"Valor maximo para saque e de {self._limite}\")\n\n return Conta.saca(self, saque)\n\n def transfere(self, destino, valor):\n if self._limite and valor > self._limite:\n raise Exception(f\"Valor maximo para transferencia e de {self._limite}\")\n\n return Conta.transfere(self, destino, valor)\n\n def anexa_poupanca(self, numero):\n self._poupanca = Poupanca(numero, self._agencia, self._titular)\n\n def poupa(self, valor):\n if not self._poupanca:\n raise Exception(f\"Voce nao tem uma conta poupanca anexada a esta conta corrente {self._num_str}\") \n\n operacao = \"TRANSFERENCIA PARA POUPANCA\"\n saldo_antigo = self._saldo\n novo_saldo = self._saldo - valor\n self._saldo = novo_saldo\n self._poupanca.deposita(valor)\n self._salva_transacao(operacao, saldo_antigo, valor, novo_saldo)\n\n def get_poupanca(self):\n return self._poupanca\n\n\nclass Poupanca(Conta):\n\n def __init__(self, numero, agencia, titular, valor_inicial=0.0):\n Conta.__init__(self, numero, agencia, titular)\n self._tipo = \"POUPANCA\"\n self._saldo = valor_inicial\n # thread = Thread(target=self._rende())\n # thread.start()\n # thread.join()\n \n def _rende(self):\n if self._saldo and self._saldo > 0:\n sec = 0\n while sec < 10:\n valor = self._saldo * 0.1\n self._saldo += valor\n sec += 1\n time.sleep(1)\n\n def get_extrato(self):\n retorno = f\"{self}\"\n retorno += \"\\t========= POUPANCA =========\\n\"\n retorno += f\"\\tSaldo atual: {self._saldo}\\n\"\n retorno += \"\\t===== FIM === POUPANCA =====\\n\"\n return retorno\n","sub_path":"banco/conta.py","file_name":"conta.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"202242152","text":"# Synchronously running\n\n# Concurrency: CPU tasks and IO tasks can be running at the same time\n\n# Using thread is not actually running the code of different tasks at the same time\n# Using thread you can run the code without waiting for IO tasks are 
completed\n\n# Some program may run slower when using threading because of the cost when create and destroy different threads\n# If the task is CPU bound, we may want using multi processing and run it in parallel instead\n#\n# Manual way: using threading module\nimport threading\n# More easier and efficient way (Python > 3.2): using threads pool executor\nimport concurrent.futures\n \n\n\nimport time\n\n# Performance counter for benchmarking\nstart1 = time.perf_counter()\n\ndef do_something(seconds):\n time.sleep(seconds)\n return f\"Done sleeping in {seconds} second(s) ...\"\n\n# Round1: Basic test\nt1 = threading.Thread(target=do_something, args=[1.5])\nt2 = threading.Thread(target=do_something, args=[1.5])\nt1.start()\nt2.start()\n\n# Make threads complete before execute codes below\nt1.join()\nt2.join()\nfinish1 = time.perf_counter()\n\nprint(f\"Round1: Finished in {round(finish1-start1, 2)} second(s)\")\n\n# Round2: Testing with loop\nthreads = []\nstart2 = time.perf_counter()\nfor _ in range(10):\n t = threading.Thread(target=do_something, args=[1.5])\n t.start()\n threads.append(t)\n\nfor thread in threads:\n thread.join()\n\nfinish2 = time.perf_counter()\nprint(f\"Round2: Finished in {round(finish2-start2, 2)} second(s)\")\n\n# Round3: Using concurrent.futures\nstart3 = time.perf_counter()\nwith concurrent.futures.ThreadPoolExecutor() as executor:\n # You can use futures Object to check if it's running or it's done and it's result\n secs = [5,4,3,2,1]\n # Using list comprehension and submit method to run a loop of threads\n # Submit method return a future object\n results1 = [executor.submit(do_something, sec) for sec in secs]\n for f in concurrent.futures.as_completed(results1):\n print(f.result())\n\n # Using map() method, it return the result\n # map() method auto join our threads\n # And we will have a list of results in order of it started\n results2 = executor.map(do_something, secs)\n\n for result in results2:\n print(result)\n\nfinish3 = time.perf_counter()\nprint(f\"Round3: Finished in {round(finish3-start3, 2)} second(s)\")","sub_path":"day3/my_threading.py","file_name":"my_threading.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"90492931","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# File: amt.py\n# Author: Akanksha Saran \n\nimport os\nimport gzip\nimport numpy as np\nfrom six.moves import range, zip, map\nimport cv2\nimport json\nimport random\nimport pickle as pkl\n\nfrom tensorpack.utils import logger\nfrom tensorpack.utils.fs import download, get_dataset_path\nfrom tensorpack.utils.timer import timed_operation\nfrom tensorpack.dataflow.base import RNGDataFlow\n\n__all__ = ['dataset']\n\nclass Dataset(RNGDataFlow):\n \"\"\"\n Produces [feature, bb, label] in AMT annotated dataset,\n features are 4096x1 in the range [0,1], bb are float, label is an int.\n \"\"\"\n def __init__(self, pathfile, train_or_test, shuffle=False):\n \"\"\"\n Args:\n train_or_test (str): either 'train' or 'test'\n shuffle (bool): shuffle the dataset\n \"\"\"\n assert os.path.isfile(pathfile)\n assert train_or_test in ['train', 'test']\n self.name = train_or_test\n self.shuffle = shuffle\n\n with open(pathfile) as datafile:\n data = json.load(datafile)\n \n #self.images = []\n self.features = []\n self.labels = []\n self.bb = []\n\n idxs = np.arange(len(data))\n if self.shuffle:\n #self.rng.shuffle(idxs)\n random.shuffle(idxs)\n\n #print(data[0]['feat_path'])\n feats = 
pkl.load(open(data[0]['feat_path'],'rb'))\n #print(feats)\n\n for k in idxs:\n element = data[k]\n #im = cv2.imread(element['img_path'], cv2.IMREAD_COLOR)\n #assert im is not None, element['img_path']\n #if im.ndim == 2:\n # im = np.expand(element['img_path'],2).repeat(3,2)\n #im = cv2.resize(im,(224,224))\n #self.images.append(im)\n\n img_name = element['img_name']\n self.features.append(feats[img_name])\n self.labels.append(int(element['label']))\n self.bb.append(np.array(element['bb']))\n\n def size(self):\n return len(self.labels)\n \n def get_data(self):\n idxs = np.arange(len(self.features))\n if self.shuffle:\n #self.rng.shuffle(idxs)\n random.shuffle(idxs)\n\n for k in idxs:\n yield [self.features[k], self.bb[k], self.labels[k]]\n\nif __name__ == '__main__':\n ds = Dataset('/home/asaran/research/tensorpack/examples/SimilarityLearning/data/amt_train.json', 'train',\n shuffle=True)\n ds.reset_state()\n for k in ds.get_data():\n from IPython import embed\n embed()\n break\n","sub_path":"examples/SimilarityLearning/utils/amt.py","file_name":"amt.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"252235431","text":"import argparse\nimport logging, logging.handlers\nimport os, sys\nimport win32api\nimport _winapi\nimport shutil\n\nRAM_DRIVE = \"R:\"\nLOGFILE = f\"{RAM_DRIVE}/Logs/ram-disk.log\"\nlogger = logging.getLogger(\"\")\ndef init_logger():\n logger.setLevel(logging.INFO)\n if \"DEBUG\" in os.environ:\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s: %(levelname)s - %(message)s\")\n\n fileHandler = logging.handlers.RotatingFileHandler(LOGFILE)\n fileHandler.setFormatter(formatter)\n fileHandler.setLevel(logging.INFO)\n logger.addHandler(fileHandler)\n\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(formatter)\n consoleHandler.setLevel(logging.DEBUG)\n logger.addHandler(consoleHandler)\n\ndef get_parser():\n parser = argparse.ArgumentParser('ramdisk')\n subparsers = parser.add_subparsers(title='commands')\n\n migrate_parser = subparsers.add_parser('migrate', help='Migrate a folder to RAM disk')\n migrate_parser.add_argument('source', help='Source folder path')\n migrate_parser.add_argument('target', help='Target folder path')\n migrate_parser.set_defaults(func=migrate)\n return parser\n\ndef migrate_folder(source, target):\n try:\n create_link = False\n if not os.path.isdir(source):\n logger.error(f\"[{source}] does not exist or is not a directory.\")\n return\n\n source_backup=f\"{source}.sav\"\n # seems Python does not support dictory junction well (os.path.islink is always False)\n # use isdir AND !exists to guess dictory junction condition\n if os.path.exists(source):\n # not a junction, rename source first before copying\n logger.info(f\"Renaming {source} to {source_backup}...\")\n os.rename(source, source_backup)\n create_link = True\n\n # copy folder content only if target not created yet\n if not os.path.exists(target):\n logger.info(f\"Copying content from {source_backup} to {target}...\")\n shutil.copytree(source_backup, target, copy_function = shutil.copy)\n\n if create_link:\n logger.info(f\"Creating link from {source} to {source_backup}...\")\n #win32api.CreateSymbolicLink(source, target)\n _winapi.CreateJunction(target, source)\n\n except Exception:\n logger.error(\"Failed to migrate from {source} to {target}.\", exc_info=True)\n\ndef migrate(args):\n logger.debug(f\"CMD - Migrate folder {args.source} to 
{args.target}\")\n migrate_folder(args.source, args.target)\n\ninit_logger()\n\nif __name__ == \"__main__\":\n parser = get_parser()\n args = parser.parse_args()\n if len(sys.argv) > 1:\n args.func(args)\n","sub_path":"scripts/python/ram-disk.py","file_name":"ram-disk.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"439804675","text":"import jwt\nimport time\nimport unittest\nfrom unittest.mock import patch\nfrom secrets import token_urlsafe\nimport asyncio\nfrom treillage import Credential, TokenManager\n\n\nasync def mock_json():\n key = 'some secret for testing'\n now = int(time.time())\n access_payload = {\n \"userId\": \"12345\",\n \"orgId\": \"4321\",\n \"keyId\": \"fvpk_00000000-0000-0000-0000-000000000000\",\n \"scopes\": \"core:api\",\n \"iat\": now,\n \"exp\": now + 900,\n \"aud\": \"filevine.io\",\n \"iss\": \"filevine.io\",\n \"sub\": \"12345\"\n }\n body = dict()\n body['accessToken'] = jwt.encode(access_payload, key, algorithm='HS256')\n body['refreshToken'] = jwt.encode({}, token_urlsafe(), algorithm='HS256')\n body['refreshTokenExpiry'] = now + 86400\n body['refreshTokenTtl'] = '24 hours'\n body['userId'] = '12345'\n body['orgId'] = 6355\n return body\n\n\nclass TestTokenManager(unittest.TestCase):\n def setUp(self) -> None:\n self.base_url = 'http://127.0.0.1'\n self.credentials = Credential(key='', secret='')\n patcher = patch('treillage.token_manager.ClientSession.post')\n mock_post = patcher.start()\n mock_response = mock_post.return_value.__aenter__.return_value\n mock_response.json.side_effect = mock_json\n mock_response.status = 200\n self.addCleanup(patcher.stop)\n\n def test_async_context_manager(self):\n async def test():\n async with TokenManager(self.credentials, self.base_url) as tm:\n self.assertIsNotNone(tm.access_token)\n self.assertIsNotNone(tm.access_token_expiry)\n self.assertIsNotNone(tm.refresh_token)\n asyncio.run(test())\n\n def test_async_create(self):\n async def test():\n tm = await TokenManager.create(self.credentials, self.base_url)\n self.assertIsNotNone(tm.access_token)\n self.assertIsNotNone(tm.access_token_expiry)\n self.assertIsNotNone(tm.refresh_token)\n asyncio.run(test())\n\n def test_refresh_access_token(self):\n async def test():\n async with TokenManager(self.credentials, self.base_url) as tm:\n old_access_token = tm.access_token\n old_access_token_expiry = tm.access_token_expiry\n old_refresh_token = tm.refresh_token\n await asyncio.sleep(5)\n await tm.refresh_access_token()\n self.assertNotEqual(old_access_token, tm.access_token)\n self.assertNotEqual(old_refresh_token, tm.refresh_token)\n self.assertLess(\n old_access_token_expiry, tm.access_token_expiry\n )\n asyncio.run(test())\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_token_manger.py","file_name":"test_token_manger.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"229366115","text":"#!/usr/bin/env python3\n\n# Created by Jonathan Komar\n# 2016-09\n# Description:\n# Enterprise solution for automated documentation generation.\n#\nimport threading\nimport queue\nimport time\nimport subprocess\nimport os\nimport shutil\nimport sys\nimport logging,logging.handlers\nimport re\nimport getpass\nimport configparser\nimport signal\nimport argparse\n\napp_name = 'texgen'\n\n# Create main (root) logging object\nlogger = 
logging.getLogger('{}'.format(__file__))\n\n# Log messages (lowest to highest: DEBUG,INFO,WARN,ERROR,CRITICAL)\nlogger.setLevel(logging.DEBUG)\n\n# Create console handler that writes logs to terminal early in process\nconsoleh = logging.StreamHandler(sys.stdout)\n\n# Set Log message level (lowest to highest: DEBUG,INFO,WARN,ERROR,CRITICAL)\nconsoleh.setLevel(logging.INFO)# default set again in setLogs()\n\n# Define Formats\nformatter_file = logging.Formatter('%(asctime)s %(name)s PID: %(process)d TID: %(thread)d %(levelname)s \\n ==> %(message)s',datefmt='%Y-%m-%d at %H:%M:%S.%s')\nformatter_console = logging.Formatter('\\033[0;94m%(asctime)s\\033[0m \\033[32m%(name)s\\033[0m PID: \\033[0;32m%(process)d\\033[0m TID: \\033[0;32m%(thread)d\\033[0m %(levelname)s \\n \\033[0m==>\\033[0m\\033[1;31m %(message)s\\033[1;0m',datefmt='%Y-%m-%d at %H:%M:%S')\n\n# Tell console handler to use this format\nconsoleh.setFormatter(formatter_console)\n\n# Add Log Handler Objects to the Local Root Logger\nlogger.addHandler(consoleh)\n\nclass TeXGen(): # Class containing methods separate from threader class\n\n def __init__(self,arg_struct):\n self.arg_pack = arg_struct\n self.thread_pool = []\n self.run()\n\n def run(self):\n self.argCheck()# Do this early on e.g. output check\n if self.arg_pack.daemon == True:\n logger.debug('Daemon Mode: {}.'.format(self.arg_pack.daemon))\n self.goDaemon()\n else: # Oneshot Mode\n logger.debug('Daemon Mode: {}.'.format(self.arg_pack.daemon))\n self.goOneShot()\n\n class SignalHandler:\n def __init__(self, arg_pack, thread_pool):\n self.arg_pack = arg_pack\n self.thread_pool = thread_pool # not in arg_pack, set by TeXGen obj later\n\n def __call__(self, signum, frame):# implicitly run when object called, obj then requires 2 args\n logger.info('\\nSignalHandler Received SIGINT. Stopping all threads ASAP!')\n self.arg_pack.stop_flag.set()\n# for job in self.arg_pack.tex_queue:\n# logger.info('SignalHandler: Detected stopflag. Removing {} from the Queue.'.format(job))\n# job.task_done()# force empty queue to unblock join\n# logger.debug('SignalHandler: Queue is empty: {}'.format(self.arg_pack.tex_queue.empty()))\n for thread in self.thread_pool: # loop thru all threads\n if thread.isAlive(): # redundant check. alive threads rejoin in run method of TypesetThread at while self.arg_pack.stop_flag.isSet() == False\n logger.info('SignalHandler: Detected stopflag. Rejoining {} to the main thread.'.format(thread))\n thread.join()# join thread (should not exist due to while loop)\n else: # most likely the case\n logger.info('SignalHandler: Detected stopflag. 
Removing {} from the thread pool.'.format(thread))\n self.arg_pack.exitcode = 1\n\n def argCheck(self):\n if self.arg_pack.output != None:\n if not os.path.isdir(self.arg_pack.output):\n logger.warning('argCheck: Missing output -o directory: \"{}\"'.format(self.arg_pack.output))\n self.arg_pack.exitcode = 1\n else:\n self.arg_pack.output = os.path.abspath(self.arg_pack.output)\n\n def goOneShot(self):\n logger.debug('goOneShot called.')\n tex_input_list = [] # init (arg_pack.input contains either dirs or files, but this list must contain only files)\n logger.debug('goOneShot: Recursive check: {}\\n Depth: {}'.format(self.arg_pack.recursive,self.arg_pack.depth))\n if self.arg_pack.input != None: # should have a dir or file, otherwise help is shown by argparse\n if self.arg_pack.recursive is True:# First decide how to make tex_input_list, recursive, then dirs.\n for item in self.arg_pack.input:\n if os.path.isdir(item):# ensure item is a dir\n tex_input_list = self.getFilesRecursively('.*\\.tex',item,self.arg_pack.depth[0])\n else:\n logger.warning('goOneShot: Input with recursion -r must be a directory: \"%s\"'% (item))\n else: # input is a list of files\n tex_input_list = self.arg_pack.input\n logger.debug('goOneShot: tex_input_list: {}'.format(tex_input_list))\n for file in tex_input_list:\n if not os.path.isfile(file): # File check\n logger.error('goOneShot: File not found: \"%s\"' % (file))\n self.arg_pack.exitcode = 1\n return\n else:\n logger.debug('goOneShot: Found file: %s' % file)\n logger.info('Files to process: %s' % (tex_input_list))\n else: # no input given\n self.arg_pack.exitcode = 1\n logger.warning('goOneShot: No files to process.')\n return\n for tex_file in tex_input_list:\n if not os.path.isfile(tex_file): # redundant file check\n logger.error('goOneShot: TeX file does not exist: {}'.format(tex_file))\n self.arg_pack.exitcode = 1\n return\n else: # populate tex queue\n self.arg_pack.tex_queue.put(tex_file)\n # Start the business\n if self.arg_pack.iterations[0] > 0:\n if self.arg_pack.prehook != None:\n self.preHook() # Inject hook\n #self.preProcess() # Disabled for the time being. Orig purpose was to grab svn/git repos\n signal_handler = self.SignalHandler(self.arg_pack,self.thread_pool)# create signal handler, needs stop_flag,thread_pool\n signal.signal(signal.SIGINT, signal_handler)# Connect SIGINT to signal_handler\n logger.info('Starting main typesetting phase.')\n self.runThreads()\n logger.info('Starting postprocessing phase.')\n self.postProcess()\n if self.arg_pack.posthook != None:\n self.postHook() # Inject hook\n else:\n logger.info(\"goOneShot: {} iterations, therefore nothing to do\".format(self.arg_pack.iterations[0]))\n\n def runThreads(self): # uses self.thread_pool to group of threads. Uses to self.arg_pack.thread_limit know how many thread objects to make\n logger.debug('runThreads called.')\n #logger.debug('runThreads: thread limit: {}'.format(self.arg_pack.thread_limit[0]))\n for item in range(self.arg_pack.thread_limit[0]): # Create as many threads objects as allowed by limit\n self.thread_pool.append(TypesetThread(self.arg_pack))# Pass entire arg_pack including tex_queue and stop_flag\n logger.info('Thread Pool:\\n {}'.format(self.thread_pool))\n for i,thread in enumerate(self.thread_pool):# Start threads\n logger.debug('runThreads: Starting thread {}:\\n {}'.format(i+1,thread))\n thread.start()\n self.arg_pack.tex_queue.join()# join on Queue object blocks until all items in queue have been fetched and processes i.e. queue is empty. 
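# A hedged sketch of the Queue.join()/task_done() contract relied on here\n        # (standard-library semantics; 'q' and 'work' are illustrative names only):\n        #     q = queue.Queue()\n        #     q.put(work)       # producer: unfinished-task count += 1\n        #     item = q.get()    # worker takes an item\n        #     q.task_done()     # worker: exactly one call per completed get()\n        #     q.join()          # unblocks once every put() has a matching task_done()\n        #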
use this to wait before running join loop on thread list.\n for i,thread in enumerate(self.thread_pool): # loops number of thread objects\n logger.info('runThreads: Joining thread object: {}'.format(thread))\n thread.join()# Join thread one by one (None is special)\n del self.thread_pool[:] # Empty thread pool list at end of every runThreads call to ensure threads are only started once.\n\n def goDaemon(self):\n logger.info('goDaemon called.')\n logger.warning('Daemon mode -d has not yet been implemented.')\n self.arg_pack.exitcode = 1\n\n def cpFilesToOutput(self):# Stupid mover for lists of files\n logger.debug('cpFilesToOutput called.')\n clone_list = []\n if os.path.isdir(self.arg_pack.output):# First check destination\n if self.arg_pack.clean == True:\n logger.info('cpFilesToOutput: Clean parameter true. Deleting all files in output directory:\\n {}'.format(self.arg_pack.output))\n for base,dirs,files in os.walk(self.arg_pack.output, topdown=False):\n for file_name in files:\n logger.debug('cpFilesToOutput: Removing:\\n {}'.format(os.path.join(base,file_name)))\n os.remove(os.path.join(base,file_name))\n for dir_name in dirs:\n logger.debug('cpFilesToOutput: Removing:\\n {}'.format(os.path.join(base,dir_name)))\n os.rmdir(os.path.join(base,dir_name))\n self.arg_pack.completed_queue.put('Sentinel')# Use to force stop iteration of queue\n for item_tuple in iter(self.arg_pack.completed_queue.get, 'Sentinel'):# Stop when get yields None\n logger.debug('cpFilesToOutput: Temporarily removing from self.arg_pack.completed_queue: {}'.format(item_tuple))\n clone_list.append(item_tuple)\n item = item_tuple[0]# conv to string\n logger.debug('cpFilesToOutput: Processing item: {}'.format(item))\n if os.path.isfile(item):# Second check file\n if item.endswith('.log') and self.arg_pack.movelogs == False:\n logger.debug('cpFilesToOutput: Skipping log: {}'.format(item))\n else:\n logger.info('Copying \"{}\" to \"{}\"'.format(item,self.arg_pack.output))\n shutil.copy(item,self.arg_pack.output)\n else:\n logger.warning('cpFilesToOutput: Could not copy, file not found:\\n {}'.format(item))\n for item in clone_list:\n logger.debug('cpFilesToOutput: Restoring to self.arg_pack.completed_queue: {}'.format(item))\n self.arg_pack.completed_queue.put(item)\n else:\n logger.error('cpFilesToOutput: Destination not found:\\n {}'.format(self.arg_pack.output))\n\n def getFilesRecursively(self,regex,base_dir_input='os.path.dirname(os.path.realpath(__file__))',depth=0):\n logger.debug('getFilesRecursively: Recursion depth: %s' % (depth))\n results_list = [] # init\n regex_obj = re.compile(regex) # make regex object\n logger.debug('getFilesRecursively:\\n Regular Expression: %s\\n Base: \"%s\"\\n Depth: %s' % (regex_obj.pattern,base_dir_input,depth))\n if os.path.isdir(base_dir_input):\n base_dir_input.rstrip(os.path.sep)# ensure depth counter is accurate by removing extraneous /\n base_depth = base_dir_input.count(os.path.sep)# set initial base depth\n for base, dirs, files in os.walk(base_dir_input): # Collect all tex files recursively\n if depth != 0:\n try:\n depth = int(depth) # redundant test for integer\n except:\n logger.warning('getFilesRecursively: depth not an integer: %s' % (depth))\n current_depth = base.count(os.path.sep)\n if current_depth - base_depth <= depth:\n for file in files:\n file_name = os.path.basename(os.path.normpath(file))\n if re.match(regex_obj,file_name):\n logger.debug('Appending to results_list: \"%s\"' % (os.path.join(base,file)))\n results_list.append(os.path.join(base,file))\n else:# 
depth is infinity\n logger.debug('getFilesRecursively: Depth is 0 (infinity).')\n for file in files:\n file_name = os.path.basename(os.path.normpath(file))\n if re.match(regex_obj,file_name):\n logger.debug('Appending to results_list: \"%s\"' % (os.path.join(base,file)))\n results_list.append(os.path.join(base,file))\n logger.debug('Results: %s' % (results_list))\n return results_list\n else:\n logger.warning('getFilesRecursively: Not found: \"%s\"' % (base_dir_input))\n self.arg_pack.exitcode = 1\n\n def preProcess(self):\n logger.debug('preProcess started. Nothing to do.')\n\n def postProcess(self): # This groups postprocessing routines\n logger.debug('postProcess: Called.')\n if self.arg_pack.stop_flag.isSet() == False:\n # Check parameters for things to be done after compilation complete\n if len(self.arg_pack.error_report) > 0:\n if os.path.isfile(self.arg_pack.error_report[0]):\n self.genErrorReport()# auto adds tasks to tex_queue\n else:\n logger.error(\"postProcess: Error Report regular expressions file not found: {}\".format(self.arg_pack.error_report[0]))\n if len(self.arg_pack.warning_report) > 0:\n if os.path.isfile(self.arg_pack.warning_report[0]):\n self.genWarningReport()# auto adds tasks to tex_queue\n else:\n logger.error(\"postProcess: Warning Report regular expressions file not found: {}\".format(self.arg_pack.warning_report[0]))\n logger.debug('postProcess: Setting arg_pack.iterations to 2 for report generation.')\n self.arg_pack.iterations = [2]\n self.runThreads()\n if self.arg_pack.output == None:\n logger.debug('postProcess: Not moving anything because no output -o directory has been specified.')\n else: # Process the move to output dir\n if os.path.isdir(self.arg_pack.output):# Redundant output check\n logger.debug('postProcess: Detected: output -o detected: \"%s\"'% (self.arg_pack.output))\n else:\n logger.error('postProcess: Output -o not found. Creating directory: {}'.format(self.arg_pack.output))\n try:\n os.makedirs(self.arg_pack.output)\n except:\n logger.error('postProcess: Could not create output directory: {}'.format(self.arg_pack.output))\n self.arg_pack.exitcode = 1\n self.cpFilesToOutput()# No need to send list. 
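# An illustrative sketch (hypothetical names) of the sentinel drain-and-restore\n        # pattern that cpFilesToOutput() applies to completed_queue:\n        #     q.put('Sentinel')                      # mark the current end of the queue\n        #     for item in iter(q.get, 'Sentinel'):   # drain until the sentinel comes out\n        #         clone.append(item)\n        #     for item in clone:\n        #         q.put(item)                        # restore the queue afterwards\n        #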
All needed info contained in self.arg_pack\n\n\n def preHook(self):\n # Adds possibility to run external script beforehand\n logger.debug('preHook called.')\n for script in self.arg_pack.prehook:\n command = \"{}\".format(script)\n logger.info('preHook Executing with 15 second timeout: \\n /bin/sh -c {}'.format(command))\n process = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)# timeout in seconds\n process.wait(timeout=15)\n proc_stdout, proc_stderr = process.communicate()\n if process.returncode == 0:\n logger.info('preHook: Process complete: \\n Command: /bin/sh -c {}\\n STDOUT: \"{}\"\\n STDERR: \"{}\"\\n Exit Code: {}\\n at: {}'.format(command,proc_stdout.decode('utf8').strip(),proc_stderr.decode('utf8').strip(),process.returncode,time.time()))\n else:\n self.arg_pack.exitcode = 1\n logger.error('preHook: Process failed: \\n Command: /bin/sh -c {}\\n STDOUT: \"{}\"\\n STDERR: \"{}\"\\n Exit Code: {}\\n at: {}'.format(command,proc_stdout.decode('utf8').strip(), proc_stderr.decode('utf8').strip(),process.returncode,time.time()))\n\n def postHook(self):\n # Adds possibility to run external script beforehand\n logger.debug('postHook called.')\n for script in self.arg_pack.posthook:\n command = \"{}\".format(script)\n logger.info('postHook Executing with 15 second timeout: \\n /bin/sh -c {}'.format(command))\n process = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)# timeout in seconds\n process.wait(timeout=15)\n proc_stdout, proc_stderr = process.communicate()\n if process.returncode == 0:\n logger.info('postHook: Process complete: \\n Command: /bin/sh -c {}\\n STDOUT: \"{}\"\\n STDERR: \"{}\"\\n Exit Code: {}\\n at: {}'.format(command,proc_stdout.decode('utf8').strip(),proc_stderr.decode('utf8').strip(),process.returncode,time.time()))\n else:\n self.arg_pack.exitcode = 1\n logger.error('postHook: Process failed: \\n Command: /bin/sh -c {}\\n STDOUT: \"{}\"\\n STDERR: \"{}\"\\n Exit Code: {}\\n at: {}'.format(command,proc_stdout.decode('utf8').strip(), proc_stderr.decode('utf8').strip(),process.returncode,time.time()))\n\n def genErrorReport(self):\n logger.debug('genErrorReport: Called.')\n log_list = []\n error_list = []# Append to this to add __ErrorReport__ content\n completed_queue_clone_list = []#init\n failed_list = []#init\n failed_queue_clone_list = []#init\n self.arg_pack.completed_queue.put('Sentinel')\n for item_tuple in iter(self.arg_pack.completed_queue.get,'Sentinel'):# Stop when get yields None\n #logger.debug('genErrorReport: Temporarily removing from self.arg_pack.completed_queue: {}'.format(item_tuple))\n completed_queue_clone_list.append(item_tuple)# clone for adding back to completed_queue\n item = item_tuple[0]# string conversion\n logger.debug('genErrorReport processing:\\n \"{}\"'.format(item))\n if item.endswith('.log'):\n logger.debug('genErrorReport found log: {}'.format(item))\n log_list.append(item)\n logger.info('genErrorReport found logs: {}'.format(log_list))\n for item in completed_queue_clone_list:# restore completed_queue\n self.arg_pack.completed_queue.put(item)\n self.arg_pack.failed_queue.put('Sentinel')\n for item_tuple in iter(self.arg_pack.failed_queue.get,'Sentinel'):\n failed_queue_clone_list.append(item_tuple)# clone for adding back to failed_queue\n item = item_tuple[0]\n failed_list.append(item)\n logger.debug('genErrorReport processing failed .tex file:\\n \"{}\"'.format(item))\n for item in failed_queue_clone_list:# restore failed_queue\n 
self.arg_pack.failed_queue.put(item)\n # Start process failed_list\n if len(failed_list) > 0: # Process failed tex docs\n for failed_item in failed_list:# First see whether any docs failed to build\n #logger.debug('genErrorReport: Processing failed document: {}'.format(os.path.splitext(failed_item)[0]))#\n logger.info('genErrorReport: Processing failed document: {}'.format(failed_item))\n # rm failed tex from log_list\n logger.debug('genErrorReport: Grabbing log of failed document log from log_list.\\n File: {}'.format(failed_item))\n failed_item_log = '{}.log'.format(os.path.splitext(failed_item)[0])\n failed_item_escaped = self.escapeTexString(os.path.basename(failed_item))\n #log_list.remove(failed_item)\n # stuff to implement: grab corresponding log from file from log_list\n # print failed document + log to __ErrorReport__\n error_list.append('\\n\\\\section{%s}\\n\\\\subsection{Typesetting Failure}' % (failed_item_escaped))# Add headings\n error_list.append('\\n\\\\begin{treateachletterasword}')\n with open(os.path.splitext(failed_item)[0]+'.log') as texlog_handle:\n for line in texlog_handle:\n line_escaped = self.escapeTexString(line)\n error_list.append('\\n%s' % (line_escaped.rstrip()))# Add log.\n error_list.append('\\n\\\\end{treateachletterasword}')\n # End process failed_list\n logger.debug('genErrorReport: arg_pack.error_report: {}'.format(self.arg_pack.error_report[0]))\n logger.debug('genErrorReport: Using external pattern file.')\n error_pattern_handler = open(self.arg_pack.error_report[0],'r')\n pattern_list = []# init\n for i,line in enumerate(error_pattern_handler):\n pattern_list.append(re.compile(line.rstrip()))\n logger.info('genErrorReport: Pattern list: {}'.format(pattern_list))\n logger.debug('genErrorReport: Log list: {}'.format(log_list))\n if len(log_list) > 0: # For both cases, scan logs\n for item in log_list:\n if not os.path.isfile(item):\n logger.error('genErrorReport: File not found: {}'.format(item))\n else:\n titled_already = False# Item needs section heading\n texlog_handle = open(item,'r')\n for i,line in enumerate(texlog_handle):\n for pattern in pattern_list:\n #logger.debug('Searching for pattern: \"{}\"'.format(pattern.pattern))\n for match in re.finditer(pattern,line):\n logger.debug('genErrorReport matched: {}'.format(pattern.pattern))\n match_found = True\n line_escaped = self.escapeTexString(line)\n pattern_escaped = self.escapeTexString(pattern.pattern)\n if titled_already == False:\n file_name_escaped = self.escapeTexString(item)\n error_list.append('\\n\\\\section{%s}\\n\\\\subsection{Regular Expression Match}' % (os.path.basename(file_name_escaped)))\n titled_already = True\n error_list.append('\\n\\\\begin{itemize}\\n\\\\item Log output line %s is: \\\\begin{treateachletterasword}%s\\\\end{treateachletterasword} which matches regular expression \\\\begin{treateachletterasword}%s\\\\end{treateachletterasword}\\n\\\\end{itemize}' % (i+1, line_escaped,pattern_escaped) + '\\n')\n texlog_handle.close()\n pre_doc = \"\"\"\\\\documentclass{article}\n\\\\usepackage{fontspec}\n\\\\newfontfamily\\\\monofont{FreeMono.otf}\n\\\\usepackage[includeheadfoot,top=3cm,left=2cm,right=2cm,bottom=2cm]{geometry}\n\\\\usepackage{datetime}\n\\\\usepackage[hidelinks]{hyperref}\n\\\\long\\\\def\\\\treateachletterasword{%\n \\\\bgroup\n \\\\XeTeXinterchartokenstate=1% Enable Character Classes (unique to xelatex) 0=off 1=on\n \\\\XeTeXinterchartoks 0 0 = {\\\\penalty0\\\\relax}% Set token to be inserted between interchar class 0 and interchar class 0\n \\\\monofont% 
Set monospaced font\n \\\\setlength{\\parindent}{0pt}% Remove new paragraph indentation\n \\\\obeylines% \\\\catcode`\\\\^^M\\\\active \\\\let ^^M\\\\par\n}%\n\\\\def\\\\endtreateachletterasword{\\\\egroup}\n\\\\title{Error Report}\n\\\\date{Typeset \\\\today{} at \\\\currenttime{}}\n\\\\author{TeXGen System}\n\\\\begin{document}\n\\\\maketitle{}\n\\\\tableofcontents\n\\\\newpage\n\"\"\"\n post_doc = \"\"\"\n\\\\end{document}\"\"\"\n if len(error_list) > 0:\n self.arg_pack.exitcode = 1 # ErrorReport only. We want exit of 1 when exists.\n logger.error('genErrorReport: Typesetting errors detected. Setting exit code to: {}'.format(self.arg_pack.exitcode))\n if self.arg_pack.recursive == True:# Input must be directory\n base_dir = os.path.abspath(self.arg_pack.input[0])\n error_report_dir = os.path.join(base_dir,'__ErrorReport__')\n error_report_file = os.path.join(error_report_dir, '__ErrorReport__.tex')\n else:# Non-recursive, file list input, currently same as recursive\n base_dir = os.path.abspath(os.path.join(self.arg_pack.input[0],os.pardir))\n error_report_dir = os.path.join(base_dir,'__ErrorReport__')\n error_report_file = os.path.join(error_report_dir, '__ErrorReport__.tex')\n if os.path.isdir(error_report_dir) == True:\n logger.warning('genErrorReport: \"{}\" directory already exists. Directory name adjusted using current time for unique name.'.format(error_report_dir))\n error_report_dir = error_report_dir + '{}'.format(time.time())\n error_report_file = os.path.join(error_report_dir, '__ErrorReport__{}.tex'.format(time.time()))\n os.makedirs(error_report_dir)\n f1 = open(error_report_file,'w')\n f1.write(pre_doc)\n for line in error_list:\n f1.write(line)\n f1.write(post_doc)\n f1.close()\n self.arg_pack.tex_queue.put(error_report_file)\n\n def genWarningReport(self):\n logger.debug('genWarningReport: Called.')\n log_list = []\n error_list = []\n clone_list = []#init\n self.arg_pack.completed_queue.put('Sentinel')\n for item_tuple in iter(self.arg_pack.completed_queue.get,'Sentinel'):# Stop when get yields None\n #logger.debug('genWarningReport: Temporarily removing from self.arg_pack.completed_queue: {}'.format(item_tuple))\n clone_list.append(item_tuple)# clone for adding back to completed_queue\n item = item_tuple[0]# string conversion\n logger.debug('genWarningReport processing:\\n \"{}\"'.format(item))\n if item.endswith('.log'):\n logger.debug('genWarningReport found log: {}'.format(item))\n log_list.append(item)\n logger.info('genWarningReport found logs: {}'.format(log_list))\n for item in clone_list:\n #logger.debug('genWarningReport: Restoring to self.arg_pack.completed_queue: {}'.format(item))\n self.arg_pack.completed_queue.put(item)\n #if self.arg_pack.error_report:# use default patterns\n # pattern_list = [\n # re.compile(r'^Invalid UTF-8 byte or sequence.*'),\n # re.compile(r'^Overfull .*'),\n # re.compile(r'^Underfull .*'),\n # re.compile(r'.*Warning:.*'),\n # ]\n #else:# use external pattern file\n logger.debug('Using external pattern file:\\n \"{}\"'.format(self.arg_pack.warning_report[0]))\n warning_pattern_handler = open(self.arg_pack.warning_report[0],'r')\n pattern_list = []# init\n for i,line in enumerate(warning_pattern_handler):\n pattern_list.append(re.compile(line.rstrip()))# There are issues at this step.\n logger.info('genWarningReport pattern list: {}'.format(pattern_list))\n logger.debug('genWarningReport log list: {}'.format(log_list))\n if len(log_list) > 0: # For both cases, scan logs\n for item in log_list:\n if not os.path.isfile(item):\n 
logger.error('genWarningReport: File not found: {}'.format(item))\n else:\n titled_already = False# Item needs section heading\n texlog_handle = open(item,'r')\n for i,line in enumerate(texlog_handle):\n for pattern in pattern_list:\n #logger.debug('genWarningReport: Searching for pattern: \"{}\"'.format(pattern.pattern))\n for match in re.finditer(pattern,line):\n logger.debug('genWarningReport: Matched: {}'.format(pattern.pattern))\n match_found = True\n line_escaped = self.escapeTexString(line)\n pattern_escaped = self.escapeTexString(pattern.pattern)\n if titled_already == False:\n file_name_escaped = self.escapeTexString(item)\n error_list.append('\\n\\\\section{%s}\\n\\\\subsection{Regular Expression Match}' % (os.path.basename(file_name_escaped)))\n titled_already = True\n error_list.append('\\n\\\\begin{itemize}\\n\\\\item Log output line %s is: \\\\begin{treateachletterasword}%s\\\\end{treateachletterasword} which matches regular expression \\\\begin{treateachletterasword}%s\\\\end{treateachletterasword}\\n\\\\end{itemize}' % (i+1, line_escaped,pattern_escaped) + '\\n')\n texlog_handle.close()\n pre_doc = \"\"\"\\\\documentclass{article}\n\\\\usepackage{fontspec}\n\\\\newfontfamily\\\\monofont{FreeMono.otf}\n\\\\usepackage[includeheadfoot,top=3cm,left=2cm,right=2cm,bottom=2cm]{geometry}\n\\\\usepackage{datetime}\n\\\\usepackage[hidelinks]{hyperref}\n\\\\long\\\\def\\\\treateachletterasword{%\n \\\\bgroup\n \\\\XeTeXinterchartokenstate=1% Enable Character Classes (unique to xelatex) 0=off 1=on\n \\\\XeTeXinterchartoks 0 0 = {\\\\penalty0\\\\relax}% Set token to be inserted between interchar class 0 and interchar class 0\n \\\\monofont% Set monospaced font\n \\\\setlength{\\parindent}{0pt}% Remove new paragraph indentation\n \\\\obeylines% \\\\catcode`\\\\^^M\\\\active \\\\let ^^M\\\\par\n}%\n\\\\def\\\\endtreateachletterasword{\\\\egroup}\n\\\\title{Warning Report}\n\\\\date{Typeset \\\\today{} at \\\\currenttime{}}\n\\\\author{TeXGen System}\n\\\\begin{document}\n\\\\maketitle{}\n\\\\tableofcontents\n\\\\newpage\n\"\"\"\n post_doc = \"\"\"\n\\\\end{document}\"\"\"\n if len(error_list) > 0:\n if self.arg_pack.recursive == True:# Input must be directory\n base_dir = os.path.abspath(self.arg_pack.input[0])\n error_report_dir = os.path.join(base_dir,'__WarningReport__')\n error_report_file = os.path.join(error_report_dir, '__WarningReport__.tex')\n else:# Non-recursive, file list input, currently same as recursive\n base_dir = os.path.abspath(os.path.join(self.arg_pack.input[0],os.pardir))\n error_report_dir = os.path.join(base_dir,'__WarningReport__')\n error_report_file = os.path.join(error_report_dir, '__WarningReport__.tex')\n if os.path.isdir(error_report_dir) == True:\n logger.warning('\"{}\" directory already exists. 
Directory name adjusted using current time for unique name.'.format(error_report_dir))\n error_report_dir = error_report_dir + '{}'.format(time.time())\n error_report_file = os.path.join(error_report_dir, '__WarningReport__{}.tex'.format(time.time()))\n os.makedirs(error_report_dir)\n f1 = open(error_report_file,'w')\n f1.write(pre_doc)\n for line in error_list:\n f1.write(line)\n f1.write(post_doc)\n f1.close()\n self.arg_pack.tex_queue.put(error_report_file)\n\n def escapeTexString(self,string): # Returns TeX-friendly string\n logger.debug('Running escapeTexString')\n rep = { # define desired replacements in this dictionary (mapping)\n '&': '\\\\&',\n '%': '\\\\%',\n '#': '\\\\#',\n '_': '\\\\_',\n '{': '\\\\{', # REGEX Special\n '}': '\\\\}', # REGEX Special\n '~': '\\\\char\"007E{}', # LaTeX Special\n '$': '\\\\$', # REGEX Special\n '\\\\': '\\\\char\"005C{}', # REGEX/LaTeX Special\n '^': '\\\\char\"005E{}', # REGEX/LaTeX Special\n '\"': '\\\\char\"FF02{}',\n '[': '\\\\char\"005B{}', # Left Square Bracket\n ']': '\\\\char\"005D{}', # Left Square Bracket\n }\n # use these two lines to do the replacement (could be shortened to one line)\n pattern = re.compile(\"|\".join(map(re.escape,rep.keys()))) # Create single pattern object (key to simultaneous replacement)\n new_string = pattern.sub(lambda match: rep[match.group(0)], string)\n return new_string\n\nclass TypesetThread(threading.Thread):# was called \"TypesetDocuments\"\n def __init__(self, arg_pack):\n super().__init__()\n self.arg_pack = arg_pack # self makes var available across class\n\n def run(self): # called by threadingobject.start() (thread.start())\n while self.arg_pack.stop_flag.isSet() == False: # Check stop_flag at thread start\n try: # if queue is empty, run queue.Empty\n item = self.arg_pack.tex_queue.get_nowait()# thread pulls item from task queue\n self.typesetFile(item)\n except queue.Empty:\n break # exit while when queue is empty\n else:\n self.arg_pack.tex_queue.task_done()\n while self.arg_pack.stop_flag.isSet() == True: # loop thru remaining tasks. I opted to have threads do this themselves rather than SignalHandler\n try: #\n item = self.arg_pack.tex_queue.get_nowait()# thread pulls item from task queue\n logger.debug('TypesetThread: Detected stopflag. Removing job \"{}\" from the Queue.'.format(item))\n except queue.Empty:\n break\n else:\n self.arg_pack.tex_queue.task_done()\n if self.arg_pack.stop_flag.is_set() == True:# Check stop_flag again for log message\n logger.info('TypesetThread: Detected stopflag. Rejoining thread {} to main thread.'.format(threading.get_ident()))\n logger.debug('TypesetThread: Queue empty? 
{}'.format(self.arg_pack.tex_queue.empty())) # join waits for queue to be empty\n # self.arg_pack.tex_queue.task_done() # unblock Queue so thread can return\n #return # optional, by default all methods end in return\n\n def typesetFile(self,tex_file):\n parent_dir = os.path.dirname(os.path.abspath(tex_file))\n tex_file_name = os.path.basename(tex_file)\n # -halt-on-error: ensure that xelatex stops on errors without presenting a console\n # -interaction=nonstopmode:\n # -file-line-error: makes it easier to identify errors in inputted files\n # command = 'xelatex -interaction=nonstopmode -halt-on-error -file-line-error \"{}\"'.format(tex_file_name)\n command = '{} {}'.format(self.arg_pack.typesetter, tex_file_name)\n thread = threading.current_thread()\n round_step = 0\n timeout = 300\n logger.debug('typesetFile: iterations: {}'.format(self.arg_pack.iterations[0]))\n for round_step in range(self.arg_pack.iterations[0]):\n if self.arg_pack.stop_flag.is_set() == False:\n logger.info('Starting: \\n {}\\n Thread ID: {} Round: {} at: {} Timeout: {} seconds'.format(command,self.ident,round_step+1,time.time(),timeout))\n process = subprocess.Popen(command,stdout=subprocess.PIPE,shell=True,cwd=parent_dir)\n try:\n proc_stdout = process.communicate(timeout=timeout)[0].strip()\n except subprocess.TimeoutExpired:\n process.kill()\n logger.error('Killed process due to timeout:\\n Timeout: {} seconds\\n {}'.format(timeout,command))\n if process.returncode != 0:\n logger.error('Failed: \\n {}\\n Thread ID: {} Round: {} at: {}'.format(command,self.ident,round_step+1,time.time()))\n self.arg_pack.failed_queue.put((tex_file, )) # add tuple\n self.arg_pack.exitcode = 1\n break # escape next highest loop\n else:\n logger.info('Completed: \\n {}\\n Thread ID: {} Round: {} at: {}'.format(command,self.ident,round_step+1,time.time()))\n if (round_step + 1) == self.arg_pack.iterations[0] and process.returncode < 1: # only add after iterations finished\n self.arg_pack.completed_queue.put((os.path.splitext(tex_file)[0]+'.pdf', ))\n if (round_step + 1) == self.arg_pack.iterations[0]:\n self.arg_pack.completed_queue.put((os.path.splitext(tex_file)[0]+'.log', ))\n\nclass ShowHelpOnNoArgsParser(argparse.ArgumentParser):\n def error(self, message):\n sys.stderr.write('error: %s\\n' % (message))\n self.print_help()\n self.exitcode = 2\n sys.exit(self.exitcode)\n\nclass Struct(object): # Abstract class for struct\n def __init__ (self, **argd):\n self.__dict__.update(argd) # Update using dictionary\n\nclass ArgStruct(Struct): # Here all types are given that must match argparse types\n input = []\n recursive = False\n daemon = False\n depth = 0\n output = \"\"\n clean = False\n typesetter = ''\n log = ''\n movelogs = False\n warning_report = False\n error_report = False\n thread_limit = 0\n iterations = 0\n prehook = \"\"\n posthook = \"\"\n verbosity = 'INFO'\n # Following do not take CLI args\n tex_queue = queue.Queue() # List of files to process\n stop_flag = threading.Event() # Object for signaling threads\n completed_queue = queue.Queue() # Optional for getting results from each thread\n failed_queue = queue.Queue() # Object for failed tex files\n exitcode = 0\n\ndef setupArgPack(parser,arg_struct): # here parser values are assigned to struct values\n arg_struct.input = vars(parser.parse_args()).get('input')\n arg_struct.recursive = vars(parser.parse_args()).get('recursive')\n# arg_struct.daemon = vars(parser.parse_args()).get('daemon')\n arg_struct.depth = vars(parser.parse_args()).get('depth')\n arg_struct.output = 
vars(parser.parse_args()).get('output')\n arg_struct.clean = vars(parser.parse_args()).get('clean')\n arg_struct.typesetter = vars(parser.parse_args()).get('typesetter')\n arg_struct.log = vars(parser.parse_args()).get('log')\n arg_struct.movelogs = vars(parser.parse_args()).get('movelogs')\n arg_struct.warning_report = vars(parser.parse_args()).get('warning_report')\n arg_struct.error_report = vars(parser.parse_args()).get('error_report')\n arg_struct.thread_limit = vars(parser.parse_args()).get('thread_limit')\n arg_struct.iterations = vars(parser.parse_args()).get('iterations')\n arg_struct.prehook = vars(parser.parse_args()).get('prehook')\n arg_struct.posthook = vars(parser.parse_args()).get('posthook')\n arg_struct.verbosity = vars(parser.parse_args()).get('verbosity')\n arg_struct.exitcode = 0\n return arg_struct\n\ndef parseArgs(): # Grab args and return argparse obj\n parser = ShowHelpOnNoArgsParser() #argparse.ArgumentParser(description='Process optional arguments.')\n parser.add_argument('input',nargs='+', help='Input is required. Input TeX file(s), or a path when using recursive -r')\n parser.add_argument('-r', '--recursive',action='store_true', help='Recursively find .tex files to compile. Depth can be set by depth -D.')\n# parser.add_argument('-d', '--daemon',action='store_true', help='Run in daemon mode.')\n parser.add_argument('-D', '--depth',action='store', nargs=1, type=int, default=[0], help='Set recursive depth. Default: 0 (infinity)')\n parser.add_argument('-o', '--output',action='store', dest='output', help='Move PDFs to specified directory. (when combined with -l, move logs also)')\n parser.add_argument('-c', '--clean', action='store_true', help='Remove everything in output directory. CAREFUL.')\n parser.add_argument('-l', '--log',action='store', nargs=1, default=[], help='Specify log output file. Directory will be created automatically. Existing logs with the same name will be overwritten. Log level is set to INFO unless explicitly set. Debug log is created automatically.')\n parser.add_argument('-L', '--movelogs',action='store_true', help='Move logs with pdfs to -o directory.')\n parser.add_argument('-w', '--warning_report',action='store', nargs=1, default=[], help='Generate warning report of all logs using a file containing a newline-delimited list of regular expressions to match. When input consists of individual files, the report will be created within the parent directory of the first file.')\n parser.add_argument('-e', '--error_report',action='store', nargs=1, default=[], help='Generate error report of all logs using a file containing a newline-delimited list of regular expressions to match. When input consists of individual files, the report will be created within the parent directory of the first file. Documents that fail to typeset correctly will also be included.')\n parser.add_argument('-t', '--thread_limit', action='store', nargs=1, type=int, default=[4], help='Limit the number of simultaneous typesetting threads. Default: 4.')\n parser.add_argument('-T', '--typesetter',action='store', dest='typesetter', default='xelatex -interaction=nonstopmode -halt-on-error -file-line-error', help='Explicitly set typeset command to be called. e.g. \"xelatex -interaction=nonstopmode -halt-on-error -file-line-error\"')\n parser.add_argument('-i', '--iterations', action='store', nargs=1, type=int, default=[3], help='Specify number of times the TeX file should be typeset/compiled. 
Default: 3.')\n parser.add_argument('-p', '--prehook', action='store', nargs='+', dest='prehook', help='Call external script before processing documents.')\n parser.add_argument('-P', '--posthook', action='store', nargs='+', dest='posthook', help='Call external script after processing documents.')\n parser.add_argument('-V', '--verbosity',action='store', nargs=1, default=[], help='Set console log message level (lowest to highest: DEBUG,INFO,WARN,ERROR,CRITICAL) or QUIET to suppress all messages.')\n args_given = vars(parser.parse_args())\n logger.debug('Args provided: %s' % (args_given))\n return parser # Return parser object\n\ndef setLogs(arg_pack):\n if len(arg_pack.verbosity) > 0:\n logger.info('setLogs: Setting log verbosity.\\n Verbosity: {}'.format(arg_pack.verbosity[0]))\n if arg_pack.verbosity[0] in ['DEBUG','INFO','ERROR','WARN','CRITICAL','QUIET']: # set log_level\n log_level = arg_pack.verbosity[0]\n else:# invalid level given, fall back to default\n logger.warning('Invalid log level set: {}\\n Level: INFO'.format(arg_pack.verbosity[0]))\n log_level = 'INFO'\n else:# set default\n log_level = 'INFO'\n # At this point, log_level contains one valid level\n log_level_method = getattr(logging, log_level, logging.INFO) # convert log_level to method; 'QUIET' has no logging constant, so fall back to INFO for file handlers\n\n # Set console logger level\n if log_level == 'QUIET':\n logger.debug('Setting console log level to QUIET.')\n logger.removeHandler(consoleh)\n else:# if not QUIET\n logger.debug('Setting log level to {}.'.format(log_level))\n consoleh.setLevel(log_level_method)\n\n # Set file logger level\n if arg_pack.log != []:# Determine whether log file out desired\n log_dir = os.path.abspath(os.path.join(arg_pack.log[0],os.pardir))\n try:\n os.makedirs(log_dir,exist_ok=True)\n except:\n raise\n # Add file log handler\n log_file = logging.FileHandler('{}'.format(arg_pack.log[0]), mode='w', encoding=None, delay=False)# Base logger\n log_file.setFormatter(formatter_file)\n log_file.setLevel(log_level_method)# set to default\n logger.addHandler(log_file)\n # Add file log debug handler\n log_debug_file = logging.FileHandler('{}'.format('{}.debug.log'.format(arg_pack.log[0])), mode='w', encoding=None, delay=False)\n log_debug_file.setFormatter(formatter_file)\n log_debug_file.setLevel(logging.DEBUG)\n logger.addHandler(log_debug_file)\n logger.info('Log level set to: {}'.format(log_level))\n\n\ndef convertToHumanTime(total_time):\n days = total_time // 86400\n hours = total_time // 3600 % 24\n minutes = total_time // 60 % 60\n seconds = total_time % 60\n human_readable_total_time = '%s days; %s hours; %s minutes; %s seconds' % (days,hours,minutes,seconds)\n return human_readable_total_time\n\ndef main():\n# arg_pack = parseArgsAndBuildPack() # Parse input args and build struct object package to send to texgen\n parser_obj = parseArgs()\n arg_struct = ArgStruct()\n arg_pack = setupArgPack(parser_obj,arg_struct)\n setLogs(arg_pack)# As early as possible\n start_time = time.time()\n texgen = TeXGen(arg_pack)\n end_time = time.time()\n total_time = end_time - start_time\n human_readable_total_time = convertToHumanTime(total_time)\n logger.info('Process has ended. Exit code: {}. 
Time elapsed: {}'.format(arg_pack.exitcode,human_readable_total_time))\n sys.exit(arg_pack.exitcode)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"texgen.py","file_name":"texgen.py","file_ext":"py","file_size_in_byte":41079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"476868127","text":"import es\r\nimport popuplib \r\nimport sqlite3\r\n\r\n# Get the paths for the sqlite database.\r\naddonPath = es.ServerVar('eventscripts_addondir')\r\nplayersDBpath = addonPath + '\\pvkii_shop\\players.sqlite'\r\n\r\nconn = sqlite3.connect(playersDBpath)\r\nc = conn.cursor()\r\n\r\ndef load():\r\n c.execute(\"CREATE TABLE IF NOT EXISTS 'players' ('steamid' TEXT PRIMARY KEY NOT NULL , 'coins' INTEGER NOT NULL DEFAULT 0, 'rate' INTEGER NOT NULL DEFAULT 10)\")\r\n conn.commit()\r\n \r\n es.regsaycmd('/shop', 'pvkii_shop/sendPopup', 'Shows Shop Popup')\r\n\r\ndef unload():\r\n es.unregsaycmd('/shop')\r\n\r\ndef es_player_validated(ev):\r\n c.execute(\"SELECT COUNT(*) from players where steamid = ?\", (ev['networkid'],))\r\n check = c.fetchone()\r\n \r\n if (check[0] == 0):\r\n c.execute(\"INSERT INTO players (steamid) VALUES (?)\", (ev['networkid'],))\r\n conn.commit()\r\n\r\ndef player_death(ev):\r\n c.execute(\"UPDATE players SET coins = coins + rate WHERE steamid = ?\" , (ev['es_attackersteamid'],))\r\n conn.commit()\r\n\r\ndef sendPopup():\r\n user = es.getcmduserid()\r\n steamid = es.getplayersteamid(user)\r\n \r\n c.execute(\"SELECT coins FROM players WHERE steamid = ?\", (steamid,))\r\n coins = c.fetchone()\r\n \r\n coinPopup = popuplib.create('coinPopup')\r\n coinPopup.addline('PVKII Shop Menu')\r\n coinPopup.addline('-----------------')\r\n coinPopup.addline(' ')\r\n coinPopup.addline('Coins: %s' % (coins[0]))\r\n \r\n coinPopup.send(user)","sub_path":"pvkii_shop.py","file_name":"pvkii_shop.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"557446846","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/grey/.pyenv/versions/cm/lib/python2.7/site-packages/tests/cm_libcloud/test_libcloud_aws.py\n# Compiled at: 2017-04-23 10:30:41\n\"\"\" run with\n\npython setup.py install; nosetests -v --nocapture tests/cm_libcloud/test_libcloud_api.py:Test_image.test_001\n\nnosetests -v --nocapture tests/libcloud/test_libcloud_api.py\n\nor\n\nnosetests -v tests/test_image.py\n\n\"\"\"\nfrom __future__ import print_function\nfrom pprint import pprint\nfrom time import sleep\nfrom libcloud.compute.providers import get_driver\nfrom libcloud.compute.types import Provider\nfrom cloudmesh_client.common.ConfigDict import ConfigDict\n\nclass Test_libcloud_aws:\n \"\"\"\n This class tests the lib cloud connection to aws\n \"\"\"\n\n def test_001(self):\n self.conf = ConfigDict('cloudmesh.yaml')\n self.credentials = self.conf['cloudmesh']['clouds']['aws']['credentials']\n self.default = self.conf['cloudmesh']['clouds']['aws']['default']\n pprint(self.credentials)\n self.cls = cls = get_driver(Provider.EC2_US_EAST)\n self.driver = cls(self.credentials['EC2_ACCESS_KEY'], self.credentials['EC2_SECRET_KEY'])\n assert True\n\n def test_002(self):\n \"\"\"list VMs\"\"\"\n self.nodes = self.driver.list_nodes()\n print(self.nodes)\n assert True\n\n def test_003(self):\n \"\"\"list images\"\"\"\n self.images = self.driver.list_images()\n assert True\n\n 
def test_004(self):\n \"\"\"list flavors\"\"\"\n self.sizes = self.driver.list_sizes()\n assert True\n\n def test_005(self):\n self.myflavor = self.default['flavor']\n self.myimage = self.default['image']\n assert True\n\n def test_006(self):\n self.size = [ s for s in self.sizes if s.id == self.myflavor ][0]\n self.image = [ i for i in self.images if i.id == self.myimage ][0]\n assert True\n\n def test_007(self):\n \"\"\"launch a new VM\"\"\"\n name = ('{:}-libcloud').format(self.credentials['userid'])\n self.node = self.driver.create_node(name=name, image=self.image, size=self.size)\n assert True\n\n def test_008(self):\n \"\"\"check if the new VM is in the list\"\"\"\n nodes = self.driver.list_nodes()\n print(nodes)\n assert True\n\n def test_009(self):\n \"\"\"public ip\"\"\"\n sleep(10)\n elastic_ip = self.driver.ex_allocate_address()\n self.driver.ex_associate_address_with_node(self.node, elastic_ip)\n nodes = self.driver.list_nodes()\n print(nodes)\n assert True\n\n def test_010(self):\n \"\"\"remove node\"\"\"\n self.node.destroy()\n assert True","sub_path":"pycfiles/cloudmesh_client-4.7.3.macosx-10.12-x86_64.tar/test_libcloud_aws.py","file_name":"test_libcloud_aws.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"394094338","text":"import os\nfrom django.conf import settings\n\nbase_dir = settings.GIT_REPOS_ROOT\n\nimport logging\n\nlogger = logging.getLogger(__file__)\n\nfrom pathlib import Path\n\ndef list_paths(root_tree, path=Path(\".\")):\n for blob in root_tree.blobs:\n yield path / blob.name\n for tree in root_tree.trees:\n yield from list_paths(tree, path / tree.name)\n\ndef _create_userdir(user):\n path = os.path.join(base_dir, user)\n\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == 17:\n logger.warning(\"User already created\")\n return False\n\n\ndef _safe_to_create(path):\n if os.path.exists(path) and os.path.isdir(path):\n if not os.listdir(path):\n return True\n # print(\"Directory is empty\")\n else:\n return False\n # print(\"Directory is not empty\")\n else:\n return True\n # print(\"Given Directory don't exists\")\n\n\ndef create_repository(user, repository, git_init=True):\n _create_userdir(user)\n path = os.path.join(base_dir, user, repository, \".git\")\n\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == 17:\n logger.warning(\"Directory already exists\")\n return False\n\n from git import Repo\n\n if _safe_to_create(path) and git_init:\n try:\n repo = Repo.init(path, bare=True)\n except Exception as e:\n logger.error(\"Could not initialize a bare git repository\")\n return False\n\n _set_permissions(os.path.join(base_dir, user))\n\n return True\n\ndef _set_permissions(path):\n import os\n try:\n for root, dirs, files in os.walk(path):\n for d in dirs:\n os.chmod(os.path.join(root, d), 0o0777)\n for f in files:\n os.chmod(os.path.join(root, f), 0o0777)\n except Exception as e:\n print(\"Failed to set permissions! 
{}\".format(e))\n\ndef delete_repository(user, repository):\n pass\n","sub_path":"components/studio/files/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422147683","text":"'''\nCreated on Nov 27, 2015\n\n@author: BDII1222\n'''\n\nclass employee():\n employeecount=0\n def __init__(self,name,salary):\n self.name=name\n self.salary=salary\n employee.employeecount+=1\n \n def display(self):\n print (\"employee name :\", self.name, \"employee salaray: \",self.salary)\n def empcountdisplay(self):\n print (\"total employee count : \", self.employeecount)\n \nif __name__=='__main__' :\n emp1= employee(\"amit\", 200)\n emp2=employee(\"sara\",300)\n emp1.display()\n emp2.empcountdisplay()","sub_path":"untitled/ClassMethodUtility.py","file_name":"ClassMethodUtility.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"62771414","text":"# @before-stub-for-debug-begin\nfrom python3problem116 import *\nfrom typing import *\n# @before-stub-for-debug-end\n\n#\n# @lc app=leetcode id=116 lang=python3\n#\n# [116] Populating Next Right Pointers in Each Node\n#\n\n# @lc code=start\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\"\"\"\n\nclass Solution:\n def connect(self, root: 'Node') -> 'Node':\n if root is None:\n return root\n a = []\n root.next=None\n b = []\n if root.left is not None: b.append(root.left)\n if root.right is not None: b.append(root.right)\n a.append(b)\n while len(a)>0:\n b = a.pop(0)\n c = []\n for idx,node in enumerate(b):\n if node.left is not None: c.append(node.left)\n if node.right is not None: c.append(node.right)\n if idx0:\n a.append(c)\n return root\n# @lc code=end\n\n","sub_path":"116.populating-next-right-pointers-in-each-node.py","file_name":"116.populating-next-right-pointers-in-each-node.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"290700829","text":"import json\nimport math\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom catalog.models import Category, Product, SubCategory\nfrom django.template import RequestContext\nfrom django.core import urlresolvers\nfrom cart import cart\nfrom django.http import HttpResponseRedirect\nfrom cart.forms import ProductAddToCartForm\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom decimal import Decimal\ndef index(request, template_name=\"catalog/index.html\"):\n\tcurrency_symbol = request.session.get('currency_symbol','$')\n\tpage_title = 'Musical Instruments and Sheet Music for Musicians'\n\t# getdata = request.GET.copy()\n\tgetdata = request.GET.copy().urlencode()\n\treturn render_to_response(template_name, locals(),context_instance=RequestContext(request))\n\n# def stage(request, template_name=\"catalog/index.html\"):\n# \tpage_title = 'Musical Instruments and Sheet Music for Musicians'\n# \treturn render_to_response(template_name, locals(), context_instance=RequestContext(request))\ndef show_category(request, category_slug, 
template_name=\"tags/category_list.html\"):\n\tcurrency_symbol = request.session.get('currency_symbol','$');\n\t# getdata = request.GET.copy()\n\tgetdata = request.GET.copy().urlencode()\n\tc = get_object_or_404(Category, slug=category_slug)\n\tsubcategory = c.subcategory_set.all()\n\t# print(subcategory[0].slug)\n\t# for p_item in subcategory:\n\t# page_title = c.name\n\t# meta_keywords = c.meta_keywords\n\t# meta_description = c.meta_description\n\t# path = [{'name' : c.name, \"slug\" : c.get_absolute_url}];\n\t# active_category = c.slug\n\n\t# currency_symbol = request.session.get('currency_symbol','$')\n\t# currency_rate = request.session.get('currency_rate',1)\n\t# show_list_num = [3,6]\n\t# show_num = 3\n\t# subc = get_object_or_404(SubCategory, slug=subcategory[0].slug)\n\t# products = subc.product_set.all()\n\t# for p_item in products:\n\t# \tp_item.price/=Decimal(currency_rate)\n\t# \tp_item.price = math.floor(p_item.price*100)/100\n\t# url = urlresolvers.reverse('catalog_subcategory/'+subcategory[0].slug)\n\treturn show_subcategory(request,subcategory[0].slug,template_name=\"catalog/subcategory.html\")\n\t# url = urlresolvers.reverse('catalog_subcategory',subcategory[0].slug)\n\t# return HttpResponseRedirect(url)\n\t# return render_to_response(template_name, locals(),context_instance=RequestContext(request))\n\ndef show_subcategory(request, subcategory_slug, template_name=\"catalog/subcategory.html\"):\n\tgetdata = request.GET.copy().urlencode()\n\tcurrency_symbol = request.session.get('currency_symbol','$')\n\tcurrency_rate = request.session.get('currency_rate',1)\n\tshow_list_num = [3,6]\n\tshow_num = 3\n\tsubc = get_object_or_404(SubCategory, slug=subcategory_slug)\n\tproducts = subc.product_set.all()\n\tfor p_item in products:\n\t\tp_item.price/=Decimal(currency_rate)\n\t\tp_item.price = math.floor(p_item.price*100)/100\n\t\t#wordwrap\n\t\tif len(p_item.description)>100:\n\t\t\tre_description = p_item.description\n\t\t\tp_item.description = re_description[0:100]+'...'\n\t\t\tprint(len(p_item.description))\n\tpage=request.GET.get('page')\n\tif(page == None):\n\t\tpage=1\n\tpaginator_page_range=[]\n\tview_style = 'grid-view'\n\n\tif request.method == 'POST':\n\t\tpostdata = request.POST.copy()\n\t\tprint(postdata)\n\t\tif(postdata['mode']=='add'):\n\t\t\tview_style = postdata['view_style']\n\t\t\tshow_num = postdata['show_num']\n\t\t\tcart.add_to_cart(request)\n\t\t\tadded_product = get_object_or_404(Product, slug=postdata.get('product_slug',''))\n\t\t\taction_mode = \"add\"\n\t\t\t# if request.session.test_cookie_worked():\n\t\t\t# \trequest.session.delete_test_cookie()\n\t\telif(postdata['mode']=='show_num'):\n\t\t\tview_style = postdata['view_style']\n\t\t\tshow_num = postdata['show_num']\n\t\t\tpage=1\n\t\t\tmode = \"show_num\"\n\telse:\t\n\t\tmode = \"view\"\n\t\tif(request.GET.get('view_style')==None):\n\t\t\tview_style = 'grid-view'\n\t\telse:\n\t\t\tview_style =request.GET.get('view_style')\t\n\t\trequest.session.set_test_cookie()\n\t\tif(request.GET.get('show_num')==None):\n\t\t\tshow_num = 3\n\t\telse:\n\t\t\tshow_num = request.GET.get('show_num')\n\tpaginator = Paginator(products, show_num)\n\ttry:\n\t\tproducts=paginator.page(page)\n\t\tcurrent_page = int(page)\n\t\tprint(current_page)\n\t\tprint('------------page_range-------------------')\n\t\t####################################\n\t\tif(current_page<3 or paginator.num_pages<3):\n\t\t\tp=1\n\t\t\twhile 
current_page-p>0:\n\t\t\t\tpaginator_page_range.append(p)\n\t\t\t\tp+=1\n\t\telse:\n\t\t\tpaginator_page_range.append(current_page-2)\n\t\t\tpaginator_page_range.append(current_page-1)\n\t\tif(paginator.num_pages>0):\n\t\t\tpaginator_page_range.append(current_page)\n\t\tprint(paginator_page_range)\n\t\tif(paginator.num_pages0:\n\t\t\t\tpaginator_page_range.append(p)\n\t\t\t\tp+=1\n\t\telse:\n\t\t\tpaginator_page_range.append(current_page-2)\n\t\t\tpaginator_page_range.append(current_page-1)\n\t\tif(paginator.num_pages>0):\n\t\t\tpaginator_page_range.append(current_page)\n\t\tif(paginator.num_pages
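 # Graphviz \"HTML-like\" node label templates (assumed layout): the whole label\n # string is wrapped in <...> and every visible row must be a <tr><td>...</td></tr>\n # element; the {} slots are filled by str.format() further down. The concrete\n # table attributes (border, cellspacing, align) are plausible assumptions.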
 class_template = '''<\n<table border=\"0\" cellborder=\"1\" cellspacing=\"0\" cellpadding=\"4\">\n <tr><td>{}</td></tr>\n <tr><td>\n <table border=\"0\" cellborder=\"0\" cellspacing=\"0\">\n {}\n </table>\n </td></tr>\n <tr><td>\n <table border=\"0\" cellborder=\"0\" cellspacing=\"0\">\n {}\n </table>\n </td></tr>\n</table>\n>'''\n\n class_name_template = (\n '<tr><td><b>{}.{}</b> {}</td></tr>'\n )\n row_key_value_template = '<tr><td align=\"left\">{}: {}</td></tr>'\n row_key_template = '<tr><td align=\"left\">{}</td></tr>'\n\n empty_row = '<tr><td></td></tr>'\n base_classes = ', '.join(\n [_get_fullname(c) for c in _get_base_classes(entity)]\n )\n\n if base_classes != '':\n base_classes = '({})'.format(base_classes)\n\n class_structure = _get_class_structure(entity)\n\n class_name = class_name_template.format(\n entity.__module__, entity.__qualname__, base_classes\n )\n attributes = ''.join(\n [\n row_key_value_template.format(k, v)\n for k, v in class_structure['fields'].items()\n ]\n )\n\n methods = ''.join(\n [row_key_template.format(k) for k in class_structure['methods']]\n )\n\n return class_template.format(\n class_name,\n attributes if attributes else empty_row,\n methods if methods else empty_row,\n )\n\n\ndef _search_modules(target: str, exclude_pattern=['__pycache__']) -> list:\n results = []\n for f in glob.glob('{}/**/*'.format(target), recursive=True):\n skip = False\n for x in exclude_pattern:\n if x in f:\n skip = True\n break\n if not skip and f.endswith('.py'):\n results.append(f)\n return results\n\n\ndef _extract_filename(filename: str) -> str:\n return filename.split(os.sep)[-1].split('.')[0]\n\n\ndef _get_classes_from_module(module_path: str) -> list:\n spec = importlib.util.spec_from_file_location(\n _extract_filename(module_path), module_path\n )\n module = importlib.util.module_from_spec(spec)\n\n classes_list = []\n\n try:\n spec.loader.exec_module(module) # type: ignore\n for o in module.__dir__():\n if o.startswith('__'):\n continue\n klass = getattr(module, o)\n if inspect.isclass(klass):\n classes_list.append(klass)\n except Exception as e:\n print(' {} '.format(module_path).center(80, '='))\n print(e)\n print('.' * 80)\n return classes_list\n\n\ndef create_class_diagram(classes_list: list, verbose: bool = False):\n g = gv.Digraph(comment='Graph')\n g.attr('node', shape='none', rankdir='BT')\n\n edges = []\n for c in classes_list:\n g.node(_get_fullname(c), _get_entity_class_html(c))\n\n for b in _get_base_classes(c):\n edges.append((_get_fullname(b), _get_fullname(c)))\n\n if verbose:\n print('[II]', _get_fullname(c), '- included.')\n\n g.edges(edges)\n return g\n\n\ndef create_class_diagram_from_source(source: str, verbose: bool = False):\n classes_list = []\n\n if not os.path.exists(source):\n raise Exception('Path \"{}\" doesn\\'t exist.'.format(source))\n if os.path.isdir(source):\n sys.path.insert(0, source)\n\n for f in _search_modules(source):\n classes_list.extend(_get_classes_from_module(f))\n return create_class_diagram(classes_list, verbose=verbose)\n","sub_path":"pyuml/class_graph.py","file_name":"class_graph.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"23710714","text":"\"\"\"empty message\n\nRevision ID: 5390577ddf3c\nRevises: a4f30b54d366\nCreate Date: 2021-03-17 05:50:50.137057\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5390577ddf3c'\ndown_revision = 'a4f30b54d366'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('post', sa.Column('featured_img', sa.String(length=128), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('post', 'featured_img')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/5390577ddf3c_.py","file_name":"5390577ddf3c_.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"237510457","text":"import winreg\nimport hashlib\nfrom datetime import timedelta\nfrom codes.databases.integrity_db_func import *\nfrom codes.databases.registry_db import *\n\n# Define registry key\nDEF_WINREG = {\n 'HKEY_LOCAL_MACHINE': winreg.HKEY_LOCAL_MACHINE,\n 'HKEY_CURRENT_USER': winreg.HKEY_CURRENT_USER,\n 'HKEY_CLASSES_ROOT': winreg.HKEY_CLASSES_ROOT,\n 'HKEY_CURRENT_CONFIG': winreg.HKEY_CURRENT_CONFIG,\n 'HKEY_USERS': winreg.HKEY_USERS,\n 'HKEY_DYN_DATA': winreg.HKEY_DYN_DATA,\n 'HKEY_PERFORMANCE_DATA': winreg.HKEY_PERFORMANCE_DATA\n}\n\n# Global variable\ncurrent_registry = {}\ndatabase_registry = {}\n\nread_key = 0\nread_value = 0\nunread_key = 0\n\nres = {}\n\n\n# return False if hash error\n# else return hex string hash 1 result\ndef hash1Str(str_value):\n try:\n hash_alo = hashlib.sha256()\n hash_alo.update(str_value.encode())\n except Exception as e:\n print(e, 123)\n return ERROR_CODE\n return hash_alo.hexdigest()\n\n\n# loopkup key store in currRegistry\ndef tryLookupKey(path, hkey):\n global read_value, unread_key, read_key, current_registry\n read_key = read_key + 1\n try:\n hkeyInfo = winreg.QueryInfoKey(hkey)\n current_registry[path] = (hash1Str(''), hkeyInfo[2], 0)\n # read value in key\n for i in range(hkeyInfo[1]):\n n, v, t = winreg.EnumValue(hkey, i)\n name = path + '->' + n\n\n # hash n+t+v as string\n strv = n + str(t) + str(v)\n ret = hash1Str(strv)\n if ret != ERROR_CODE:\n current_registry[name] = (ret, hkeyInfo[2])\n read_value = read_value + 1\n\n # lookup sub key\n for i in range(hkeyInfo[0]):\n subKeyName = winreg.EnumKey(hkey, i)\n try:\n subKey = winreg.OpenKey(hkey, subKeyName, access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY)\n except Exception as e:\n print(e, \"123a\")\n try:\n subKey = winreg.OpenKey(hkey, subKeyName, access=winreg.KEY_READ | winreg.KEY_WOW64_64KEY)\n except Exception as e:\n print(e, \"123b\")\n unread_key = unread_key + 1\n continue\n\n tryLookupKey(path + '\\\\' + subKeyName, subKey)\n return SUCCESS_CODE\n except EnvironmentError as e:\n print(e, \"123c\")\n return ERROR_CODE\n\n\n# convert tuple to dictionary by use name as key\ndef fromTupeToDic(src):\n if src is None or len(src) == 0:\n return {}\n ret = {}\n for i in range(len(src)):\n tmp = src[i]\n key = tmp[1]\n value = (tmp[0], tmp[2], tmp[3])\n ret[key] = value\n return ret\n\n\n# return rootHkey, pathToKey such as winreg.HKEY_LOCAL_MACHINE, bla bla and path to key from rootHkey\n# if not found return None, ''\ndef getHKeyRoot(hkeyStr):\n spl = hkeyStr.split(\"\\\\\", 1)\n if spl is None or len(spl) == 0:\n return None, ''\n pathKey = ''\n if len(spl) == 2:\n pathKey = spl[1]\n rkey = spl[0]\n rHkey = None\n\n if rkey in DEF_WINREG:\n rHkey = DEF_WINREG[rkey]\n return rHkey, pathKey\n\n\n# Do read with one keyPath, and all of key it contain\n# Retun SUCCESS_CODE if not error\n# Else retun ERROR_CODE\ndef doReadReg(keyPath):\n global read_key, read_value, unread_key\n rHkey, path = getHKeyRoot(keyPath)\n\n if rHkey is None:\n return ERROR_CODE\n try:\n hkey = winreg.OpenKey(rHkey, path)\n hkeyInfo = winreg.QueryInfoKey(hkey)\n except WindowsError as e:\n print(e, \"123d\")\n return ERROR_CODE\n tryLookupKey(keyPath, hkey)\n # db_print(\"All key: {} All value {} All 
unread {}\".format(readedKey, readedValue, unreadKey))\n\n\n# load registry to currRegistry\n# Return list key config\ndef readRegistry(regKeyList):\n for key in regKeyList:\n doReadReg(key)\n\n\n# load registry stored in db\n# read registry from db to dbRegistry\ndef loadRegistry(listKey=None):\n global res, database_registry\n res = get_registry_by_key_list(listKey)\n if res == ERROR_CODE:\n res = []\n database_registry = fromTupeToDic(res)\n # db_print(\"Db reg: {}\".format(len(dbRegistry)))\n\n\n# check if is key or value\ndef isKey(s):\n return not ('->' in s)\n\n\ndef insert_log(delList, updateList, insert_list, scan_time):\n # db: id, time, status, path\n iList = []\n for reg in delList:\n if isKey(reg[1]):\n iList.append((None, scan_time) + (KEY_DEL, reg[1]))\n else:\n iList.append((None, scan_time) + (VALUE_DEL, reg[1]))\n\n insert_many_registry_log(iList)\n # if(ret == SUCCESS_CODE):\n # # db_print(\"Insert log del success\")\n # db_print(iList)\n\n del iList\n iList = []\n for reg in updateList:\n iList.append((None, scan_time, VALUE_CHANGE, reg[1]))\n insert_many_registry_log(iList)\n # if(ret == SUCCESS_CODE):\n # db_print(\"Update log update success\")\n # db_print(iList)\n\n del iList\n iList = []\n for reg in insert_list:\n if isKey(reg[1]):\n iList.append((None, scan_time, KEY_ADD, reg[1]))\n else:\n iList.append((None, scan_time, VALUE_ADD, reg[1]))\n\n insert_many_registry_log(iList)\n # if(ret == SUCCESS_CODE):\n # db_print(\"Insert log insert success\")\n # db_print(iList)\n\n\ndef getDateTimeReadable(timeDelta):\n timeDeltaMilisec = timeDelta / 10\n return datetime(1601, 1, 1) + timedelta(microseconds=timeDeltaMilisec)\n\n\ndef doCheck(time_scan, insert_alert=True):\n global read_key, unread_key, read_value\n insertList = []\n updateList = []\n reUpdateList = []\n delIdList = []\n global database_registry, current_registry\n # {name : (hash_str, last_change)}\n for curName in current_registry:\n curVal = current_registry[curName]\n if curName in database_registry:\n dbVal = database_registry[curName]\n dbCmpVal = (dbVal[1], dbVal[2])\n if dbCmpVal != curVal:\n\n if isKey(curName):\n updateList.append((curVal[0], curVal[1], dbVal[0]))\n elif dbCmpVal[0] != curVal[0]:\n # (hash_str, name_registry, last_change, id_registry)s\n updateList.append((curVal[0], curVal[1], dbVal[0]))\n reUpdateList.append((dbVal[0], curName, curVal[0], curVal[1]))\n del database_registry[curName]\n\n else:\n insertList.append((None, curName, curVal[0], curVal[1]))\n delList = []\n for dname in database_registry:\n dval = database_registry[dname]\n delIdList.append((dval[0],))\n\n delList.append((dval[0], dname) + dval[1:3])\n\n del database_registry\n\n dret = delete_many_registry_hash(delIdList)\n\n uret = update_many_registry_hash(updateList)\n\n iret = insert_many_registry_hash(insertList)\n\n del updateList\n updateList = reUpdateList\n if insert_alert:\n insert_log(delList, updateList, insertList, time_scan)\n # data read from windows system\n\n sumary = {\n \"readed_key_num\": read_key,\n \"unread_key_num\": unread_key,\n \"readed_value_num\": read_value,\n \"update_num\": len(updateList),\n \"update_status\": uret == SUCCESS_CODE,\n \"insert_num\": len(insertList),\n \"insert_status\": iret == SUCCESS_CODE,\n \"delete_num\": len(delIdList),\n \"delete_status\": dret == SUCCESS_CODE\n }\n # if len(updateList) > 10:\n # updateList = updateList[:9]\n # if len(insertList) > 10:\n # insertList = insertList[:9]\n # if len(delList) > 10:\n # delList = delList[:9]\n # detail = {\n # 
\"update_list\": updateList,\n # \"insert_list\": insertList,\n # \"delete_list\": delList\n # }\n # res = {'sumary': sumary, 'detail': detail}\n return sumary\n\n\ndef doReset():\n global read_value, read_key, unread_key, current_registry, database_registry\n read_value = 0\n read_key = 0\n unread_key = 0\n current_registry = {}\n database_registry = {}\n\n\n# Reset global variable\ndef reset_global_var():\n global read_key, read_value, unread_key, current_registry, database_registry\n current_registry = {}\n database_registry = {}\n\n read_key = 0\n read_value = 0\n unread_key = 0\n\n\n# Scan integrity for registry key\ndef scan_registry_key(path_registry, current_time):\n print('### \\nStarting check integrity for registry key ...')\n\n error_msg = 'The error connect to database.'\n info_sys_check = get_info_sys_check_object(REGISTRY_TYPE, path_registry)\n\n if info_sys_check == ERROR_CODE:\n return ERROR_CODE, error_msg\n\n if info_sys_check is None:\n error_msg = 'The registry is not in check list.'\n return ERROR_CODE, error_msg\n\n reset_global_var()\n insert_alert_flag = info_sys_check[3] != SYS_CHECK_OBJECT_NEW\n\n list_key = [path_registry]\n\n readRegistry(list_key)\n loadRegistry(list_key)\n\n ret = doCheck(current_time, insert_alert_flag)\n\n if ret != ERROR_CODE and insert_alert_flag is False:\n update_state_sys_check_object_by_id(info_sys_check[0])\n\n msg = 'Done check integrity for registry.'\n print(msg)\n return SUCCESS_CODE, msg\n\n\ndef scan(registry, path_key):\n try:\n raw_key = winreg.OpenKey(registry, path_key)\n sub_key_count, values_count, last_modified = winreg.QueryInfoKey(raw_key)\n sub_key = winreg.OpenKey(registry, path_key)\n print(path_key + \"\\\\\")\n for i in range(values_count):\n name, value, type_value = winreg.EnumValue(sub_key, i)\n print(\"So i = \", i, ' ;ten = ', name, ' ;Gia tri = ', value, ' ;Loai = ', type_value)\n winreg.CloseKey(sub_key)\n\n for i in range(sub_key_count):\n sub_key_name = winreg.EnumKey(raw_key, i)\n path_sub_key = path_key + \"\\\\\" + sub_key_name\n scan(registry, path_sub_key)\n winreg.CloseKey(raw_key)\n except WindowsError as e:\n if e.winerror == 5:\n print(ACCESS_DENIED_CODE)\n\n # Access denied = 5\n print(e.winerror)\n print('Loi')\n # winreg.CloseKey(raw_key)\n\n\ndef test_registry():\n # DEF_WINREG = {'HKEY_LOCAL_MACHINE': winreg.HKEY_LOCAL_MACHINE}\n registry = winreg.ConnectRegistry(None, DEF_WINREG['HKEY_LOCAL_MACHINE'])\n path_key = r\"SYSTEM\\ActivationBroker\"\n scan(registry, path_key)\n","sub_path":"codes/windows/integrity/handle_registry.py","file_name":"handle_registry.py","file_ext":"py","file_size_in_byte":10319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"124886065","text":"def coin(l,s1,n):\r\n dp=[[0 for i in range(n)] for j in range(s1+1) ]\r\n\r\n for i in range(n):\r\n dp[0][i]=1\r\n \r\n for i in range(1,s1+1):\r\n for j in range(n):\r\n x=dp[i][j-1] if j>0 else 0\r\n y=dp[i-l[j]][j] if (i-l[j])>=0 else 0\r\n dp[i][j]=x+y\r\n return dp[s1][n-1]\r\nfor _ in range(int(input())):\r\n n=int(input())\r\n l=list(map(int, input().split()))\r\n s1=int(input())\r\n print(coin(l,s1,n))\r\n\r\n\r\n# another solution\r\n# for _ in range(int(input())):\r\n# n,ar,m=int(input()),list(map(int,input().split())),int(input())\r\n# dp=[0]*(m+1)\r\n# dp[0]=1\r\n# for i in range(n):\r\n# for j in range(ar[i],m+1):\r\n# dp[j]+=dp[j-ar[i]]\r\n# print(dp[m]) 
","sub_path":"coin_problem.py","file_name":"coin_problem.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"585487578","text":"import os\nos.sys.path.insert(0,'../../engine')\nos.sys.path.insert(0, 'plugins/modem/')\nos.sys.path.insert(0, 'plugins/modem/pygsm')\nfrom as_hlp import *\n\nclass asModem():\n\n\tdef __init__(self, parent=None, tid=None, carrier=None, debug=False):\n\n\t\tif debug:\n\t\t\tself.debug = 'DEBUG'\n\t\telse:\n\t\t\tself.debug = ''\n\n\t\tself.ccid = None\n\t\tself.dev = None\n\t\tself.parent = parent\n\t\tself.tid = tid\n\t\tself.sms = [] # тексты смсок\n\t\tself.carrier = carrier\n\t\tself.hlp = parent.hlp\n\n\tdef log(self, text, ctype='  '):\n\n\t\tself.parent.log(text, ctype, self.tid)\n\n\tdef set_carrier_by_ccid(self, ccid):\n\t\t'''\n\t\tопределяет оператора по ccid симки\n\t\thttp://ru.wikipedia.org/wiki/SIM-%EA%E0%F0%F2%E0#ICCID\n\t\t@str/int ccid\n\t\t@return bool\n\t\t'''\n\n\t\tccid = str(ccid)\n\n\t\tif ccid.startswith('8970199'):\n\t\t\tself.carrier = 'beeline'\n\t\telif ccid.startswith('8970101'):\n\t\t\tself.carrier = 'mts'\n\t\telif ccid.startswith('8970102'):\n\t\t\tself.carrier = 'megafon'\n\t\telif ccid.startswith('8970120'):\n\t\t\tself.carrier = 'tele2'\n\t\telif ccid.startswith('8970103'):\n\t\t\tself.carrier = 'rostelekom'\n\t\telse:\n\t\t\tself.log('ccid неизвестного оператора {0}'.format(ccid), '!')\n\t\t\treturn False\n\n\t\tself.log('установили опе��атора {0}'.format(self.carrier), '+')\n\t\treturn True\n\n\tdef execute(self, cmd):\n\n\t\tif self.debug:\n\n\t\t\tlogfile = 'plugins/modem/logs/{0}.txt'.format(self.dev)\n\t\t\tif not hlp.exists(logfile):\n\t\t\t\thlp.f_touch(logfile)\n\n\t\t\ttee = '| tee -a {}'.format(logfile)\n\t\telse:\n\t\t\ttee = ''\n\n\t\tcommand = 'python2 plugins/modem/modem.py {0} {1} {2} 2>&1 {3}'.format(self.dev, cmd, self.debug, tee)\n\n\t\tself.log('выполняем {0}'.format(command))\n\t\tres = hlp.execute(command, returnFull=True)\n\n\t\tif 'COMMAND_END' not in res:\n\t\t\tself.log('команда {} выполнена не до конца: {}'.format(cmd, res), '!')\n\t\t\treturn False\n\n\t\tif self.debug:\n\t\t\tprint(res)\n\n\t\tif 'ERROR' in res:\n\t\t\tself.log('команда {} НЕ выполнена'.format(cmd), '!')\n\t\t\treturn False\n\n\t\treturn res\n\n\tdef connect(self, dev):\n\t\t'''\n\t\tподключение к устройству\n\t\tпроверка на работоспособность\n\t\tпроверка жива ли симка\n\t\t@str dev - устройство, 0-15\n\t\t@return bool\n\t\t'''\n\n\t\tself.dev = dev\n\t\tself.log('цепляемся к /dev/ttyUSB{0}'.format(self.dev))\n\n\t\tdevfile = '/dev/ttyUSB{}'.format(dev)\n\t\tif not hlp.exists(devfile):\n\t\t\tself.log('устройство {0} не найдено'.format(devfile), '!')\n\t\t\treturn False\n\n\t\tres = self.get_serial_number()\n\t\tif res is False:\n\t\t\treturn False\n\n\t\tres = self.set_carrier_by_ccid(self.ccid)\n\t\tif res is False:\n\t\t\treturn False\n\n\t\treturn True\n\n\tdef get_serial_number(self):\n\n\t\tself.log('запрашиваем ccid')\n\n\t\tres = self.execute('ccid')\n\t\tif res is False:\n\t\t\treturn False\n\n\t\tif '+CCID: \"' not in res:\n\t\t\tself.log('нет ccid в ответе', '!')\n\t\t\treturn False\n\n\t\tccid = hlp.parse(r'\\+CCID: \"([0-9]+)', res)\n\t\tif ccid is False:\n\t\t\tself.log('не смогли достать ccid', '!')\n\t\t\treturn False\n\n\t\tself.log('достали ccid: {0}'.format(ccid), '+')\n\t\tself.ccid = ccid\n\t\treturn ccid\n\n\tdef delete_sms(self):\n\n\t\tself.log('удаляем смски')\n\t\tres = self.execute('delete')\n\n\t\tif res is 
False:\n\t\t\tself.log('failed to delete SMS messages', '!')\n\t\t\treturn False\n\t\telse:\n\t\t\tself.log('deleted', '+')\n\t\t\treturn True\n\n\tdef check_sim_status(self):\n\t\t# return true/false/'bad_sim'\n\t\t'''\n\t\tresponse: <n>,<stat>\n\n\t\t<n>\n\t\t0: Disable network registration unsolicited result code (default)\n\t\t1: Enable network registration code result code +CREG : <stat>\n\t\t2: Enable network registration and location information unsolicited result code +CREG:\n\t\t<stat>,<lac>,<ci> if there is a change of network cell.\n\n\t\t<stat>\n\t\t0: not registered, ME is not currently searching for a new operator.\n\t\t1: registered, home network.\n\t\t2: not registered, ME currently searching for a new operator to register to.\n\t\t3: registration denied.\n\t\t4: unknown.\n\t\t5: registered, roaming.\n\t\t'''\n\n\t\tself.log('checking SIM registration')\n\t\tres = self.execute('status')\n\n\t\tif res is False:\n\t\t\tself.log('could not get the status', '!')\n\t\t\treturn False\n\n\t\tif '+CREG: 0,1' in res or '+CREG: 1,1' in res or '+CREG: 2,1' in res:\n\t\t\tself.log('the SIM is fine', '+')\n\t\t\treturn True\n\n\t\tself.log('network registration error', '!')\n\t\treturn 'bad_sim'\n\n\tdef request_number(self):\n\n\t\tself.log('requesting own number')\n\t\tres = self.execute('{}_num'.format(self.carrier))\n\n\t\tif res is False:\n\t\t\tself.log('failed to request the number', '!')\n\t\t\treturn False\n\t\telse:\n\t\t\tself.log('number requested', '+')\n\t\t\treturn True\n\n\tdef load_sms(self):\n\n\t\tself.log('reading SMS messages')\n\t\tres = self.execute('sms')\n\t\tif res is False:\n\t\t\tself.log('SMS read error', '!')\n\t\t\treturn False\n\n\t\tsms = res.split('\\n')\n\t\tsms = [m.strip() for m in sms if m.strip()]\n\t\tself.sms = sms\n\t\tself.log('{0} messages collected in total'.format(len(self.sms)))\n\t\treturn sms\n\n\tdef parse_number(self, msg):\n\t\t'''\n\t\tparses the number out of messages from different carriers\n\t\t@str msg - SMS message\n\t\t@return\n\t\t'''\n\n\t\tnum = hlp.parse(r'(9[0-9]{9})', msg)\n\t\tif num is False:\n\t\t\tself.log('could not extract the number', '-')\n\t\t\treturn False\n\n\t\tself.log('got the number: {0}'.format(num), '+')\n\t\treturn num\n\n\tdef reboot(self):\n\n\t\tself.log('rebooting the modem')\n\t\tres = self.execute('reboot')\n\t\tif res is False:\n\t\t\tself.log('reboot error', '!')\n\t\t\treturn False\n\n\t\tself.hlp.slp(20, log=self.log)\n\t\tself.log('rebooted')\n\t\treturn True\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"fream/plugins/modem/as_modem.py","file_name":"as_modem.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"347442839","text":"import json\nfrom lettuce import step, world\nfrom bigml.api import HTTP_CREATED\n\n@step(r'I create a prediction for \"(.*)\"')\ndef i_create_a_prediction(step, data=None):\n    if data is None:\n        data = \"{}\"\n    model = world.model['resource']\n    data = json.loads(data)\n    resource = world.api.create_prediction(model, data)\n    world.status = resource['code']\n    assert world.status == HTTP_CREATED\n    world.location = resource['location']\n    world.prediction = resource['object']\n    world.predictions.append(resource['resource'])\n\n@step(r'the prediction for \"(.*)\" is \"(.*)\"')\ndef the_prediction_is(step, objective, prediction):\n    assert world.prediction['prediction'][objective] == 
prediction\n","sub_path":"tests/features/create_prediction-steps.py","file_name":"create_prediction-steps.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"234728444","text":"import torch\r\nimport torchvision\r\nfrom torch import nn, optim\r\n\r\nimport torch.nn.functional as F\r\nfrom torchsummary import summary\r\n\r\n# Model Configs\r\nbatch_size = 64\r\nlearning_rate = 0.01\r\ncross_entropy = nn.CrossEntropyLoss()\r\n\r\n# Data Loader\r\ntransform = torchvision.transforms.ToTensor()\r\ntrain_data = torch.utils.data.DataLoader(\r\n torchvision.datasets.MNIST(\r\n 'mnist_data', train=True, download=True, transform=transform\r\n ), batch_size=batch_size\r\n)\r\nval_data = torch.utils.data.DataLoader(\r\n torchvision.datasets.MNIST(\r\n 'mnist_data', train=False, download=True, transform=transform\r\n ), batch_size=batch_size\r\n)\r\n\r\n# Validation function\r\ndef validate(model, data):\r\n total = 0\r\n correct = 0\r\n for i, (images, labels) in enumerate(data):\r\n images = images\r\n labels = labels\r\n y_pred = model(images)\r\n value, pred = torch.max(y_pred, 1)\r\n total += y_pred.size(0)\r\n correct += torch.sum(pred == labels)\r\n return correct * 100 / total\r\n\r\n# Training Function\r\ndef train(model,epochs=5) :\r\n optimizer = optim.Adam(model.parameters(),lr=learning_rate) \r\n for n in range(epochs) :\r\n for i , (images , labels) in enumerate(train_data) :\r\n images = images\r\n labels = labels\r\n optimizer.zero_grad()\r\n prediction = model(images)\r\n loss = cross_entropy(prediction, labels)\r\n loss.backward()\r\n optimizer.step()\r\n accuracy = float(validate(model, val_data))\r\n print(\"Epoch:\", n+1, \"Loss: \", float(loss.data), \"Accuracy:\", accuracy)\r\n\r\n# Model\r\nclass CNNWithPoolRelu(nn.Module) :\r\n def __init__(self):\r\n super(CNNWithPoolRelu,self).__init__()\r\n self.conv_1 = nn.Conv2d(in_channels=1,out_channels=16,kernel_size=3)\r\n self.conv_2 = nn.Conv2d(in_channels=16,out_channels=32,kernel_size=3)\r\n \r\n self.pool_1 = nn.MaxPool2d(2)\r\n self.pool_2 = nn.MaxPool2d(2)\r\n\r\n self.dense_1 = nn.Linear(in_features=800,out_features=256)\r\n self.dense_2 = nn.Linear(in_features=256,out_features=10)\r\n\r\n self.relu = nn.ReLU()\r\n def forward(self,x) :\r\n x = self.relu(self.conv_1(x))\r\n x = self.pool_1(x)\r\n x = self.relul(self.conv_2(x))\r\n x = self.pool_2(x)\r\n x = x.view(x.shape[0],-1)\r\n x = self.relu(self.dense_1(x))\r\n x = self.dense_2(x)\r\n # output = self.tanh(x)\r\n output = F.log_softmax(x, dim=1)\r\n\r\n return output\r\n\r\nmodel = CNNWithPoolRelu()\r\n\r\nsummary(model, (1, 28, 28))\r\n\r\ntrain(model,epochs=5)","sub_path":"S7/AI/Assignment_2/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"236140803","text":"\"\"\"\nThis part of code is the DQN brain, which is a brain of the agent.\nAll decisions are made in here.\nUsing Tensorflow to build the neural network.\n\nView more on my tutorial page: https://morvanzhou.github.io/tutorials/\n\nUsing:\nTensorflow: 1.0\ngym: 0.7.3\n\"\"\"\n\nimport logging\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport itertools\n\nlogger = logging.getLogger(__name__)\n# np.random.seed(1)\n# tf.set_random_seed(1)\n\n\n# Deep Q Network off-policy\nclass DeepQNetwork:\n def __init__(\n self,\n batch_size=32,\n dueling=True,\n e_greedy_increment=None,\n 
e_greedy=0.2,\n learning_rate=0.01,\n memory_size=500,\n num_actions=4,\n num_features=2,\n output_graph=False,\n replace_target_iter=300,\n reward_decay=0.8,\n ):\n\n self.batch_size = batch_size\n self.cost_history = []\n self.dueling = dueling\n self.epsilon = 0 if e_greedy_increment is not None else e_greedy\n self.epsilon_increment = e_greedy_increment\n self.epsilon_max = e_greedy\n self.gamma = reward_decay\n self.learn_step_counter = 0\n self.lr = learning_rate\n self.memory_counter = 0\n self.memory_size = memory_size\n self.num_actions = num_actions\n self.num_features = num_features\n self.replace_target_iter = replace_target_iter\n\n # [state_old, a, r, state_new]\n self.memory = np.zeros((self.memory_size, 2 * num_features + 2))\n\n # consist of [target_net, evaluate_net]\n self._build_net()\n t_params = tf.get_collection(\"target_net_params\")\n e_params = tf.get_collection(\"eval_net_params\")\n self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n\n if output_graph:\n # $ tensorboard --logdir=logs\n # tf.train.SummaryWriter soon be deprecated, use following\n tf.summary.FileWriter(\"logs/\", self.sess.graph)\n\n self.sess.run(tf.global_variables_initializer())\n\n # Two nets have the same structure but different parameters.\n # One with parameters eval_net_params, another with target_net_params.\n def _build_net(self):\n def build_layers(s, c_names, n_l1, w_initializer, b_initializer):\n with tf.variable_scope(\"l1\"):\n w1 = tf.get_variable(\n \"w1\",\n [self.num_features, n_l1],\n initializer=w_initializer,\n collections=c_names,\n )\n b1 = tf.get_variable(\n \"b1\", [1, n_l1], initializer=b_initializer, collections=c_names\n )\n l1 = tf.nn.relu(tf.matmul(s, w1) + b1)\n\n if self.dueling:\n # Dueling DQN\n with tf.variable_scope(\"Value\"):\n w2 = tf.get_variable(\n \"w2\", [n_l1, 1], initializer=w_initializer, collections=c_names\n )\n b2 = tf.get_variable(\n \"b2\", [1, 1], initializer=b_initializer, collections=c_names\n )\n self.V = tf.matmul(l1, w2) + b2\n\n with tf.variable_scope(\"Advantage\"):\n w2 = tf.get_variable(\n \"w2\",\n [n_l1, self.num_actions],\n initializer=w_initializer,\n collections=c_names,\n )\n b2 = tf.get_variable(\n \"b2\",\n [1, self.num_actions],\n initializer=b_initializer,\n collections=c_names,\n )\n self.A = tf.matmul(l1, w2) + b2\n\n with tf.variable_scope(\"Q\"):\n out = self.V + (\n self.A - tf.reduce_mean(self.A, axis=1, keep_dims=True)\n ) # Q = V(s) + A(s,a)\n else:\n with tf.variable_scope(\"Q\"):\n w2 = tf.get_variable(\n \"w2\",\n [n_l1, self.num_actions],\n initializer=w_initializer,\n collections=c_names,\n )\n b2 = tf.get_variable(\n \"b2\",\n [1, self.num_actions],\n initializer=b_initializer,\n collections=c_names,\n )\n out = tf.matmul(l1, w2) + b2\n\n return out\n\n # ------------------ build evaluate_net ------------------\n self.state_old = tf.placeholder(\n tf.float32, [None, self.num_features], name=\"state_old\"\n ) # input\n self.q_target = tf.placeholder(\n tf.float32, [None, self.num_actions], name=\"Q_target\"\n ) # for calculating loss\n\n with tf.variable_scope(\"eval_net\"):\n # c_names(collections_names) are the collections to store variables\n c_names, n_l1, w_initializer, b_initializer = (\n [\"eval_net_params\", tf.GraphKeys.GLOBAL_VARIABLES],\n 32,\n tf.random_normal_initializer(0.0, 0.3),\n tf.constant_initializer(0.1),\n ) # config of layers, 0 is mean, 0.3 is 
stddev\n\n # first layer. collections is used later when assign to target net\n with tf.variable_scope(\"l1\"):\n w1 = tf.get_variable(\n \"w1\",\n [self.num_features, n_l1],\n initializer=w_initializer,\n collections=c_names,\n )\n b1 = tf.get_variable(\n \"b1\", [1, n_l1], initializer=b_initializer, collections=c_names\n )\n l1 = tf.nn.relu(tf.matmul(self.state_old, w1) + b1)\n\n # second layer. collections is used later when assign to target net\n with tf.variable_scope(\"l2\"):\n w2 = tf.get_variable(\n \"w2\",\n [n_l1, self.num_actions],\n initializer=w_initializer,\n collections=c_names,\n )\n b2 = tf.get_variable(\n \"b2\",\n [1, self.num_actions],\n initializer=b_initializer,\n collections=c_names,\n )\n self.q_eval = tf.matmul(l1, w2) + b2\n\n with tf.variable_scope(\"loss\"):\n # loss = [(q_target-q_eval)^2]/n where n = len(q_target)\n self.loss = tf.reduce_mean(\n tf.math.squared_difference(self.q_target, self.q_eval)\n )\n with tf.variable_scope(\"train\"):\n self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)\n\n # ------------------ build target_net ------------------\n self.state_new = tf.placeholder(\n tf.float32, [None, self.num_features], name=\"state_new\"\n )\n\n with tf.variable_scope(\"target_net\"):\n # c_names(collections_names) are the collections to store variables\n c_names = [\"target_net_params\", tf.GraphKeys.GLOBAL_VARIABLES]\n\n # first layer. collections is used later when assign to target net\n with tf.variable_scope(\"l1\"):\n w1 = tf.get_variable(\n \"w1\",\n [self.num_features, n_l1],\n initializer=w_initializer,\n collections=c_names,\n )\n b1 = tf.get_variable(\n \"b1\", [1, n_l1], initializer=b_initializer, collections=c_names\n )\n l1 = tf.nn.relu(tf.matmul(self.state_new, w1) + b1)\n\n # second layer. 
collections is used later when assign to target net\n            with tf.variable_scope(\"l2\"):\n                w2 = tf.get_variable(\n                    \"w2\",\n                    [n_l1, self.num_actions],\n                    initializer=w_initializer,\n                    collections=c_names,\n                )\n                b2 = tf.get_variable(\n                    \"b2\",\n                    [1, self.num_actions],\n                    initializer=b_initializer,\n                    collections=c_names,\n                )\n                self.q_next = tf.matmul(l1, w2) + b2\n\n    def store_transition(self, state_old, a, r, state_new):\n        logger.debug(\"Storing transition...\")\n        logger.debug(f\"state_old - {state_old}\")\n        logger.debug(f\"[a, r] - {[a, r]}\")\n        logger.debug(f\"state_new - {state_new}\")\n\n        index = self.memory_counter % self.memory_size\n        self.memory[index, :] = [*state_old, a, r, *state_new]\n        self.memory_counter += 1\n\n    def choose_action(self, state):\n        logger.debug(\"Choosing action...\")\n\n        if np.random.uniform() >= self.epsilon:\n            logger.debug(\"Choosing random action\")\n            return np.random.randint(0, self.num_actions)\n\n        # Make a batch of one sample to match expected dimensions\n        state = np.array([state], dtype=np.float32)\n        action_values = self.sess.run(self.q_eval, feed_dict={self.state_old: state})\n        logger.debug(f\"Action values - {action_values}\")\n        return np.argmax(action_values)\n\n    def learn(self):\n        # check to replace target parameters\n        logger.debug(\"Starting agent training...\")\n        if self.learn_step_counter % self.replace_target_iter == 0:\n            self.sess.run(self.replace_target_op)\n            logger.info(\"Target_params_replaced\")\n\n        # sample batch memory from all memory\n        memory_size = min(self.memory_counter, self.memory_size)\n        sample_index = np.random.choice(memory_size, size=self.batch_size)\n        batch_memory = self.memory[sample_index, :]\n\n        q_next, q_eval = self.sess.run(\n            [self.q_next, self.q_eval],\n            feed_dict={\n                self.state_new: batch_memory[:, -self.num_features :],  # fixed params\n                self.state_old: batch_memory[:, : self.num_features],  # newest params\n            },\n        )\n\n        # change q_target w.r.t q_eval's action\n        q_target = q_eval.copy()\n\n        batch_index = np.arange(self.batch_size, dtype=np.int32)\n        eval_act_index = batch_memory[:, self.num_features].astype(int)\n        reward = batch_memory[:, self.num_features + 1]\n\n        q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(\n            q_next, axis=1\n        )  # q_next is q_target\n\n        \"\"\"\n        For example in this batch I have 2 samples and 3 actions:\n        q_eval =\n        [[1, 2, 3],\n         [4, 5, 6]]\n\n        q_target = q_eval =\n        [[1, 2, 3],\n         [4, 5, 6]]\n\n        Then change q_target with the real q_target value w.r.t the q_eval's action.\n        For example in:\n        sample 0, I took action 0, and the max q_target value is -1;\n        sample 1, I took action 2, and the max q_target value is -2:\n        q_target =\n        [[-1, 2, 3],\n         [4, 5, -2]]\n\n        So the (q_target - q_eval) becomes:\n        [[(-1)-(1), 0, 0],\n         [0, 0, (-2)-(6)]]\n\n        We then backpropagate this error w.r.t the corresponding action to the network,\n        and leave every other action at error=0 because we didn't choose it.\n        \"\"\"\n\n        # train eval network\n        _, self.cost = self.sess.run(\n            [self._train_op, self.loss],\n            feed_dict={\n                self.state_old: batch_memory[:, : self.num_features],\n                self.q_target: q_target,\n            },\n        )\n        self.cost_history.append(self.cost)\n\n        if self.epsilon_increment is not None and self.epsilon < self.epsilon_max:\n            self.epsilon = min(self.epsilon + self.epsilon_increment, self.epsilon_max)\n        self.learn_step_counter += 1\n\n    def plot_cost(self):\n        import matplotlib.pyplot as plt\n\n        plt.plot(np.arange(len(self.cost_history)), self.cost_history)\n        plt.ylabel(\"Cost\")\n        plt.xlabel(\"training steps\")\n        
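# Added (not in the original tutorial): overlay a moving average so the\n        # trend stays visible through the noise of per-batch costs.\n        window = 50\n        if len(self.cost_history) >= window:\n            smoothed = np.convolve(self.cost_history, np.ones(window) / window, mode=\"valid\")\n            plt.plot(np.arange(window - 1, len(self.cost_history)), smoothed, label=\"moving average\")\n            plt.legend()\n        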
plt.show()\n","sub_path":"app/src/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":12243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"362120634","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis script 'setup' builds the AsciiDoc3 package to be installed via\npip / pip3 from 'https://pypi.org/project/asciidoc3/'.\nPip makes AsciiDoc3 available both on GNU/Linux (POSIX) and\nWindows.\nTo complete the installation it is strongly recommended (or to say\nit in a more accurate word: compelling) to run 'asciidoc3_postinstall'\nfrom the command line immediately subsequently after\n'pip3 install --user asciidoc3'. This arranges some reasonable\nsymlinks for convenient usage.\nSee https://asciidoc3.org/pypi.html for more information.\n\nCopyright (C) 2018-2020 by Berthold Gehrke \nFree use of this software is granted under the terms of the\nGNU General Public License Version 2 or higher (GNU GPLv2+).\n\"\"\"\n\nfrom os import name\nfrom sys import version\nfrom setuptools import setup, find_packages\n\n# find current version and location of executable\n# e.g.: version = '3.7.2+ (default, Feb 2 2019, 14:31:48) \\n[GCC 8.2.0]'\n# PRE = '37'\n# PRE='Python37/site-packages/'\nPRE = version[:1] + version[2:3]\nPRE = 'Python' + PRE + '/site-packages/'\n\n# this is to assure right-installing on Windows *and* GNU/Linux\nPREFIX_TUPLE = tuple()\nif name == 'nt':\n PREFIX_TUPLE = ('', PRE)\nelif name == 'posix':\n PREFIX_TUPLE = ('/',)\nelse: # guess\n PREFIX_TUPLE = ('/',)\n\ndatafiles = list()\nfor dirprefix in PREFIX_TUPLE:\n datafiles.append((dirprefix+'asciidoc3',\n ['asciidoc3.conf',\n 'COPYING',\n 'COPYRIGHT',\n 'docbook45.conf',\n 'docbook51.conf',\n 'help.conf',\n 'html4.conf',\n 'html5.conf',\n 'lang-de.conf',\n 'latex.conf',\n 'asciidoc3.py',\n 'a2x3.py',\n 'asciidoc3api.py',\n 'LICENSE',\n 'README.md',\n 'slidy.conf',\n 'setup.py',\n 'text.conf',\n 'xhtml11.conf',\n 'BUGS.txt',\n 'CHANGELOG',\n 'CHANGELOG.txt',\n 'COPYING',\n 'COPYRIGHT',\n 'INSTALL',\n 'UNINSTALL',\n 'lang-cs.conf',\n 'lang-el.conf',\n 'lang-es.conf',\n 'lang-fr.conf',\n 'lang-hu.conf',\n 'lang-it.conf',\n 'lang-ja.conf',\n 'lang-nl.conf',\n 'lang-pt-BR.conf',\n 'lang-ro.conf',\n 'lang-ru.conf',\n 'lang-se.conf',\n 'lang-zh-CN.conf',\n 'lang-uk.conf',\n 'lang-en.conf',\n 'asciidoc3_postinstall.py', ]))\n datafiles.append((dirprefix+'asciidoc3/stylesheets',\n ['stylesheets/asciidoc3.css',\n 'stylesheets/docbook-xsl.css',\n 'stylesheets/pygments.css',\n 'stylesheets/slidy.css',\n 'stylesheets/toc2.css', ]))\n datafiles.append((dirprefix+'asciidoc3/dblatex',\n ['dblatex/asciidoc3-dblatex.sty',\n 'dblatex/asciidoc3-dblatex.xsl',\n 'dblatex/dblatex-readme.txt', ]))\n datafiles.append((dirprefix+'asciidoc3/docbook-xsl',\n ['docbook-xsl/asciidoc3-docbook-xsl.txt',\n 'docbook-xsl/chunked.xsl',\n 'docbook-xsl/common.xsl',\n 'docbook-xsl/epub.xsl',\n 'docbook-xsl/fo.xsl',\n 'docbook-xsl/htmlhelp.xsl',\n 'docbook-xsl/manpage.xsl',\n 'docbook-xsl/text.xsl',\n 'docbook-xsl/xhtml.xsl', ]))\n datafiles.append((dirprefix+'asciidoc3/images',\n ['images/1.png',\n 'images/2.png',\n 'images/3.png',\n 'images/empty.png',\n 'images/helloworld.jpg',\n 'images/highlighter.png',\n 'images/highlight.jpg',\n 'images/logo_asciidoc3.png',\n 'images/redsquare.jpg',\n 'images/smallnew.png',\n 'images/tiger.png', ]))\n datafiles.append((dirprefix+'asciidoc3/images/icons',\n ['images/icons/caution.png',\n 'images/icons/example.png',\n 'images/icons/home.png',\n 
'images/icons/important.png',\n 'images/icons/next.png',\n 'images/icons/note.png',\n 'images/icons/prev.png',\n 'images/icons/README',\n 'images/icons/tip.png',\n 'images/icons/up.png',\n 'images/icons/warning.png', ]))\n datafiles.append((dirprefix+'asciidoc3/images/icons/callouts',\n ['images/icons/callouts/1.png',\n 'images/icons/callouts/2.png',\n 'images/icons/callouts/3.png',\n 'images/icons/callouts/4.png',\n 'images/icons/callouts/5.png',\n 'images/icons/callouts/6.png',\n 'images/icons/callouts/7.png',\n 'images/icons/callouts/8.png',\n 'images/icons/callouts/9.png',\n 'images/icons/callouts/10.png',\n 'images/icons/callouts/11.png',\n 'images/icons/callouts/12.png',\n 'images/icons/callouts/13.png',\n 'images/icons/callouts/14.png',\n 'images/icons/callouts/15.png', ]))\n datafiles.append((dirprefix+'asciidoc3/doc',\n ['doc/asciidoc3api.txt',\n 'doc/asciidoc3.conf',\n 'doc/customers.csv',\n 'doc/article.txt',\n 'doc/article_docbook51.txt',\n 'doc/article-docinfo.xml',\n 'doc/article_docbook51-docinfo.xml',\n 'doc/asciimathml.txt',\n 'doc/book-multi.txt',\n 'doc/book-multi_docbook51.txt',\n 'doc/book.txt',\n 'doc/book_containing_an_abstract.txt',\n 'doc/cheatsheet.txt',\n 'doc/epub-notes.txt',\n 'doc/faq.txt',\n 'doc/latex-backend.txt',\n 'doc/latex-bugs.txt',\n 'doc/latex-filter.txt',\n 'doc/latexmathml.txt',\n 'doc/latexmath.txt',\n 'doc/music-filter.txt',\n 'doc/publishing-ebooks-with-asciidoc3.txt',\n 'doc/quickstart.txt',\n 'doc/readme.txt',\n 'doc/readme_docbook51.txt',\n 'doc/slidy-example.txt',\n 'doc/slidy.txt',\n 'doc/source-highlight-filter.txt',\n 'doc/test.txt',\n 'doc/a2x3.1.gz',\n 'doc/a2x3.1.txt',\n 'doc/asciidoc3.1.gz',\n 'doc/asciidoc3.1.txt',\n 'doc/userguide.txt', ]))\n datafiles.append((dirprefix+'asciidoc3/javascripts',\n ['javascripts/asciidoc3.js',\n 'javascripts/ASCIIMathML.js',\n 'javascripts/LaTeXMathML.js',\n 'javascripts/slidy.js',\n 'javascripts/toc.js', ]))\n datafiles.append((dirprefix+'asciidoc3/filters/code',\n ['filters/code/code-filter.conf',\n 'filters/code/code-filter.py',\n 'filters/code/code-filter-readme.txt',\n 'filters/code/code-filter-test.txt', ]))\n datafiles.append((dirprefix+'asciidoc3/filters/graphviz',\n ['filters/graphviz/asciidoc3-graphviz-sample.txt',\n 'filters/graphviz/graphviz2png.py',\n 'filters/graphviz/graphviz-filter.conf', ]))\n datafiles.append((dirprefix+'asciidoc3/filters/latex',\n ['filters/latex/latex2png.py',\n 'filters/latex/latex-filter.conf', ]))\n datafiles.append((dirprefix+'asciidoc3/filters/music',\n ['filters/music/music-filter.conf',\n 'filters/music/example_music-filter.txt',\n 'filters/music/music-filter-test.txt',\n 'filters/music/music2png.py', ]))\n datafiles.append((dirprefix+'asciidoc3/filters/source',\n ['filters/source/source-highlight-filter.conf',\n 'filters/source/source-highlight-filter-test.txt', ]))\n datafiles.append((dirprefix+'asciidoc3/themes/flask',\n ['themes/flask/flask.css', ]))\n datafiles.append((dirprefix+'asciidoc3/themes/volnitsky',\n ['themes/volnitsky/volnitsky.css', ]))\n datafiles.append((dirprefix+'asciidoc3/vim',\n ['vim/readme-vim.txt', ]))\n datafiles.append((dirprefix+'asciidoc3/vim/syntax',\n ['vim/syntax/asciidoc3.vim', ]))\n datafiles.append((dirprefix+'asciidoc3/man',\n ['doc/a2x3.1.gz',\n 'doc/a2x3.1.txt',\n 'doc/asciidoc3.1.gz',\n 'doc/asciidoc3.1.txt', ]))\n datafiles.append((dirprefix+'asciidoc3/tests',\n ['tests/a2x3_multitest.py',\n 'tests/readme-tests.txt',\n 'tests/testa2x3.py',\n 'tests/testasciidoc3.conf',\n 'tests/testasciidoc3.py', ]))\n 
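# Illustration (added; not in the original setup.py): each data_files tuple\n    # maps an install directory to a list of source files, so on POSIX the entry\n    #     ('/asciidoc3', ['asciidoc3.conf', ...])\n    # asks setuptools to copy asciidoc3.conf into <prefix>/asciidoc3/.\n    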
datafiles.append((dirprefix+'asciidoc3/tests/data',\n ['tests/data/customers.csv',\n 'tests/data/testcases_docbook45.conf',\n 'tests/data/testcases_docbook51.conf',\n 'tests/data/lang-cs-test.txt',\n 'tests/data/lang-en-test.txt',\n 'tests/data/lang-fr-test.txt',\n 'tests/data/lang-it-test.txt',\n 'tests/data/lang-pt-BR-test.txt',\n 'tests/data/lang-ru-test.txt',\n 'tests/data/newtables.txt',\n 'tests/data/newtables_docbook51.txt',\n 'tests/data/utf8-examples.txt',\n 'tests/data/filters-test.txt',\n 'tests/data/lang-de-man-test.txt',\n 'tests/data/lang-es-man-test.txt',\n 'tests/data/lang-hu-man-test.txt',\n 'tests/data/lang-nl-man-test.txt',\n 'tests/data/lang-ro-man-test.txt',\n 'tests/data/lang-uk-man-test.txt',\n 'tests/data/nonvalid_docbook51.txt',\n 'tests/data/testcases_docbook45.txt',\n 'tests/data/testcases_docbook51.txt',\n 'tests/data/lang-de-test.txt',\n 'tests/data/lang-es-test.txt',\n 'tests/data/lang-hu-test.txt',\n 'tests/data/lang-ja-test.txt',\n 'tests/data/lang-nl-test.txt',\n 'tests/data/lang-ro-test.txt',\n 'tests/data/lang-se-test.txt',\n 'tests/data/lang-uk-test.txt',\n 'tests/data/open-block-test.txt',\n 'tests/data/lang-cs-man-test.txt',\n 'tests/data/lang-en-man-test.txt',\n 'tests/data/lang-fr-man-test.txt',\n 'tests/data/lang-it-man-test.txt',\n 'tests/data/lang-pt-BR-man-test.txt',\n 'tests/data/lang-ru-man-test.txt',\n 'tests/data/rcs-id-marker-test.txt',\n 'tests/data/utf8-bom-example.txt',\n 'tests/data/utf8-bom-test.txt', ]))\n datafiles.append((dirprefix+'asciidoc3/tests/docbook_validation',\n ['tests/docbook_validation/asciidoc3_docbook45_validation.py',\n 'tests/docbook_validation/asciidoc3_docbook51_relaxng_validation.py',\n 'tests/docbook_validation/asciidoc3_docbook51_schematron_validation.py',\n 'tests/docbook_validation/asciidoc3_docbook51_w3cxml_validation.py',\n 'tests/docbook_validation/rng-docbook51.rng', ]))\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"asciidoc3\",\n version=\"3.2.0\",\n description=\"\"\"AsciiDoc3 Python3 GNU/Linux Windows AsciiDoc - see https://asciidoc3.org/pypi.html BEFORE installing\"\"\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=['text', 'markup', 'Windows', 'asciidoc', 'asciidoc3', 'python3'],\n author=\"Berthold Gehrke\",\n author_email=\"berthold.gehrke@gmail.com\",\n url=\"https://asciidoc3.org\",\n project_urls={\n \"Source\": \"https://gitlab.com/asciidoc3/asciidoc3\",\n \"Funding\": \"https://asciidoc3.org/contact.html\"\n },\n license='GPLv2+',\n packages=find_packages(),\n entry_points={'console_scripts':\n ['asciidoc3=asciidoc3.asciidoc3:main',\n 'a2x3=asciidoc3.a2x3:main',\n 'asciidoc3_postinstall=asciidoc3.asciidoc3_postinstall:main']},\n include_package_data=True,\n # 'data_files' do not contain the symlinks to dir 'images' and to 'asciidoc3api.py'\n # from ./tests: so run 'asciidoc3_postinstall' after 'pip install --user asciidoc3'\n data_files=datafiles,\n zip_safe=False,\n python_requires=\">= 3.5\",\n classifiers=[ # for a list of valid classifiers see https://pypi.org/classifiers/\n 'Topic :: Text Processing',\n 'Topic :: Text Processing :: Markup',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 
3.6',\n        'Programming Language :: Python :: 3.7',\n        'Development Status :: 5 - Production/Stable',\n    ],\n)\n","sub_path":"pypi_install_script/asciidoc3-3.2.0-py3-none-any/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":15017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"192269421","text":"import random\n\ndef rand(start, end):\n    return int(random.random()*(end-start+1)) + start\n    #return number\n\n# def main():\n#     start = 1\n#     end = 6\n#     number = rand(start, end)\n#     print(number)\n#\n# main()\n\ndef main():\n    number = rand(1,100)\n\n    for i in range(1,6):\n        num = int(input(\"Guess \"+str(i)+\": \"))\n        result = number -num\n        if result ==0:\n            print(\"Correct!\")\n            break\n        elif result > 0:\n            print(\"The answer is larger than\", num)\n        else:\n            print(\"The answer is smaller than\", num)\n    if result != 0:\n        print('You failed.\\nThe answer was', number)\n\n\n    # answer = False\n    # cnt=1\n    # number = rand(start, end)\n    # print(number)\n    # while answer==False:\n    #     print(cnt,end=\"\")\n    #     num = int(input(\"guess: \"))\n    #     if cnt == 5:\n    #         answer = True\n    #         print(\"You failed\")\n    #     elif number == num:\n    #         print(\"Correct\")\n    #         answer = True\n    #     elif number > num:\n    #         print(\"Larger than\", num)\n    #         cnt+=1\n    #     elif number < num:\n    #         print(\"Smaller than\", num)\n    #         cnt+=1\n\nmain()\n\n\n","sub_path":"01_python/chapter8/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"152989959","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nPython script for cron.\n\"\"\"\n\nimport urllib2\n# pylint: disable=relative-import\nfrom config import (\n    USERS_XML_REMOTE_FILE,\n    USERS_XML_LOCAL_FILE\n)\n\n\ndef get_users_xml():\n    \"\"\"\n    Download the current users data XML.\n    \"\"\"\n    f_remote = urllib2.urlopen(USERS_XML_REMOTE_FILE)\n    data = f_remote.read()\n    with open(USERS_XML_LOCAL_FILE, \"w\") as f_local:\n        f_local.write(data)\n    f_remote.close()\n    return\n\n\nif __name__ == '__main__':\n    get_users_xml()\n","sub_path":"src/presence_analyzer/users_cron.py","file_name":"users_cron.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"379577055","text":"import requests\nimport bs4\nimport datetime\n\nhtml = requests.get('https://www.naver.com').text\nsoup = bs4.BeautifulSoup(html, 'html.parser')\n\nranks = soup.select('.PM_CL_realtimeKeyword_rolling .ah_item .ah_k')\nnow = datetime.datetime.now()\n\nwith open('naver_rank.txt', 'w', encoding = 'utf-8') as f:\n    f.write(f'Naver real-time search rankings as of {now}\\n')\n    for i, rank in enumerate(ranks): # [(0, 'a'), (1, 'b'), (2, 'c'), ...]\n        f.write(f'{i+1}. {rank.text}\\n')","sub_path":"python_190527/naver_rank.py","file_name":"naver_rank.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"147804257","text":"# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for NovoGrad Optimizer.\"\"\"\n\nimport sys\n\nimport pytest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_addons.optimizers import NovoGrad\nfrom tensorflow_addons.utils import test_utils\n\n\n@test_utils.run_all_in_graph_and_eager_modes\nclass NovoGradTest(tf.test.TestCase):\n def run_dense_sample(self, iterations, expected, optimizer):\n var_0 = tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)\n var_1 = tf.Variable([3.0, 4.0], dtype=tf.dtypes.float32)\n\n grad_0 = tf.constant([0.1, 0.2], dtype=tf.dtypes.float32)\n grad_1 = tf.constant([0.3, 0.4], dtype=tf.dtypes.float32)\n\n grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1]))\n\n if tf.executing_eagerly():\n for _ in range(iterations):\n optimizer.apply_gradients(grads_and_vars)\n else:\n update = optimizer.apply_gradients(grads_and_vars)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n for _ in range(iterations):\n self.evaluate(update)\n\n self.assertAllClose(var_0.read_value(), expected[0], atol=2e-4)\n self.assertAllClose(var_1.read_value(), expected[1], atol=2e-4)\n\n def run_sparse_sample(self, iterations, expected, optimizer):\n var_0 = tf.Variable([1.0, 2.0])\n var_1 = tf.Variable([3.0, 4.0])\n\n grad_0 = tf.IndexedSlices(\n tf.constant([0.1, 0.2]), tf.constant([0, 1]), tf.constant([2])\n )\n grad_1 = tf.IndexedSlices(\n tf.constant([0.3, 0.4]), tf.constant([0, 1]), tf.constant([2])\n )\n\n grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1]))\n\n if tf.executing_eagerly():\n for _ in range(iterations):\n optimizer.apply_gradients(grads_and_vars)\n else:\n update = optimizer.apply_gradients(grads_and_vars)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n for _ in range(iterations):\n self.evaluate(update)\n\n self.assertAllClose(var_0.read_value(), expected[0], atol=2e-4)\n self.assertAllClose(var_1.read_value(), expected[1], atol=2e-4)\n\n def test_dense_sample(self):\n self.run_dense_sample(\n iterations=1,\n expected=[[0.9552786425, 1.9105572849], [2.9400000012, 3.9200000016]],\n optimizer=NovoGrad(lr=0.1, epsilon=1e-8),\n )\n\n def test_sparse_sample(self):\n self.run_sparse_sample(\n iterations=1,\n expected=[[0.9552786425, 1.9105572849], [2.9400000012, 3.9200000016]],\n optimizer=NovoGrad(lr=0.1, epsilon=1e-8),\n )\n\n def test_dense_sample_with_weight_decay(self):\n self.run_dense_sample(\n iterations=1,\n expected=[[0.945278642, 1.8905572849], [2.9100000012, 3.8800000016]],\n optimizer=NovoGrad(lr=0.1, weight_decay=0.1, epsilon=1e-8),\n )\n\n def test_sparse_sample_with_weight_decay(self):\n self.run_sparse_sample(\n iterations=1,\n expected=[[0.945278642, 1.8905572849], [2.9100000012, 3.8800000016]],\n optimizer=NovoGrad(lr=0.1, weight_decay=0.1, epsilon=1e-8),\n )\n\n def test_dense_sample_with_grad_averaging(self):\n self.run_dense_sample(\n iterations=2,\n expected=[[0.9105572849, 1.8211145698], 
[2.8800000024, 3.8400000032]],\n optimizer=NovoGrad(lr=0.1, grad_averaging=True, epsilon=1e-8),\n )\n\n def test_sparse_sample_with_grad_averaging(self):\n self.run_sparse_sample(\n iterations=2,\n expected=[[0.9105572849, 1.8211145698], [2.8800000024, 3.8400000032]],\n optimizer=NovoGrad(lr=0.1, grad_averaging=True, epsilon=1e-8),\n )\n\n def test_fit_simple_linear_model(self):\n np.random.seed(0x2020)\n tf.random.set_seed(0x2020)\n\n x = np.random.standard_normal((100000, 3))\n w = np.random.standard_normal((3, 1))\n y = np.dot(x, w) + np.random.standard_normal((100000, 1)) * 1e-5\n\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Dense(input_shape=(3,), units=1))\n model.compile(NovoGrad(), loss=\"mse\")\n\n model.fit(x, y, epochs=2)\n\n x = np.random.standard_normal((100, 3))\n y = np.dot(x, w)\n predicted = model.predict(x)\n\n max_abs_diff = np.max(np.abs(predicted - y))\n self.assertLess(max_abs_diff, 1e-2)\n\n def test_get_config(self):\n opt = NovoGrad(lr=1e-4, weight_decay=0.0, grad_averaging=False)\n config = opt.get_config()\n self.assertEqual(config[\"learning_rate\"], 1e-4)\n self.assertEqual(config[\"weight_decay\"], 0.0)\n self.assertEqual(config[\"grad_averaging\"], False)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__]))\n","sub_path":"tensorflow_addons/optimizers/novograd_test.py","file_name":"novograd_test.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"632856198","text":"\"\"\"djangoApi URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\n\nfrom django.contrib import admin\nfrom django.urls import path, include\n\n\nfrom backend.views import index, AuthView, TableView, VisitView, MessageView, FreqView, CinemasView, RecommendationsView\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/visit/////', VisitView.as_view()),\n path('api/cinemas/', CinemasView.as_view()),\n path('api/message/', MessageView.as_view()),\n path('api/freq///', FreqView.as_view()),\n path('api/table/', TableView.as_view()),\n path('api/rec/', RecommendationsView.as_view()),\n path('', index),\n]\n\n\n","sub_path":"djangoApi/djangoApi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"462982666","text":"#!/usr/bin/env python\n'''yolo ROS Node'''\n# license removed for brevity\nfrom __future__ import print_function\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom core import CvBridge, CvBridgeError\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Float32\n\nfrom test.msg import position\nfrom test.msg import multipositions\n\nimport colorsys\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nimport sys\nros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'\n\nif ros_path in sys.path:\n\n sys.path.remove(ros_path)\n\nimport cv2\nimport time\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\n\nfrom yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom yolo3.utils import image_preporcess\nfrom yolo_class import YOLO\nimport pyrealsense2 as rs\nfrom random import randint\nimport math\n\ntrackerTypes = ['BOOSTING', 'MIL', 'KCF','TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']\ncamera_fx = 383.599\ncamera_fy = 383.599\ncamera_cx = 320.583\ncamera_cy = 238.327\n\ndef createTrackerByName(trackerType):\n # Create a tracker based on tracker name\n if trackerType == trackerTypes[0]:\n tracker = cv2.TrackerBoosting_create()\n elif trackerType == trackerTypes[1]: \n tracker = cv2.TrackerMIL_create()\n elif trackerType == trackerTypes[2]:\n tracker = cv2.TrackerKCF_create()\n elif trackerType == trackerTypes[3]:\n tracker = cv2.TrackerTLD_create()\n elif trackerType == trackerTypes[4]:\n tracker = cv2.TrackerMedianFlow_create()\n elif trackerType == trackerTypes[5]:\n tracker = cv2.TrackerGOTURN_create()\n elif trackerType == trackerTypes[6]:\n tracker = cv2.TrackerMOSSE_create()\n elif trackerType == trackerTypes[7]:\n tracker = cv2.TrackerCSRT_create()\n else:\n tracker = None\n print('Incorrect tracker name')\n print('Available trackers are:')\n for t in trackerTypes:\n print(t)\n \n return tracker\n\ndef talker():\n '''yolo Publisher'''\n trackerType = \"CSRT\"\n pipeline = rs.pipeline()\n config = rs.config()\n config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n \n # Start streaming\n profile = pipeline.start(config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n\n pub = rospy.Publisher('bbox_position1', multipositions, queue_size=10)\n #pub1 = rospy.Publisher('bbox_position', multipositions, queue_size=10)\n rospy.init_node('bbox_pub1', anonymous=False)\n #rate = rospy.Rate(10) # 10hz\n\n\n yolo = YOLO()\n\n # set start time to current time\n start_time = time.time()\n # displays the frame rate every 2 
second\n    display_time = 2\n    # Set primary FPS to 0\n    fps = 0\n\n    #bboxes = []\n\n    # we create the video capture object cap\n    #cap = cv2.VideoCapture(0)\n    #if not cap.isOpened():\n        #raise IOError(\"We cannot open webcam\")\n    \n    j = 0\n\n    while not rospy.is_shutdown():\n\n        if(j%10 == 0):\n            frames = pipeline.wait_for_frames()\n            color_frame = frames.get_color_frame()\n            depth_frame = frames.get_depth_frame()\n\n            image_frame = np.asanyarray(color_frame.get_data())\n            depth_frame = np.asanyarray(depth_frame.get_data())\n\n            color_frame = cv2.resize(image_frame, None, fx=1.0, fy=1.0, interpolation=cv2.INTER_AREA)\n            depth_frame = cv2.resize(depth_frame, None, fx=1.0, fy=1.0, interpolation=cv2.INTER_AREA)\n\n            position_list = multipositions()\n            r_image, ObjectsList, position_list = yolo.detect_img(color_frame,depth_frame,depth_scale)\n\n\n            bridge = CvBridge()\n            image = bridge.cv2_to_imgmsg(color_frame,\"bgr8\")\n            position_list.picture = image\n\n            #print(position_list.lists)\n            #bbox = (1,2,3,4)\n            #print(bbox)\n            \n            bboxes = []\n            colors = []\n            multiTracker = cv2.MultiTracker_create()\n            print(\"hello\")\n            for item in position_list.lists:\n                bbox = (item.left,item.top,item.right,item.bottom)\n                bboxes.append(bbox)\n                colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))\n                print(bbox)\n            print(bboxes)\n            print(\"bboxes done\")\n\n            #multiTracker = cv2.MultiTracker_create()\n            #print(\"hello\")\n            img = cv2.imread(\"1.jpg\")\n            for bbox in bboxes:\n                print(bbox)\n                multiTracker.add(createTrackerByName(trackerType), color_frame, bbox)\n\n            print(\"multitracker done\")\n            #pub.publish(position_list)\n        j+=1  # count every frame so detection re-runs every 10th frame\n\n        frames = pipeline.wait_for_frames()\n        color_frame = frames.get_color_frame()\n        depth_frame = frames.get_depth_frame()\n\n        frame = np.asanyarray(color_frame.get_data())\n        depth_image = np.asanyarray(depth_frame.get_data())\n\n        #ret, frame = cap.read()\n        # resize our captured frame if we need\n        frame = cv2.resize(frame, None, fx=1.0, fy=1.0, interpolation=cv2.INTER_AREA)\n\n        bridge = CvBridge()\n        image = bridge.cv2_to_imgmsg(frame,\"bgr8\")\n\n        #position_list = multipositions()\n        # detect object on our frame\n        #r_image, ObjectsList, position_list = yolo.detect_img(frame)\n        #position_list.picture = image\n\n        success, boxes = multiTracker.update(frame)\n        for i, newbox in enumerate(boxes):\n            p1 = (int(newbox[0]), int(newbox[1]))\n            p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))\n            cv2.rectangle(frame, p1, p2, colors[i], 2, 1)\n        \n\n        #cv2.imshow('MultiTracker', frame)\n\n        #show us frame with detection\n        cv2.imshow(\"tracking\", frame)\n        cv2.imshow(\"Web cam input\", r_image)\n        if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n            cv2.destroyAllWindows()\n            break\n\n        # calculate FPS\n        fps += 1\n        TIME = time.time() - start_time\n        if TIME > display_time:\n            print(\"FPS:\", fps / TIME)\n            fps = 0 \n            start_time = time.time()\n        \n        hello_str = \"hello world %s\" % rospy.get_time()\n        rospy.loginfo(hello_str)\n        #pub.publish(hello_str)\n        pub.publish(position_list)\n\n\n    pipeline.stop()  # stop the RealSense pipeline (the webcam capture above is commented out)\n    cv2.destroyAllWindows()\n    yolo.close_session()\n\n    #pub = rospy.Publisher('chatter', String, queue_size=10)\n    #rospy.init_node('yolo', anonymous=False)\n    #rate = rospy.Rate(10) # 10hz\n    '''\n    while not rospy.is_shutdown():\n        hello_str = \"hello world %s\" % rospy.get_time()\n        rospy.loginfo(hello_str)\n        pub.publish(hello_str)\n        rate.sleep()\n    '''\ndef getDistanceAngle(pixel_x, pixel_y, real_z):\n    z = float(real_z)\n    x = (pixel_x-camera_cx)*z/camera_fx\n    y = (pixel_y-camera_cy)*z/camera_fy\n\n    horizon_angle = math.atan2(x,z)\n    
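# Added note (not in the original): with a pinhole camera model,\n    # (pixel - principal_point) * depth / focal_length recovers metric X and Y,\n    # so atan2 against Z gives the horizontal bearing above and the elevation below.\n    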
vertical_angle = math.atan2(y,z)\n    absolute_distance = math.sqrt(x*x+y*y+z*z)\n    print(absolute_distance)\n    print(horizon_angle)\n    print(vertical_angle)\n    #pass\n\n\nif __name__ == '__main__':\n    try:\n        talker()\n    except rospy.ROSInterruptException:\n        pass\n","sub_path":"src/other/bbox_pub1.py","file_name":"bbox_pub1.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"206786611","text":"import psycopg2\nfrom psycopg2 import sql\nfrom easysnmp import snmp_get\nfrom easysnmp import EasySNMPConnectionError\nfrom datetime import datetime, timedelta\nfrom pytz import timezone\nimport time\n\nclass DataGetter1:\n    def __init__(self):\n        self.conn = psycopg2.connect(dbname='anomalyDetectionDB', user='alina', password='alina', host='localhost')\n        self.conn.autocommit = True\n        self.prevDataIn = {}\n        self.prevDataOut = {}\n        self.local = timezone('Europe/Moscow')\n        pass\n    def get_data(self):\n        file = open('devices', 'r')\n        devices = file.read()\n        file.close()\n        cursor = self.conn.cursor()\n        to_add = []\n        # to_return = []\n        devices = devices.split('\\n')\n        for device in devices:\n            device_arr = device.split(':')\n            int_id = device_arr[0]\n            snmp_int_id = device_arr[1]\n            host = device_arr[2]\n            community = device_arr[3]\n            in_octets = None\n            out_octets = None\n            while in_octets is None:\n                try:\n                    in_octets = int(snmp_get('ifInOctets.' + snmp_int_id, hostname=host, community=community, version=2).value)\n                except EasySNMPConnectionError as err:\n                    print(\"Connection error at \", (datetime.now(self.local)))\n            while out_octets is None:\n                try:\n                    out_octets = int(snmp_get('ifOutOctets.' + snmp_int_id, hostname=host, community=community, version=2).value)\n                except EasySNMPConnectionError as err:\n                    print(\"Connection error at \", (datetime.now(self.local)))\n            if self.prevDataIn.get(int_id) is None:\n                #print(\"Not found\")\n                self.prevDataIn[int_id] = in_octets\n                self.prevDataOut[int_id] = out_octets\n            else:\n                #print(\"Found\")\n                if in_octets > self.prevDataIn[int_id]:\n                    in_speed = (in_octets - self.prevDataIn[int_id]) / 30\n                else:\n                    in_speed = (4294967295 - self.prevDataIn[int_id] + in_octets) / 30\n                if out_octets > self.prevDataOut[int_id]:\n                    out_speed = (out_octets - self.prevDataOut[int_id]) / 30\n                else:\n                    out_speed = (4294967295 - self.prevDataOut[int_id] + out_octets) / 30\n                timestamp = datetime.now(self.local).strftime('%Y-%m-%d %H:%M:%S')\n                to_add.append((int_id, timestamp, in_speed, out_speed))\n                # remember the latest counters so the next poll measures a fresh delta\n                self.prevDataIn[int_id] = in_octets\n                self.prevDataOut[int_id] = out_octets\n        if len(to_add) != 0:\n            # noinspection SqlDialectInspection,SqlNoDataSourceInspection\n            insert = sql.SQL('INSERT INTO data (int_id, datetime, in_speed, out_speed) VALUES {}').format(\n                sql.SQL(',').join(map(sql.Literal, to_add))\n            )\n            cursor.execute(insert)\n            to_add.clear()\n        cursor.close()\n\n    def provide_data(self, int_id, mins = 20, days = 7):\n        cursor = self.conn.cursor()\n        timestamp = (datetime.now(self.local) - timedelta(days=days)).strftime('%Y-%m-%d %H:%M:%S')\n        cursor.execute('SELECT datetime, in_speed, out_speed FROM data WHERE datetime > %s AND int_id = %s', (timestamp, int_id))\n        in_speed = []\n        out_speed = []\n        in_speed_sum = 0\n        out_speed_sum = 0\n        results = cursor.fetchall()\n        for i in range(len(results)):\n            in_speed_sum += results[i][1]\n            out_speed_sum += results[i][2]\n            if i % (mins * 2) - 1 == 0 and i > 0:\n                in_speed.append(in_speed_sum/(mins * 2 * 1024))\n                out_speed.append(out_speed_sum/(mins * 2 * 1024))\n                in_speed_sum = 0\n                out_speed_sum = 0\n        cursor.close()\n        return in_speed, out_speed, timestamp\n\n
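\n# Added sketch (not in the original file): ifInOctets/ifOutOctets are 32-bit\n# counters that wrap at 4294967295, which is why get_data computes\n#     (4294967295 - prev + current) / 30\n# whenever a new reading is smaller than the previous one (30 s poll interval).\n\nif 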
__name__ == \"__main__\":\n    dataGetter = DataGetter1()\n    dataGetter.provide_data('1')  # int_id is required; '1' is only an example id","sub_path":"DataGetter.py","file_name":"DataGetter.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"480571234","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom logs.models import Logs\n\n\ndef addlogmsg(user, instance, message):\n    \"\"\"\n    :param user:\n    :param instance:\n    :param message:\n    :return:\n    \"\"\"\n    add_log_msg = Logs(user=user, instance=instance, message=message)\n    add_log_msg.save()\n\n\ndef showlogs(request):\n    \"\"\"\n    :param request:\n    :return:\n    \"\"\"\n\n    if not request.user.is_authenticated():\n        return HttpResponseRedirect(reverse('index'))\n\n    if not request.user.is_superuser:\n        return HttpResponseRedirect(reverse('index'))\n\n    logs = Logs.objects.all()\n\n    return render(request, 'showlogs.html', locals())\n","sub_path":"logs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"380421180","text":"from __future__ import annotations\n\nfrom enum import Enum\n\nfrom . import widget\nfrom .animate import Coord, Animater, Direction\n\n\nclass Window(widget.PrimaryCanvas):\n    animation_speed = 10\n    views = {}\n    current = None\n\n    def init(self):\n        self.animater = Animater(self)\n\n    def __coord(self, id):\n        return Coord(*self.coords(id))\n\n    def __set(self, view: View, coord: Coord):\n        wid = view.draw(coord, anchor='nw')\n        self.views[view] = wid\n        return wid\n\n    def set_view(self, view: View):\n        self.current = view\n        self.__set(self.current, self.origin)\n\n    def move_view(self, view: View, end: Coord):\n        wid = self.views.get(view)\n        if wid is not None:\n            self.animater.add_motion(\n                wid, end, speed=self.animation_speed\n            )\n\n    def move_in(self, view: View, direction: Direction):\n        distance = self.get_distance(direction)\n        start = self.origin + distance\n        wid = self.__set(view, start)\n        self.move_view(view, self.origin)\n        return wid\n\n    def move_out(self, view: View, direction: Direction):\n        distance = self.get_distance(direction)\n        end = self.origin + distance\n        self.move_view(view, end)\n        del self.views[view]\n\n    def change_view(self, view: View, direction: Direction = None):\n        if direction is None:\n            self.set_view(view)\n            return\n        if not isinstance(direction, Direction):\n            direction = Direction[direction.upper()]  # Cast string for convenience\n\n        self.animater.clear()\n\n        last = self.current\n        self.current = view\n        self.move_in(self.current, direction.flip())\n        self.move_out(last, direction)\n\n        self.animater.start()\n\n    def get_distance(self, direction: Direction):\n        if not isinstance(direction, Direction):\n            direction = Direction[direction.upper()]  # Cast string for convenience\n\n        if direction in (Direction.UP, Direction.DOWN):\n            return direction * Coord(0, self.winfo_height())\n        elif direction in (Direction.LEFT, Direction.RIGHT):\n            return direction * Coord(self.winfo_width(), 0)\n        else:\n            raise NotImplementedError\n\n    @property\n    def active(self):\n        return self.animater.running\n\n    @property\n    def origin(self):\n        return Coord(self.canvasx(0), self.canvasy(0))\n\n\nclass DrawType(Enum):\n    image = 'create_image'\n    window = 'create_window'\n    text = 'create_text'\n\n\nclass View:\n\n    def __init__(self, master: Window, **kwds):\n        self.master = master\n        self.kwds = kwds\n        self.drawtype = self.data = None\n        for k, v 
in self.kwds.items():\n if hasattr(DrawType, k):\n self.drawtype = DrawType[k]\n self.data = v\n if self.drawtype is None:\n raise NotImplementedError\n\n def draw(self, *args, **kwds):\n fn = getattr(self.master, self.drawtype.value)\n return fn(*args, **{**self.kwds, **kwds})\n","sub_path":"src/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"46294012","text":"#!/usr/bin/python2\nfrom datetime import date\n\nDefaultBuildClean = True\nDefaultSolution = \"./NOSApp/Builds/NOSApp.sln\"\nDefaultArch = \"i686\"\nDefaultStream = \"Development\"\nDefaultQMakeOptions = \"\"\n\nDefaultCompilerFlags = {\n \"debug\":\n \"-fsigned-char \\\\\\n\" +\n \"-Wall \\\\\\n\" + \n \"-Wno-unknown-pragmas \\\\\\n\" +\n \"-Wno-comment \\\\\\n\" +\n \"-Wno-switch \\\\\\n\" +\n \"-Wno-missing-braces \\\\\\n\" +\n \"-Wno-pointer-sign \\\\\\n\" +\n \"-Winline \\\\\\n\" +\n \"-Wno-reorder \\\\\\n\",\n \"release\":\n \"-fsigned-char \\\\\\n\" +\n \"-Wformat \\\\\\n\" + \n \"-Wimplicit \\\\\\n\" + \n \"-Wsequence-point \\\\\\n\"\n}\n\nDefaultLibraryMap = {\n \"i686\" : {\n \"OpenGL32\" : \"\",\n \"WS2_32\": \"\",\n \"freetype\": \"freetype\",\n \"glu32\" : \"\",\n \"glew32s\" : \"GLEW\",\n \"glew32sd\" : \"GLEW\",\n \"qt3support\": \"\", \n \"qtassistantclient\": \"\", \n \"qtaxcontainer\": \"\", \n \"qtaxserver\": \"\", \n \"qtcore\": \"\", \n \"qtgui\": \"\", \n \"qtmain\": \"\", \n \"qtnetwork\": \"\", \n \"qtopengl\": \"\", \n \"qtscript\": \"\", \n \"qtsql\": \"\", \n \"qtsvg\": \"\", \n \"qttest\": \"\", \n \"qtuitools\": \"\", \n \"qtxml\": \"\", \n },\n\n \"arm\" : {\n \"OpenGL32\" : \"\",\n \"WS2_32\": \"\",\n \"freetype\": \"freetype\",\n \"glu32\" : \"\",\n \"glew32s\" : \"\",\n \"glew32sd\" : \"\",\n \"qt3support\": \"\", \n \"qtassistantclient\": \"\", \n \"qtaxcontainer\": \"\", \n \"qtaxserver\": \"\", \n \"qtcore\": \"\", \n \"qtgui\": \"\", \n \"qtmain\": \"\", \n \"qtnetwork\": \"\", \n \"qtopengl\": \"\", \n \"qtscript\": \"\", \n \"qtsql\": \"\", \n \"qtsvg\": \"\", \n \"qttest\": \"\", \n \"qtuitools\": \"\", \n \"qtxml\": \"\", \n },\n}\n\n\nDefaultAccurevServer = \"172.28.28.116:5060\"\nDefaultAccurevLogin = \"marslan ocivan\"\n\nDefaultVCConfig = \"Debug|Win32\"\n\n# nightly build starts at 2:00 am\nDoNightlyBuild = True;\nNightlyStartTime = 2;\n\n# the time when 3-weekly build cycle begins\nStartTime = date(day = 4, month = 2, year = 2008)\n# [ integration #, start date, full release, notes ]\nMileStones = [\n [ 11, date(day = 4, month = 2, year = 2008), True, \"Nemesis only - no relase note\" ], \n [ 12, date(day = 18, month = 2, year = 2008), True, \"First composer interation.\" ],\n [ 13, date(day = 17, month = 3, year = 2008), False, \"\" ],\n [ 14, date(day = 7, month = 4, year = 2008), False, \"\" ],\n [ 15, date(day = 28, month = 4, year = 2008), False, \"\" ],\n [ 16, date(day = 19, month = 5, year = 2008), False, \"\" ],\n [ 17, date(day = 2, month = 6, year = 2008), True, \"Composer Alpha\" ],\n [ 18, date(day = 30, month = 6, year = 2008), False, \"\" ],\n [ 19, date(day = 21, month = 7, year = 2008), False, \"\" ],\n [ 20, date(day = 4, month = 8, year = 2008), True, \"Composer/Nemesis Beta\" ],\n [ 21, date(day = 1, month = 9, year = 2008), False, \"\" ],\n [ 22, date(day = 15, month = 9, year = 2008), True, \"Composer Release Candidate\" ],\n [ 23, date(day = 20, month = 10, year = 2008), True, \"Nemesis Release Candidate\" ],\n]\n\nSMTPServer = 
\"exch01-aklnz.marine.net.int\"\n#SMTPServer = \"localhost\"\nCCList = [\n \"mars.lan@navico.com\",\n \"steven.bromley@navico.com\",\n \"jason.detring@navico.com\",\n \"robert.smithson@navico.com\",\n]\n#CCList = [\"mars.lan@navico.com\"] \n\nArchMap = {\n \"i686\": \"/usr/local/qt/bin\",\n \"arm\": \"/usr/local/qtopia/bin\"\n}\n\nNightlyBuildMap = {\n \"Development\": [\n \"aaron.coleman@navico.com\",\n \"andy.caddy@navico.com\",\n \"cristian.barrera@navico.com\",\n \"daniel.yu@navico.com\",\n \"damon.michaels@navico.com\",\n \"gonzalo.victorio@navico.com\",\n \"luis.lechuga@navico.com\",\n \"sandino.moreno@navico.com\",\n \"hayden.dakers@navico.com\",\n \"hector.morales@navico.com\",\n \"henning.maeland@navico.com\",\n \"jason.detring@navico.com\",\n \"jeff.maxwell@navico.com\",\n \"Alfredo.Basas@navico.com\",\n \"justin.beck@navico.com\",\n \"kevin.brown@navico.com\",\n \"lance.lybarger@navico.com\",\n \"mars.lan@navico.com\",\n \"matt.hunt@navico.com\",\n \"paul.butterworth@navico.com\",\n \"peter.harvey@navico.com\",\n \"rafael.ruiz@navico.com\",\n \"rich.fine@navico.com\",\n \"robert.smithson@navico.com\",\n \"ryan.mccarter@navico.com\",\n \"ryan.underwood@navico.com\",\n \"shel.michaels@navico.com\",\n \"stefan.richardson@navico.com\",\n \"steven.bromley@navico.com\",\n \"steven.rossen@navico.com\",\n \"zach.vincent@navico.com\",\n ],\n \"NOS\": [\n \"ryan.mccarter@navico.com\",\n \"andy.caddy@navico.com\",\n \"stefan.richardson@navico.com\",\n \"justin.beck@navico.com\",\n \"aaron.coleman@navico.com\",\n \"Alfredo.Basas@navico.com\",\n ],\n \"Releases\": [\n \"ryan.mccarter@navico.com\",\n \"jason.detring@navico.com\",\n \"mars.lan@navico.com\",\n \"robert.smithson@navico.com\",\n ],\n}\n\nBuildMapPrefix = \"/accurev\"\nBuildMapSuffix = \"_Build\"\nBuildMap = {\n \"Alarms\": BuildMapPrefix + \"/Alarms\" + BuildMapSuffix,\n \"Autopilot_Development\":BuildMapPrefix + \"/Autopilot_Development\" + BuildMapSuffix, \n \"Development\": BuildMapPrefix + \"/Development\" + BuildMapSuffix,\n \"Dynamic_Data\": BuildMapPrefix + \"/Dynamic_Data\" + BuildMapSuffix, \n \"GUI_Development\": BuildMapPrefix + \"/GUI_Development\" + BuildMapSuffix, \n \"Integration\" : BuildMapPrefix + \"/Integration\" + BuildMapSuffix,\n \"Linux_Support\": BuildMapPrefix + \"/Linux_Support\" + BuildMapSuffix,\n \"Mapping_Development\": BuildMapPrefix + \"/Mapping_Development\" + BuildMapSuffix,\n \"Mapping_Group\": BuildMapPrefix + \"/Mapping_Group\" + BuildMapSuffix,\n \"NOS\" : BuildMapPrefix + \"/NOS\" + BuildMapSuffix,\n \"Radar_Development\": BuildMapPrefix + \"/Radar_Development\" + BuildMapSuffix,\n \"Releases\": BuildMapPrefix + \"/Releases\" + BuildMapSuffix,\n \"Sonar_Development\": BuildMapPrefix + \"/Sonar_Development\" + BuildMapSuffix,\n \"User_Data_Sharing\": BuildMapPrefix + \"/User_Data_Sharing\" + BuildMapSuffix,\n \"Weather_Development\" : BuildMapPrefix + \"/Weather_Development\" + BuildMapSuffix,\n \"NDP2k_Development\" : BuildMapPrefix + \"/NDP2k_Development\" + BuildMapSuffix,\n \"Navigation_Development\" : BuildMapPrefix + \"/Navigation_Development\" + BuildMapSuffix,\n \"NDP2k_UI\" : BuildMapPrefix + \"/NDP2k_UI\" + BuildMapSuffix,\n}\n\nAllDevelopers = [\n \"aaron.coleman@navico.com\",\n \"andy.caddy@navico.com\",\n \"cristian.barrera@navico.com\",\n \"daniel.yu@navico.com\",\n \"damon.michaels@navico.com\",\n \"gonzalo.victorio@navico.com\",\n \"luis.lechuga@navico.com\",\n \"sandino.moreno@navico.com\",\n \"hayden.dakers@navico.com\",\n \"hector.morales@navico.com\",\n 
\"henning.maeland@navico.com\",\n \"jason.detring@navico.com\",\n \"jeff.maxwell@navico.com\",\n \"Alfredo.Basas@navico.com\",\n \"justin.beck@navico.com\",\n \"kevin.brown@navico.com\",\n \"lance.lybarger@navico.com\",\n \"mars.lan@navico.com\",\n \"matt.hunt@navico.com\",\n \"paul.butterworth@navico.com\",\n \"peter.harvey@navico.com\",\n \"rafael.ruiz@navico.com\",\n \"rich.fine@navico.com\",\n \"robert.smithson@navico.com\",\n \"ryan.mccarter@navico.com\",\n \"ryan.underwood@navico.com\",\n \"shel.michaels@navico.com\",\n \"stefan.richardson@navico.com\",\n \"steven.bromley@navico.com\",\n \"steven.rossen@navico.com\",\n \"zach.vincent@navico.com\",\n]\n","sub_path":"Linux-Build/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"342795087","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 26 21:53:29 2018\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom grab_data import grab_data\nimport os\nimport pandas as pd\ncwd = os.getcwd()\n\n################################################################################\nbase_size=20\nmpl.rcParams['legend.fontsize'] = base_size\nmpl.rcParams['figure.figsize'] = (30,24)\nmpl.rcParams['figure.titlesize']=base_size+5\nmpl.rcParams['xtick.labelsize']=base_size\nmpl.rcParams['ytick.labelsize']=base_size\nmpl.rcParams['font.size']=base_size\nmpl.rcParams['axes.titlesize']=base_size\nmpl.rcParams['axes.labelsize']=base_size\nmpl.rcParams['lines.markersize'] = 4 # markersize, in points\nmpl.rcParams['legend.markerscale'] = 1 # line width in points\nmpl.rcParams['lines.markeredgewidth'] = 0.4 # the line width around the marker symbol\nmpl.rcParams['lines.linewidth'] = 4\n#####################################\n\ndata_loc = cwd + '/laptop_tracking_20180619/'\nparams_loc = cwd + '/20180619_Sun_Sensor_laptop_runs.txt'\ndata,data_all_ss1,data_all_ss2,data_all_ss3,data_all = grab_data(data_loc,params_loc)\n\n#Pick a sun sensor run to use for the \"Kalman Filter\"\nrun=4\nmask = data_all_ss1['run'] == run\ntime = elapsed=data_all_ss1.loc[mask,'elapsed']-data_all_ss1.loc[mask,'elapsed'][0]\nss_track_x = data_all_ss1.loc[mask,'ang_x_track']\n##pre-calculate velocities (in real-time code this will run with latest three samples)\n#vel=np.zeros(len(ss_track_x))\n#vel[0:2]=0.0\n#for i in range(2,len(ss_track_x)):\n \n \nimu_ang_z = data_all_ss1.loc[mask,'imu_ang_z']\nptu_cmd_x = data_all_ss1.loc[mask,'ptu_cmd_x']\n\n#def ddx(x,y):\n# return (np.array([2/((x[1]-x[0])*(x[2]-x[0])),\n# -2/((x[2]-x[1])*(x[1]-x[0])),\n# 2/((x[2]-x[1])*(x[2]-x[0]))])*y.T).sum()\n\n#Kalman Predictor stage\ndef predict(x,A,u,B,P,Q):\n x=np.dot(A,x) + np.dot(B,u)\n P=np.dot(A,np.dot(P,A.T)) + Q \n return x,P\n\n#Kalman Correction stage\ndef correct(x,H,z,P,Q,R):\n C=np.dot(H,np.dot(P,H.T)) + R\n K=np.dot(P,np.dot(H.T,np.linalg.inv(C)))\n #print(K)\n \n x=x+np.dot(K,(z-np.dot(H,x)))\n P=P-np.dot(K,np.dot(H,K.T))\n \n return x,P\ndt=time.diff().mean()\nhz=1/dt\nptu_step2deg = 23.1428/3600\n\n\n#Initial state estimate \nx=np.array([[0.], #sun_pos_x\n [0.], #ptu_vel_x\n [0.], #gon_vel_x\n [0.]]) #sun_acc_x\n\n#Prediction matrix (model)\nA=np.array([[ 1.0 , dt , dt , 0.5*dt**2 ],\n [ 0.0 , 0.0 , -1.0 ,-0.5*dt**2 ],\n [ 0.0 , 0.0 , 1.0 , 0.5*dt**2 ],\n [ 0.0 , 0.0 , 0.0 , 1.0 ]])\n \n#P: covariance matrix (covariance between state variables)\nP=np.array([[0.01, 0. , 0. , 0. ],\n [0. , 0.01, 0. , 0. ],\n [0. , 0. ,0.01 , 0. ],\n [0. , 0. 
,0. , 0.01]])\n#P=np.cov(np.array([vel+ptu_cmd_x*ptu_step2deg,pos]))\n \n#uncertainty from environment\nQ=np.array([[0.01, 0. , 0. , 0. ],\n [0. , 0.01, 0. , 0. ],\n [0. , 0. ,0.01 , 0. ],\n [0. , 0. ,0. , 0.01]])\n\n#Initial control commands \n#u=np.array([[ptu_cmd_dx[2]*ptu_step2deg]])\nu=np.array([[0.0]])\n \n#Control Matrix \nB=np.array([[dt],\n [1.0],\n [0.0],\n [0.0]]) \n \n#B=np.array([[0.0,0.0,0.0],\n# [0.0,0.0,0.0],\n# [0.0,0.0,0.0]]) \n\n#Initial sensor measurements\nz=np.array([[ss_track_x[0]],\n [ss_track_x[1]],\n [ss_track_x[2]],\n [imu_ang_z[2]]])\n \n#Sensor matrix: just identity matrix because sensors measure the state\n#H=np.array([[0.0,0.0,1.0],\n# [0.5*dt,0.0,0.5*dt]\n# [2./((x[1]-x[0])*(x[2]-x[0])),-2./((x[2]-x[1])*(x[1]-x[0])),2./((x[2]-x[1])*(x[2]-x[0]))]])\n\n#H=np.array([[0.0 ,0.0 ,1.0 ],\n# [-1./(2*dt) ,0.0 ,1./(2*dt) ],\n# [2./((dt)*(2*dt)),-2./((dt)*(dt)),2./((dt)*(2*dt))]])\n# \n#H=np.array([[1.0 ,-2*dt ,-0.5*(2*dt)**2 ],\n# [1.0 ,-dt ,-0.5*(dt)**2 ],\n# [1.0 ,0.0 ,0.0 ]])\n \nH=np.array([[ 1.0 , 2*dt , 2*dt , 2*(dt)**2],\n [ 1.0 , dt , dt , 2*(dt)**2],\n [ 1.0 , 0.0 , 0.0 , 0. ],\n [ 0.0 , 0.0 , 1.0 , 0. ]])\n \n#Unit variance for the sake of simplicity\n\n#Uncertainty from sensors \nR=np.array([[ 0.015, 0. , 0. , 0.0],\n [ 0. , 0.015, 0. , 0.0],\n [ 0. , 0. , 0.015 , 0.0],\n [ 0. , 0. , 0. , 0.1]])\n\nN=len(time)\npredictions,corrections,measurements = [],[],[]\n#pred=pd.DataFrame(columns=['pos','vel','acc'])\n#corr=pd.DataFrame(columns=['pos','vel','acc'])\n#meas=pd.DataFrame(columns=['pos','vel','acc'])\ndata=pd.DataFrame(columns=['pred_sun_pos','corr_sun_pos','meas_sun_pos',\n 'pred_ptu_vel','corr_ptu_vel','meas_ptu_vel',\n 'pred_gon_vel','corr_gon_vel','meas_gon_vel',\n 'pred_sun_acc','corr_sun_acc','meas_sun_acc',\n 'elapsed'])\nfor k in np.arange(2,N-1):\n #Update sensor measurements\n z=np.array([[ss_track_x[k-2]],\n [ss_track_x[k-1]],\n [ss_track_x[k] ],\n [imu_ang_z[k] ]])\n #Update ptu control commands\n u=np.array([[ptu_cmd_x[k]*ptu_step2deg]]) \n u=np.array([[0.0]]) \n \n x,P=predict(x,A,u,B,P,Q)\n data.loc[k,['pred_sun_pos','pred_ptu_vel','pred_gon_vel','pred_sun_acc']]=[x[0][0],x[1][0],x[2][0],x[3][0]]\n predictions.append(x)\n x,P=correct(x,H,z,P,Q,R)\n data.loc[k,['corr_sun_pos','corr_ptu_vel','corr_gon_vel','corr_sun_acc']]=[x[0][0],x[1][0],x[2][0],x[3][0]]\n corrections.append(x)\n measurements.append(z)\n# data.loc[k,['meas_pos','meas_vel','meas_acc']]=[z[0][0],z[1][0],z[2][0]]\n data.loc[k,['meas_sun_pos']]=z[2][0]\n #data.loc[k,['meas_ptu_vel']]=[z[0][0]*(-1./(2*dt)) + z[2][0]*(1./(2*dt))]\n data.loc[k,['meas_sun_acc']]=[(z[0][0]*2./((dt)*(2*dt))) - \n (z[1][0]*2./((dt)*(dt))) +\n (z[2][0]*2./((dt)*(2*dt)))]\n data.loc[k,['elapsed']]=time[k]\n\nprint('predicted final estimate:',predictions[-1][0])\nprint('corrected final estimates:',corrections[-1][0])\nprint('measured state:',measurements[-1][0])\n\npos_measure=np.zeros(N-1)\nvel_measure=np.zeros(N-1)\npos_predict=np.zeros(N-1)\nvel_predict=np.zeros(N-1)\npos_correct=np.zeros(N-1)\nvel_correct=np.zeros(N-1)\nfor i in range(len(measurements)):\n pos_measure[i]=measurements[i][0][0]\n vel_measure[i]=measurements[i][1][0]\n pos_predict[i]=predictions[i][0][0]\n vel_predict[i]=predictions[i][1][0]\n pos_correct[i]=corrections[i][0][0]\n vel_correct[i]=corrections[i][1][0]\n\nplt.figure()\nplt.plot(data.index,data.pred_sun_pos,'-o',label='predicted position')\nplt.plot(data.index,data.corr_sun_pos,'-o',label='corrected position')\nplt.plot(data.index,data.meas_sun_pos,'-o',label='measured 
position')\nplt.plot(data.index,ss_track_x[3:],'-o',label='ss_track_x')\nplt.legend()\n\nplt.figure()\nplt.plot(data.index,data.pred_ptu_vel,label='predicted velocity')\nplt.plot(data.index,data.corr_ptu_vel,label='corrected velocity')\n#plt.plot(data.index,data.meas_vel,label='measured velocity') #no measured-velocity column is filled in the loop above\nplt.plot(data.index,ptu_cmd_x[3:]*ptu_step2deg,label='ptu_cmd_x')\nplt.plot(data.index,imu_ang_z[3:],label='imu_ang_z')\nplt.plot(data.index,imu_ang_z[3:]+ptu_cmd_x[3:]*ptu_step2deg,label='imu_ang_x+ptu_cmd_x')\nplt.legend()\n\nplt.figure()\nplt.plot(data.index,data.meas_sun_pos-data.corr_sun_pos,'-o',label='position error')\n#plt.plot(data.index,data.meas_vel-data.corr_vel,'-o',label='velocity error') #would need meas_ptu_vel, which is not recorded\nplt.legend()\n \n#plt.figure()\n#plt.plot(pos_measure,label='measured')\n#plt.plot(pos_predict,label='predicted')\n#plt.plot(pos_correct,label='corrected')\n#plt.title('Position offset')\n#plt.legend()\n#\n#plt.figure()\n#plt.plot(vel_measure,label='measured')\n#plt.plot(vel_predict,label='predicted')\n#plt.plot(vel_correct,label='corrected')\n#plt.title('Velocity offset')\n#plt.legend()\n# \n#plt.figure()\n#plt.plot(pos_correct-pos_measure,label='Position (predicted - measured) ')\n##plt.plot(vel_correct-vel_measure,label='Velocity (predicted - measured) ')\n#plt.title('Corrected vs. Measured')\n#plt.legend()\n#\n#plt.figure()\n#plt.plot(vel_correct-vel_measure,label='Velocity (predicted - measured) ')\n##plt.plot(vel_correct-vel_measure,label='Velocity (predicted - measured) ')\n#plt.title('Corrected vs. Measured')\n#plt.legend()","sub_path":"testing/20180619/kalman_ss_r3.py","file_name":"kalman_ss_r3.py","file_ext":"py","file_size_in_byte":8439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"504717948","text":"import sys\nimport cv2\n\nfc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nvc = cv2.VideoCapture(0)\n\nwhile True:\n ret, frame = vc.read()\n k = cv2.waitKey(1)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = fc.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5, minSize=(30,30), flags=cv2.CASCADE_SCALE_IMAGE)\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 255, 0), 2)\n cv2.imshow('Face Detection', frame)\n if k == 27: # quit on Esc so the cleanup below is reachable\n break\n\nvc.release()\ncv2.destroyAllWindows()","sub_path":"face_detection/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"231714148","text":"# -*- encoding:utf-8 -*-\n# !/usr/bin/env python3\nfrom pymongo import MongoClient\n\nclient = MongoClient('localhost', 27017)\nclient.admin.authenticate('dba', 'dba', mechanism='SCRAM-SHA-1')\ndb = client['admin']\ncoll = db['movie']\n\nprint(coll.count())\n","sub_path":"pymongoTest.py","file_name":"pymongoTest.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"23659321","text":"\"\"\"\n/*******************************************************************************\n * Copyright (c) 2011, 2013 IBM Corp.\n *\n * All rights reserved. 
This program and the accompanying materials\n * are made available under the terms of the Eclipse Public License v1.0\n * and Eclipse Distribution License v1.0 which accompany this distribution.\n *\n * The Eclipse Public License is available at\n * http://www.eclipse.org/legal/epl-v10.html\n * and the Eclipse Distribution License is available at\n * http://www.eclipse.org/org/documents/edl-v10.php.\n *\n * Contributors:\n * Ian Craggs - initial API and implementation and/or initial documentation\n * EH Ong - port to Python 3 and Micropython\n *******************************************************************************/\n\"\"\"\n\nimport MQTTSN\nimport MQTTSNinternal\nimport socket, time, _thread, sys, struct, queue\n\n\nclass Callback:\n def __init__(self):\n self.events = []\n\n def connectionLost(self, cause):\n print(\"default connectionLost\", cause)\n self.events.append(\"disconnected\")\n\n def messageArrived(self, topicName, payload, qos, retained, msgid):\n print(\"default publishArrived\", topicName, payload, qos, retained, msgid)\n return True\n\n def deliveryComplete(self, msgid):\n print(\"default deliveryComplete\")\n\n def advertise(self, address, gwid, duration):\n print(\"advertise\", address, gwid, duration)\n\nclass TopicMap:\n def __init__(self):\n self.registered = {}\n\n def register(self, topicId, topicName):\n self.registered[topicId] = topicName\n\nclass Client:\n def __init__(self, clientid, host=\"localhost\", port=1883):\n self.clientid = clientid\n self.host = host\n self.port = port\n self.msgid = 1\n self.callback = None\n self.__receiver = None\n self.topicmap = TopicMap()\n self.queue = queue.Queue()\n\n def start(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self.host, self.port))\n mreq = struct.pack(\"4sl\", socket.inet_aton(self.host), socket.INADDR_ANY)\n\n self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n self.startReceiver()\n\n def stop(self):\n self.stopReceiver()\n\n def __nextMsgid(self):\n def getWrappedMsgid():\n id = self.msgid + 1\n if id == 65535:\n id = 1\n return id\n\n if len(self.__receiver.outMsgs) >= 65535:\n raise Exception(\"No slots left!!\") # exceptions must derive from BaseException in Python 3\n else:\n self.msgid = getWrappedMsgid()\n while self.msgid in self.__receiver.outMsgs:\n self.msgid = getWrappedMsgid()\n return self.msgid\n\n\n def registerCallback(self, callback):\n self.callback = callback\n\n\n def connect(self, cleansession=True):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.settimeout(5.0)\n\n self.sock.connect((self.host, self.port))\n\n connect = MQTTSN.Connects()\n connect.ClientId = self.clientid\n connect.CleanSession = cleansession\n connect.KeepAliveTimer = 0\n self.sock.send(connect.pack())\n\n response, address = MQTTSN.unpackPacket(MQTTSN.getPacket(self.sock))\n assert response.mh.MsgType == MQTTSN.CONNACK\n\n self.startReceiver()\n\n\n def startReceiver(self):\n self.__receiver = MQTTSNinternal.Receivers(self.sock)\n if self.callback:\n id = _thread.start_new_thread(self.__receiver, (self.callback,self.topicmap,self.queue,))\n\n\n def waitfor(self, msgType, msgId=None):\n if self.__receiver:\n msg = self.__receiver.waitfor(msgType, msgId)\n else:\n msg = self.__receiver.receive()\n while msg.mh.MsgType != msgType and (msgId == None or msgId == msg.MsgId):\n msg = self.__receiver.receive()\n return msg\n\n\n def subscribe(self, topic, qos=0):\n subscribe = MQTTSN.Subscribes()\n 
subscribe.MsgId = self.__nextMsgid()\n if isinstance(topic, str):\n subscribe.TopicName = topic\n if len(topic) > 2:\n subscribe.Flags.TopicIdType = MQTTSN.TOPIC_NORMAL\n else:\n subscribe.Flags.TopicIdType = MQTTSN.TOPIC_SHORTNAME\n else:\n subscribe.TopicId = topic # should be int\n subscribe.Flags.TopicIdType = MQTTSN.TOPIC_PREDEFINED\n subscribe.Flags.QoS = qos\n if self.__receiver:\n self.__receiver.lookfor(MQTTSN.SUBACK)\n self.sock.send(subscribe.pack())\n msg = self.waitfor(MQTTSN.SUBACK, subscribe.MsgId)\n self.topicmap.register(msg.TopicId, topic)\n return msg.ReturnCode, msg.TopicId\n\n\n def unsubscribe(self, topics):\n unsubscribe = MQTTSN.Unsubscribes()\n unsubscribe.MsgId = self.__nextMsgid()\n unsubscribe.data = topics\n if self.__receiver:\n self.__receiver.lookfor(MQTTSN.UNSUBACK)\n self.sock.send(unsubscribe.pack())\n msg = self.waitfor(MQTTSN.UNSUBACK, unsubscribe.MsgId)\n\n\n def register(self, topicName):\n register = MQTTSN.Registers()\n register.TopicName = topicName\n if self.__receiver:\n self.__receiver.lookfor(MQTTSN.REGACK)\n self.sock.send(register.pack())\n msg = self.waitfor(MQTTSN.REGACK, register.MsgId)\n self.topicmap.register(msg.TopicId, topicName)\n return msg.TopicId\n\n\n def publish(self, topic, payload, qos=0, retained=False):\n if isinstance(payload, str) or isinstance(payload, bytes):\n pass\n else:\n raise TypeError('Payload must be str or bytes.')\n publish = MQTTSN.Publishes()\n publish.Flags.QoS = qos\n publish.Flags.Retain = retained\n if isinstance(topic, str):\n publish.Flags.TopicIdType = MQTTSN.TOPIC_SHORTNAME\n publish.TopicName = topic\n else:\n publish.Flags.TopicIdType = MQTTSN.TOPIC_NORMAL\n publish.TopicId = topic\n if qos in [-1, 0]:\n publish.MsgId = 0\n else:\n publish.MsgId = self.__nextMsgid()\n #print(\"MsgId\", publish.MsgId)\n self.__receiver.outMsgs[publish.MsgId] = publish\n publish.Data = payload\n self.sock.send(publish.pack())\n return publish.MsgId\n\n\n def disconnect(self):\n disconnect = MQTTSN.Disconnects()\n if self.__receiver:\n self.__receiver.lookfor(MQTTSN.DISCONNECT)\n self.sock.send(disconnect.pack())\n msg = self.waitfor(MQTTSN.DISCONNECT)\n self.stopReceiver()\n\n\n def stopReceiver(self):\n self.sock.close() # this will stop the receiver too\n## assert self.__receiver.inMsgs == {}\n## assert self.__receiver.outMsgs == {}\n self.__receiver = None\n\n def receive(self):\n return self.__receiver.receive()\n\n\ndef publish(topic, payload, retained=False, port=1883, host=\"localhost\"):\n publish = MQTTSN.Publishes()\n publish.Flags.QoS = 3\n publish.Flags.Retain = retained\n if isinstance(payload, str):\n pass\n elif isinstance(payload, bytes):\n payload = payload.decode()\n if isinstance(topic, str):\n if len(topic) > 2:\n publish.Flags.TopicIdType = MQTTSN.TOPIC_NORMAL\n publish.TopicId = len(topic)\n payload = topic + payload\n else:\n publish.Flags.TopicIdType = MQTTSN.TOPIC_SHORTNAME\n publish.TopicName = topic\n else:\n publish.Flags.TopicIdType = MQTTSN.TOPIC_NORMAL\n publish.TopicId = topic\n publish.MsgId = 0\n #print(\"payload\", payload)\n publish.Data = payload\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(publish.pack(), (host, port))\n sock.close()\n return\n\n\nif __name__ == \"__main__\":\n\taclient = Client(\"linh\", 
port=1885)\n\taclient.registerCallback(Callback())\n\taclient.connect()\n","sub_path":"src/Boards/client_MQTTSN/MQTTSNclient.py","file_name":"MQTTSNclient.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"11181810","text":"from django.http import HttpResponse\n\nfrom polls.models import Category, Good\n\n\ndef index(request, id):\n if id == None:\n cat = Category.objects.first()\n else:\n cat = Category.objects.get(pk=id)\n goods = Good.objects.filter(category=cat).order_by(\"name\")\n s = \"Категория:\" + cat.name + \"<br><br>\"\n for good in goods:\n s = s + \"(\" + str(good.pk) + \")\" + good.name + \"<br>\"\n return HttpResponse(s)\n\n\ndef goods(request, id):\n good = Good.objects.get(pk=id)\n s = good.name + \"<br><br>\" + good.category.name + \"<br><br>\" + good.description\n if not good.in_stock:\n s = s + \"<br><br>
\" + \"Нет в наличии!\"\n return HttpResponse(s)\n","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"607243383","text":"import abc\n\nfrom pyparsing import alphanums, alphas, Combine, Literal, nums, oneOf, \\\n OneOrMore, Optional, pythonStyleComment, QuotedString, Word, ZeroOrMore\nfrom peewee import DoesNotExist\n\nfrom logic.identifier import add_identifiers\nfrom logic.reserved import add_reserveds, get_reserved_by_name\n\n\nclass ObfuscateBNF(object):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, get_obfuscated):\n \"\"\"BNF grammar for source statements.\n\n Parameters\n ----------\n get_obfuscated : function\n Function to return the obfuscated name for an identifier.\n \"\"\"\n self.get_obfuscated = get_obfuscated\n\n self.directive = oneOf(\"#:\")\n self.comment = ~self.directive + pythonStyleComment\n\n self.separator = Word(\"~!@$%^&*()+`-={}|[]:;<>?,/.\", max=2)\n\n self.string = \\\n QuotedString(quoteChar='\"', escChar='\\\\', multiline=False,\n unquoteResults=False) |\\\n QuotedString(quoteChar=\"'\", escChar='\\\\', multiline=False,\n unquoteResults=False)\n\n self.doc_string = \\\n QuotedString(quoteChar='\"\"\"', escChar='\\\\', multiline=True,\n unquoteResults=False) |\\\n QuotedString(quoteChar=\"'''\", escChar='\\\\', multiline=True,\n unquoteResults=False)\n self.string_or_doc = self.doc_string | self.string\n self.triple_quote = Literal(\"'''\") | Literal('\"\"\"')\n\n self.e = Literal('E') | Literal('e')\n self.point = Literal('.')\n\n self.plusorminus = Literal('+') | Literal('-')\n self.number = Word(nums)\n self.integer = Combine(Optional(self.plusorminus) + self.number)\n self.fnumber = Combine(\n self.integer +\n Optional(self.point + Optional(self.number)) +\n Optional(self.e + self.integer))\n\n self.tab = Literal(' ')\n\n self.ident = Word(alphas+'_', alphanums+'_')\n self.conseq_idents_numbs = OneOrMore(self.ident | self.fnumber)\n self.attrib = self.ident + OneOrMore('.'+self.ident)\n\n self.statement = (\n ZeroOrMore(\n (self.directive |\n self.tab |\n self.conseq_idents_numbs |\n self.separator |\n self.string_or_doc |\n self.triple_quote)\n ) + Optional(self.comment).suppress()\n )\n\n self.attribs = (\n ZeroOrMore(\n (self.directive.suppress() |\n self.tab.suppress() |\n self.attrib |\n self.ident.suppress() |\n self.separator.suppress() |\n self.fnumber.suppress() |\n self.string_or_doc.suppress() |\n self.triple_quote.suppress())\n ) + Optional(self.comment).suppress()\n )\n\n self.conseq_idents = (\n ZeroOrMore(\n (self.directive.suppress() |\n self.tab.suppress() |\n self.ident |\n self.separator.suppress() |\n self.fnumber.suppress() |\n self.string.suppress())\n ) + Optional(self.comment).suppress()\n )\n\n self.conseq_idents_no_obfuscate = (\n ZeroOrMore(\n (self.directive.suppress() |\n self.tab.suppress() |\n self.ident |\n self.separator.suppress() |\n self.fnumber.suppress() |\n self.string_or_doc.suppress() |\n self.triple_quote.suppress())\n ) + Optional(self.comment).suppress()\n )\n\n self.attribs.setParseAction(self.add_attribs_reserveds)\n self.conseq_idents.setParseAction(self.add_conseq_idents)\n self.conseq_idents_no_obfuscate.setParseAction(\n self.add_conseq_idents_no_obfuscate)\n self.conseq_idents_numbs.setParseAction(\n self.transform_conseq_ident_numbs)\n self.directive.setParseAction(self.transform_directive)\n\n ###############\n # Parse actions\n ###############\n def 
add_conseq_idents(self, conseq_idents_list):\n \"\"\"Add names to obfuscate to identifiers table.\n\n Parameters\n ----------\n conseq_idents_list : list\n \"\"\"\n if 'import' not in conseq_idents_list[:] and \\\n 'except' not in conseq_idents_list[:]:\n add_identifiers(set(conseq_idents_list))\n\n def add_conseq_idents_no_obfuscate(\n self, conseq_idents_no_obfuscate_list):\n \"\"\"Add names that are not obfuscated to identifiers table.\n\n Parameters\n ----------\n conseq_idents_no_obfuscate_list : list\n \"\"\"\n # If an except error was not added to reserved list, don't obfuscate it\n if 'import' not in conseq_idents_no_obfuscate_list[:] and \\\n 'except' not in conseq_idents_no_obfuscate_list[:]:\n add_identifiers(set(conseq_idents_no_obfuscate_list),\n do_obfuscate=False)\n\n def add_attribs_reserveds(self, attribs_list):\n \"\"\"Add attributes of reserved names to reserved list.\n\n Take a list of attributes strings from a source statement, break\n it into lists of objects with their attributes, and add attributes\n that follow a reserved name to the reserved list.\n\n Example\n ------\n If r is reserved, then\n a.r.c + d.r.e\n would add c and e to reserveds.\n\n Parameters\n ----------\n attribs_list : list\n \"\"\"\n if attribs_list:\n # Create an ordered list of attribute parents\n # Ex. a.b.c => [a, b]\n _attrib_list = [attribs_list[0]]\n is_last_token_an_attrib = True\n for token in attribs_list[1:]:\n if is_last_token_an_attrib and token != '.':\n # End of attrib list reached. Process list.\n add_attribs_reserveds_list(_attrib_list)\n # Start new attrib list\n _attrib_list = [token]\n is_last_token_an_attrib = True\n elif is_last_token_an_attrib and token == '.':\n is_last_token_an_attrib = False\n elif not is_last_token_an_attrib and token == '.':\n continue # Multiple dots, continue attrib list\n elif not is_last_token_an_attrib and token != '.':\n _attrib_list.append(token)\n is_last_token_an_attrib = True\n else:\n # Process last list\n if _attrib_list:\n add_attribs_reserveds_list(_attrib_list)\n\n def transform_conseq_ident_numbs(self, conseq_ident_list):\n \"\"\"Allow for non-name tokens in a statement.\n\n Names start with an alpha or underscore. 
Obfuscate these name tokens\n and simply copy unchanged other tokens.\n\n Parameters\n ----------\n conseq_ident_list : list\n\n Returns\n -------\n statement : str\n \"\"\"\n return ' '.join([\n self.get_obfuscated(ident) if\n (ident[0].isalpha() or ident[0] == '_') else ident\n for ident in conseq_ident_list\n ])\n\n def transform_directive(self, directive_list):\n \"\"\"Create a directive statement.\"\"\"\n return ''.join([directive_list[0], ' '])\n\n\ndef add_attribs_reserveds_list(attrib_list):\n \"\"\"Add attributes that follow a reserved name to reserveds list.\"\"\"\n if len(attrib_list) > 1: # A single item does not change\n is_reserved = False\n reserved_set = set()\n for name in attrib_list:\n if not is_reserved:\n try:\n get_reserved_by_name(name)\n is_reserved = True\n package_name = name\n continue # Don't add already reserved name\n except DoesNotExist:\n continue\n if is_reserved:\n reserved_set.add(name)\n if reserved_set:\n add_reserveds(package_name, reserved_set)\n","sub_path":"logic/obfuscatefile/obfuscatebnf.py","file_name":"obfuscatebnf.py","file_ext":"py","file_size_in_byte":8170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"495768233","text":"import json\nimport uuid\n\nfrom jwkest.jws import factory\nfrom jwkest.jws import NoSuitableSigningKeys\nfrom jwkest.jws import alg2keytype\n\nfrom oic.oauth2 import Message\nfrom oic.oauth2 import SINGLE_REQUIRED_STRING\nfrom oic.oauth2 import OPTIONAL_LIST_OF_STRINGS\nfrom oic.oic.message import SINGLE_REQUIRED_INT\nfrom oic.utils.sdb import Token\n\nfrom oic.utils.time_util import utc_time_sans_frac\n\n__author__ = 'roland'\n\n\nclass TokenAssertion(Message):\n c_param = {\n \"iss\": SINGLE_REQUIRED_STRING,\n \"azp\": SINGLE_REQUIRED_STRING,\n \"sub\": SINGLE_REQUIRED_STRING,\n 'kid': SINGLE_REQUIRED_STRING,\n \"exp\": SINGLE_REQUIRED_INT,\n 'jti': SINGLE_REQUIRED_STRING,\n \"aud\": OPTIONAL_LIST_OF_STRINGS, # Array of strings or string\n }\n\n\nclass JWTToken(Token):\n def __init__(self, typ, lifetime, iss, sign_alg, keyjar, **kwargs):\n Token.__init__(self, typ, lifetime, **kwargs)\n self.iss = iss\n self.lifetime = lifetime\n self.sign_alg = sign_alg\n self.keyjar = keyjar # my signing key\n self.db = {}\n\n def __call__(self, sid, sinfo=None, kid='', **kwargs):\n keys = self.keyjar.get_signing_key(alg2keytype(self.sign_alg),\n owner='', kid=kid)\n\n if not keys:\n raise NoSuitableSigningKeys('kid={}'.format(kid))\n\n key = keys[0] # Might be more then one if kid == ''\n\n rt = ' '.join(sinfo['response_type'])\n try:\n exp = utc_time_sans_frac() + self.lifetime[rt]\n except KeyError:\n exp = utc_time_sans_frac() + self.lifetime['']\n\n _jti = '{}-{}'.format(self.type, uuid.uuid4().hex)\n _tok = TokenAssertion(\n iss=self.iss,\n azp=sinfo['client_id'],\n sub=sinfo['sub'],\n kid=key.kid,\n exp=exp,\n jti=_jti\n )\n\n self.db[_jti] = sid\n\n try:\n _tok['aud'] = kwargs['aud']\n except KeyError:\n pass\n\n return _tok.to_jwt([key], self.sign_alg)\n\n def _unpack_jwt(self, token, only_info=False):\n if not token:\n raise KeyError\n\n _rj = factory(token)\n _msg = json.loads(_rj.jwt.part[1].decode('utf8'))\n if _msg['iss'] == self.iss:\n owner = ''\n else:\n owner = _msg['iss']\n\n keys = self.keyjar.get_signing_key(alg2keytype(_rj.jwt.headers['alg']),\n owner=owner)\n info = _rj.verify_compact(token, keys)\n if only_info:\n return info\n\n try:\n sid = self.db[info['jti']]\n except KeyError:\n raise\n\n return sid, info\n\n def type_and_key(self, token):\n sid, _ = 
self._unpack_jwt(token)\n return self.type, sid\n\n def get_key(self, token):\n sid, _ = self._unpack_jwt(token)\n return sid\n\n def get_type(self, token):\n self._unpack_jwt(token)\n return self.type\n\n def expires_at(self):\n return utc_time_sans_frac() + self.lifetime\n\n def valid(self, token):\n _, info = self._unpack_jwt(token)\n\n if info['jti'] in self.db:\n if info['exp'] >= utc_time_sans_frac():\n return True\n\n return False\n\n def invalidate(self, token):\n _, info = self._unpack_jwt(token)\n del self.db[info['jti']]\n\n def get_info(self, token):\n return self._unpack_jwt(token, only_info=True)\n","sub_path":"src/oic/extension/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"272529023","text":"import librosa\nimport numpy as np\nfrom scipy import signal\nfrom scipy.io import wavfile\n\nimport data_loader\n\nSAMPLE_RATE = 16000 # 1 sec\n\n\n_, _, _, silence_df = data_loader.load_data()\n\n\ndef read_wav_file_using_librosa(filename):\n wav, sample_rate = librosa.load(filename, sr=None) # same effect\n return wav\n\n\ndef read_wav_file(filename):\n sample_rate, wav = wavfile.read(filename)\n wav = wav.astype(np.float32) / np.iinfo(np.int16).max\n return wav\n\n\ndef to_log_spectrogram(wav, window_size=20, step_size=10, eps=1e-10):\n if type(wav) is str:\n wav = read_wav_file(wav)\n\n if len(wav) > SAMPLE_RATE:\n wav = _cut_sample_to_rate(wav)\n elif len(wav) < SAMPLE_RATE:\n wav = _pad_sample_to_rate(wav)\n\n nperseg = int(round(window_size * SAMPLE_RATE / 1e3))\n noverlap = int(round(step_size * SAMPLE_RATE / 1e3))\n frequencies, times, spectrogram = signal.spectrogram(\n wav,\n fs=SAMPLE_RATE,\n window='hann',\n nperseg=nperseg,\n noverlap=noverlap,\n detrend=False\n )\n return np.log(spectrogram.T.astype(np.float32) + eps)\n # log_spectrogram = np.log(spectrogram.T.astype(np.float32) + eps)\n # return np.reshape(log_spectrogram, newshape=(99, 161, 1))\n\n\ndef to_spectrogram(wav):\n if type(wav) is str:\n wav = read_wav_file(wav)\n\n if len(wav) > SAMPLE_RATE:\n wav = _cut_sample_to_rate(wav)\n elif len(wav) < SAMPLE_RATE:\n wav = _pad_sample_to_rate(wav)\n\n spectrogram = signal.stft(wav, 16000, nperseg=400, noverlap=240, nfft=512, padded=False, boundary=None)\n phase = np.angle(spectrogram[2]) / np.pi\n amp = np.log1p(np.abs(spectrogram[2]))\n\n return np.stack([phase, amp], axis=2)\n\n\ndef to_mel(wav):\n if type(wav) is str:\n wav = read_wav_file(wav)\n spectrogram = librosa.feature.melspectrogram(\n wav, sr=SAMPLE_RATE, n_mels=40, hop_length=160, n_fft=480, fmin=20, fmax=4000\n )\n spectrogram = librosa.power_to_db(spectrogram)\n return spectrogram.astype(np.float32)\n\n\ndef to_mfcc(wav):\n if type(wav) is str:\n wav = read_wav_file(wav)\n spectrogram = librosa.feature.melspectrogram(\n wav, sr=SAMPLE_RATE, n_mels=40, hop_length=160, n_fft=480, fmin=20, fmax=4000\n )\n idx = [spectrogram > 0]\n spectrogram[idx] = np.log(spectrogram[idx])\n\n dct_filters = librosa.filters.dct(n_filters=40, n_input=40)\n mfcc = [np.matmul(dct_filters, x) for x in np.split(spectrogram, spectrogram.shape[1], axis=1)]\n mfcc = np.hstack(mfcc)\n mfcc = mfcc.astype(np.float32)\n return mfcc\n\n\ndef raw(wav):\n if type(wav) is str:\n wav = read_wav_file(wav)\n\n if len(wav) > SAMPLE_RATE:\n wav = _cut_sample_to_rate(wav)\n elif len(wav) < SAMPLE_RATE:\n wav = _pad_sample_to_rate(wav)\n\n return wav\n\n\ndef _cut_sample_to_rate(sample, sample_rate=SAMPLE_RATE):\n 
beg = np.random.randint(0, len(sample) - sample_rate)\n return sample[beg: beg + sample_rate]\n\n\ndef _pad_sample_to_rate(sample, sample_rate=SAMPLE_RATE):\n remaining_len = sample_rate - len(sample)\n random_silence_sample = _get_random_silence_sample(remaining_len)\n j = np.random.randint(0, remaining_len)\n silence_part_left = random_silence_sample[0:j]\n silence_part_right = random_silence_sample[j:remaining_len]\n return np.concatenate([silence_part_left, sample, silence_part_right])\n\n\ndef _get_random_silence_sample(length, sample_rate=SAMPLE_RATE):\n silence_wav_file = read_wav_file(silence_df.wav_file[np.random.randint(0, len(silence_df))])\n i = np.random.randint(0, len(silence_wav_file) - length)\n return silence_wav_file[i:i + length]\n\n\ndef add_white_noise(wav):\n return wav + 0.005 * np.random.randn(len(wav))\n\n\ndef shift_left(wav):\n by = int(np.random.rand() * 800 + 1200)\n return np.roll(wav, by * -1)\n\n\ndef shift_right(wav):\n by = int(np.random.rand() * 800 + 1200)\n return np.roll(wav, by)\n\n\ndef speed_up(wav, by=0.05):\n faster_wav = librosa.effects.time_stretch(wav, 1 - by)\n return _cut_sample_to_rate(faster_wav)\n\n\ndef slow_down(wav, by=0.05):\n slower_wav = librosa.effects.time_stretch(wav, 1 + by)\n return _pad_sample_to_rate(slower_wav)\n","sub_path":"src/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"74869475","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTraining is driven by the image captured from the screen.\nMore than one action can be selected in a single step.\nThe level length grows gradually (linearly).\nIf the agent receives no reward for N steps, the exploration coefficient is increased.\n\"\"\"\n\n# from cnq_agent import CNQAgent\n# from cnq_agent_ma import CNQAgent\n# from vgg19_agent import CNQAgent\nfrom ar_agent import CNQAgent\nimport numpy as np\nimport gym_remote.exceptions as gre\nimport gym_remote.client as grc\n\nEPISODES = 2500\nMAX_FRAMES = 18000\n# AGENT_NAME = 'agent_4_II_repeat-6_exp-5-30-10'\nAGENT_NAME = 'agent_4_image_info_long_a_6'\nREPLAYS_DIR = 'D:\\\\PROJECTS\\\\GENERAL\\\\RESEARCH & CONTEST\\\\Retro GYM\\\\replays\\\\'\n# AGENT_NAME = 'base_visual_sonic_agent'\nEXPERIMENT_SIZE = 5\n\ndef train():\n from retro_contest.local import make\n import cv2\n from data_writer import VideoWriter\n ONE_TRY = 200\n RATIO = (MAX_FRAMES - ONE_TRY)/EPISODES\n\n # initialize gym environment and the agent\n env = make(game='SonicTheHedgehog-Genesis', state='SpringYardZone.Act3')\n state_size = env.observation_space.shape[0]\n action_size = env.action_space.n\n\n print('state_shape: ', env.observation_space.shape)\n print('action_size: ', action_size)\n # exit()\n agent = CNQAgent(action_size, AGENT_NAME)\n agent.epsilon_decay = 0.98\n # agent.load_model()\n batch_size = 200\n total_time = 0\n total_r = 0\n reward = 0\n action = np.zeros(12)\n\n prev_epsilon = agent.epsilon\n done_counter = 0\n # Iterate the game\n for e in range(EPISODES):\n # reset state in the beginning of each game\n state = env.reset()\n local_r_exp = []\n experiment_steps = EXPERIMENT_SIZE\n experiment_mode = False\n # print(state.shape)\n # exit()\n state = np.reshape(state, (1, 224, 320, 3))\n\n # time_t represents each frame of the game\n # Our goal is to keep the pole upright as long as possible until score of 500\n # the more time_t the more score\n for time_t in range(int(ONE_TRY)):\n # turn this on if you want to render\n # env.render()\n # Decide 
action\n action = agent.act(state, action, reward)\n # print(action)\n reward = 0\n for j in range(6):\n next_state, local_r, done, _ = env.step(action)\n total_r += local_r\n reward+=local_r\n \n next_state = np.reshape(next_state, (1, 224, 320, 3))\n # Remember the previous state, action, reward, and done\n agent.remember(state, action, reward, next_state, done)\n # make next_state the new current state for the next frame.\n state = next_state\n # done becomes True when the game ends\n # ex) The agent drops the pole\n\n if done:\n break\n \n local_r_exp.append(reward)\n if len(local_r_exp) >= 30 and sum(local_r_exp) < 10.0:\n prev_epsilon = agent.epsilon\n agent.epsilon = 0.8\n local_r_exp = []\n # print('IN experiment_mode {} -> {}'.format(prev_epsilon, 0.9))\n experiment_mode = True\n\n if experiment_mode:\n experiment_steps-=1\n if experiment_steps <= 0:\n experiment_mode = False\n # print('OUT experiment_mode {} -> {}'.format(0.9, prev_epsilon))\n experiment_steps = EXPERIMENT_SIZE\n agent.epsilon = prev_epsilon\n\n ONE_TRY +=RATIO\n # print('ONE TRY: {}'.format(ONE_TRY))\n # train the agent with the experience of the episode\n # print the score and break out of the loop\n print(\"episode: {}/{}, score: {}\".format(e, EPISODES, round(total_r, 1)))\n total_r = 0\n done_counter += 1\n if done_counter % 40 == 0:\n recorder = VideoWriter(320, 224, 30, REPLAYS_DIR + str(done_counter) + '.avi')\n for state, action, reward, next_state, done in agent.memory: \n # cv2.imshow('game', state[0])\n # cv2.waitKey(20)\n frame = cv2.putText(state[0], str(reward), (200, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))\n recorder.add_frame(frame)\n print('model saved...')\n recorder.stop_and_release()\n agent.save_model()\n if len(agent.memory) > batch_size:\n agent.replay(batch_size)\n\n agent.save_model()\n\ndef valid():\n agent = CNQAgent(12, AGENT_NAME)\n agent.load_model()\n agent.epsilon = 0.1\n env = make(game='SonicTheHedgehog-Genesis', state='SpringYardZone.Act3')\n state = env.reset()\n reward = 0\n action = np.zeros(12)\n while True:\n state = np.reshape(state, (1, 224, 320, 3))\n action = agent.act(state, action, reward) \n # print(action) \n state, reward, done, _ = env.step(action)\n cv2.imshow('game', state)\n cv2.waitKey(20)\n if done:\n obs = env.reset()\n\ndef test():\n agent = CNQAgent(12, AGENT_NAME)\n agent.load_model()\n agent.epsilon = 0.1\n print('connecting to remote environment')\n env = grc.RemoteEnv('tmp/sock')\n # env = make(game='SonicTheHedgehog-Genesis', state='SpringYardZone.Act3')\n print('starting episode')\n state = env.reset()\n reward = 0\n action = np.zeros(12)\n while True:\n state = np.reshape(state, (1, 224, 320, 3))\n action = agent.act(state, action, reward)\n state, reward, done, _ = env.step(action)\n if done:\n print('episode complete')\n env.reset()\n\n\nif __name__ == \"__main__\":\n # train()\n # valid()\n\n try:\n test()\n except gre.GymRemoteError as e:\n print('exception', e)","sub_path":"agent_4.py","file_name":"agent_4.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"620670179","text":"# Initialization > (Occurrences, Text)\noccurrences = {}\ntext = open(\"text.txt\")\n\n# Loop > Iterate through Text\nfor line in text:\n # Initialization > Word\n word = \"\"\n\n # Loop > Iterate through the Line\n for character in line:\n # Logic --- NOTE (Lapys) -> Any delimiter for a word.\n if character == ' ' or character == '\\n':\n # Initialization > 
Occurrence Found\n occurrence_found = False\n\n # Loop > Index Occurrences\n for occurrence in occurrences:\n # Logic\n if occurrence == word:\n # Modification > Occurrences > [Word]\n occurrences[word] += 1\n\n # Update > Occurrence Found\n occurrence_found = True\n\n # Logic\n if not occurrence_found:\n # Modification > Occurrences > [Word]\n occurrences[word] = 1\n\n # Update > Word\n word = \"\"\n\n else:\n # Update > Word\n word += character\n\n# Loop > Index Occurrences\nfor occurrence in occurrences:\n # Print\n print('\"' + occurrence + \"\\\":\", occurrences[occurrence])\n","sub_path":"Technologies/Python/Covenant University/v3/demo.v5/demo.v5.py","file_name":"demo.v5.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"561861116","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 14 14:51:15 2019\r\n\r\nRef:https://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/\r\n@author: 105502506\r\n\"\"\"\r\n\r\nimport loadDataset as lD\r\nimport getNeighbors as gN\r\nimport getResponse as gR\r\nimport getAccuracy as gA\r\n\r\ndef main():\r\n\t# prepare data\r\n trainingSet=[]\r\n testSet=[]\r\n split = 0.5\r\n lD.loadDataset('iris.data', split, trainingSet, testSet)\r\n print ('Train set: ' + repr(len(trainingSet)))\r\n print ('Test set: ' + repr(len(testSet)))\r\n \r\n\t# generate predictions\r\n for k in range(1,21):\r\n predictions=[]\r\n for x in range(len(testSet)):\r\n neighbors = gN.getNeighbors(trainingSet, testSet[x], k)\r\n result = gR.getResponse(neighbors)\r\n predictions.append(result)\r\n #print('> predicted=' + repr(result) + ', actual=' + repr(testSet[x][-1]))\r\n accuracy = gA.getAccuracy(testSet, predictions)\r\n print('Accuracy(k=' + repr(k) + '): ' + repr(accuracy) + '%')\r\n \r\nmain()","sub_path":"hw2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"250389283","text":"from gymenv_v2 import timelimit_wrapper, GurobiOriginalEnv\nimport numpy as np\nimport argparse\n\n\ndef make_gurobi_env(load_dir, idx, timelimit):\n\tprint('loading training instances, dir {} idx {}'.format(load_dir, idx))\n\tA = np.load('{}/A_{}.npy'.format(load_dir, idx))\n\tb = np.load('{}/b_{}.npy'.format(load_dir, idx))\n\tc = np.load('{}/c_{}.npy'.format(load_dir, idx))\n\tenv = timelimit_wrapper(GurobiOriginalEnv(A, b, c, solution=None, reward_type='obj'), timelimit)\n\treturn env\n\nif __name__ == '__main__':\n\n\timport argparse\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--instance-idx', type=int, default=0)\n\tparser.add_argument('--instance-name', type=str, default='randomip_n60_m60')\n\tparser.add_argument('--timelimit', type=int, default=100)\n\targs = parser.parse_args()\n\n\tinstance_idx = args.instance_idx\n\tinstance_name = args.instance_name\n\ttimelimit = args.timelimit\n\n\t# create an environment\n\tenv = make_gurobi_env('instances/{}'.format(instance_name), instance_idx, timelimit)\n\n\tgap, int_sol, lp_sol = env.env.max_gap()\n\n\tprint('max gap for this instance', gap)\n\tprint('opt int solution for this instance', int_sol)\n\tprint('initial lp solution for this instance', lp_sol)\n","sub_path":"findgaps.py","file_name":"findgaps.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"20222104","text":"import torch\n\nfrom discrete_nn.dataset.fashion import fashion_mnist_dataloaders\nfrom discrete_nn.models.conv.real import ConvReal\n\n\ndef train_model():\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # basic dataset holder\n # creates the dataloader for pytorch\n batch_size = 100\n train_loader, validation_loader, test_loader = fashion_mnist_dataloaders(batch_size, device, \"2d\")\n net = ConvReal()\n net = net.to(device)\n\n num_epochs = 100\n # will save metrics and model to disk\n return net.train_model(train_loader, validation_loader, test_loader, num_epochs, model_name=\"Fashion-Conv-real\")\n\n\nif __name__ == \"__main__\":\n train_model()\n","sub_path":"discrete_nn/experiments/fashion_conv/real.py","file_name":"real.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"398610730","text":"import numpy as np\nfrom collections import OrderedDict\nfrom numpy import ndarray\nfrom typing import List\n\n\ndef one_hot_encoding(X, classes):\n \"\"\"This implementation is explicit about the number of classes\"\"\"\n base = np.zeros((classes, X.shape[1]))\n\n for i, entry in enumerate(X[0, :]):\n base[int(entry), i] = 1\n\n return base\n\n\ndef flatten_params(params: OrderedDict) -> ndarray:\n\n flattened_values = []\n\n for values in params.values():\n for value in values:\n flattened_value = np.concatenate(value)\n flattened_values.append(flattened_value)\n\n return np.concatenate(flattened_values)\n\n\ndef unflatten_params(values: ndarray, meta: List[dict]) -> OrderedDict:\n \"\"\"\n meta - A list of dictionaries of that each contain the keys `name` and\n `shapes`. For example if the orginal `params` used to construct `values`\n was `Ws` and `bs` respetively where `Ws` has 2 entries of shape `(6, 4)`\n and `(1, 4)` and `bs` has 2 entries of shape `(6, 1)` and `(1, 1)` then\n `meta` would be\n [{\n 'name': 'Ws',\n 'shapes': [(6, 4), (1, 4)]\n }, {\n 'name': 'bs',\n 'shapes': [(6, 1), (1, 1)]\n }]\n the order of the entries in `meta` should correspond to the order in the\n OrderedDict `params`.\n \"\"\"\n\n index = 0\n params = OrderedDict()\n\n for entry in meta:\n\n name = entry['name']\n shapes = entry['shapes']\n\n param_entries = []\n for (n, m) in shapes:\n k = n * m\n param_entry = values[index:index+k].reshape(n, m)\n param_entries.append(param_entry)\n index = index + k\n\n params[name] = param_entries\n\n return params\n\n\ndef grad_check(fn, params, epsilon=10**-7):\n\n partials = np.zeros_like(params)\n\n for i, _ in enumerate(partials):\n bump = np.zeros_like(params, dtype=np.float64)\n bump[i] = epsilon\n\n partial = fn(params + bump) - fn(params - bump)\n partial /= 2 * epsilon\n partials[i] = partial\n\n return partials\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"477514745","text":"\n\n#calss header\nclass _CAPSTAN():\n\tdef __init__(self,): \n\t\tself.name = \"CAPSTAN\"\n\t\tself.definitions = [u'a machine with a spinning vertical cylinder that is used, especially on ships, for pulling heavy objects with a rope', u'a thin, spinning cylinder in a tape recorder (= a machine that records and plays back sound) that pulls the tape through the machine']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 
'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_capstan.py","file_name":"_capstan.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"71194058","text":"from __future__ import division # must come before any other import or statement\nfrom math import exp\nimport numpy as np\nimport pandas\nimport re, os\nfrom numpy.random import randint\nfrom math import sqrt\nfrom scipy.stats import futil\nfrom scipy.sparse.csgraph import _validation\nfrom sklearn import linear_model\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom scipy.sparse import hstack\nfrom sklearn.cross_validation import train_test_split\nfrom pandas.io.parsers import read_csv\nfrom nltk import word_tokenize \nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem.lancaster import LancasterStemmer\n \n \ntrain = read_csv(\"C:/Users/Wen/Desktop/Kaggle/hashtag/Stack/train80.csv\", quotechar=\"\\\"\")\ntest = read_csv(\"C:/Users/Wen/Desktop/Kaggle/hashtag/Stack/test.csv\", quotechar=\"\\\"\")\nstacked = read_csv(\"C:/Users/Wen/Desktop/Kaggle/hashtag/Stack/train20.csv\", quotechar=\"\\\"\")\n \n \nclass LemmaTokenizer(object):\n def __init__(self):\n self.wnl = WordNetLemmatizer()\n def __call__(self, doc):\n return [self.wnl.lemmatize(t) for t in word_tokenize(doc)]\n \nclass StemTokenizer(object):\n def __init__(self):\n self.wnl = LancasterStemmer()\n def __call__(self, doc):\n return [self.wnl.stem(t) for t in word_tokenize(doc)]\n \ndef writePredictionFile(outfile, res, test):\n with open(\"C:/Users/Wen/Desktop/Kaggle/hashtag/Stack/\" + outfile, \"w\") as out:\n out.write(\"\")\n ID = test[\"id\"]\n header = [\"id\",\"s1\",\"s2\",\"s3\",\"s4\",\"s5\",\"w1\",\"w2\",\"w3\",\"w4\",\"k1\",\"k2\",\"k3\",\n \"k4\",\"k5\",\"k6\",\"k7\",\"k8\",\"k9\",\"k10\",\"k11\",\"k12\",\"k13\",\"k14\",\"k15\"]\n with open(\"C:/Users/Wen/Desktop/Kaggle/hashtag/Stack/\" + outfile, \"a\") as out: # append to the same file that was just created above\n out.write(\",\".join(header) + \"\\n\")\n for i in range(len(res)):\n out.write(str(ID[i]) + \",\" + \",\".join([str(x) for x in res[i]]) + '\\n')\n \n \n \ndef PredictModels(models, real=True):\n x_train, y_train = np.array(train.ix[:,1]), np.array(train.ix[:,4:])\n if real:\n x_test, y_test = np.array(test.ix[:,1]), np.array(test.ix[:,4:])\n else: \n x_test, y_test = np.array(stacked.ix[:,1]), np.array(stacked.ix[:,4:])\n num_models = len(models[\"S\"][\"Vect\"]) + len(models[\"T\"][\"Vect\"])\n listPredictions = []\n listRealities = []\n for m in range(num_models-len(models[\"T\"][\"Vect\"])):\n block_pred = []\n for i, block in enumerate([\"S\", \"W\", \"K\"]):\n varrange = [range(0,5), range(5,9), range(9,24)]\n vect = models[block][\"Vect\"][m]\n clf = models[block][\"Clf\"][m]\n vect.fit(np.hstack((x_train, x_test)))\n xtrain = vect.transform(x_train)\n xtest = vect.transform(x_test)\n if isinstance(clf, linear_model.SGDRegressor):\n percolumn = []\n for z in varrange[i]:\n percolumn.append([clf.fit(xtrain, y_train[:, z]).predict(xtest)])\n print(\"SGD\" + str(z))\n pred = np.vstack((percolumn)).transpose()\n else:\n pred = clf.fit(xtrain, y_train[:, varrange[i]]).predict(xtest)\n block_pred.append(pred)\n print(\"block model done.\") \n listPredictions.append(np.hstack(block_pred))\n for model, clf in zip(models[\"T\"][\"Vect\"], 
models[\"T\"][\"Clf\"]):\n model.fit(np.hstack((x_train, x_test)))\n xtrain = model.transform(x_train)\n xtest = model.transform(x_test)\n if isinstance(clf, ExtraTreesRegressor):\n pred = clf.fit(xtrain.toarray(), y_train).predict(xtest.toarray())\n else:\n if isinstance(clf, linear_model.SGDRegressor):\n percolumn = []\n for z in range(24):\n percolumn.append([clf.fit(xtrain, y_train[:, z]).predict(xtest)])\n print(\"SGD\" + str(z))\n pred = np.vstack((percolumn)).transpose()\n else:\n pred = clf.fit(xtrain, y_train).predict(xtest)\n listPredictions.append(pred)\n print(\"total model done.\")\n print(\"lenlistpred \" , len(listPredictions))\n for i in range(len(listPredictions)):\n print(\"listpreds\", listPredictions[i].shape)\n X = np.hstack((listPredictions))\n if real:\n return(X)\n return([X,y_test])\n \n \n \nmodel1 = dict()\nmodel1[\"S\"] = dict()\nmodel1[\"W\"] = dict()\nmodel1[\"K\"] = dict()\nmodel1[\"T\"] = dict()\nmodel1[\"S\"][\"Vect\"] = [TfidfVectorizer(min_df=3, strip_accents='unicode', token_pattern=r'\\w{1,}', ngram_range=(1, 3), smooth_idf=25000, sublinear_tf=1)]\nmodel1[\"W\"][\"Vect\"]= [TfidfVectorizer(min_df=3, strip_accents='unicode', token_pattern=r'\\w{1,}', ngram_range=(1, 3), smooth_idf=25000, sublinear_tf=1)]\nmodel1[\"K\"][\"Vect\"]= [CountVectorizer(ngram_range=(1, 2), min_df=20)]\nmodel1[\"S\"][\"Clf\"] = [linear_model.SGDRegressor(alpha=0.00008, n_iter= 1000)]\nmodel1[\"W\"][\"Clf\"] = [linear_model.SGDRegressor(alpha=0.00008, n_iter= 1000)]\nmodel1[\"K\"][\"Clf\"] = [linear_model.SGDRegressor(alpha=0.00008, n_iter= 1000)]\nmodel1[\"T\"][\"Vect\"] = [CountVectorizer(ngram_range=(1,3), max_features=200000),\n TfidfVectorizer(max_features=200000, ngram_range=(1,3),\n smooth_idf=12000, use_idf=True),\n CountVectorizer(ngram_range=(4,5), max_features=200000,analyzer=\"char\"),\n CountVectorizer(max_features=1000, ngram_range=(1,3)),\n CountVectorizer(max_features=1000, ngram_range=(3,7), analyzer=\"char\"),\n TfidfVectorizer(max_features=1000, ngram_range=(1,2),\n smooth_idf=12000, use_idf=True),\n TfidfVectorizer(max_features=1000, ngram_range=(3,7), analyzer=\"char\",\n smooth_idf=12000, use_idf=True)]\nmodel1[\"T\"][\"Clf\"] = [linear_model.Ridge(alpha=30),\n linear_model.Ridge(alpha=1.175),\n linear_model.Ridge(alpha=120),\n ExtraTreesRegressor(n_estimators=40, max_features=33,\n min_samples_split=50, n_jobs=4),\n ExtraTreesRegressor(n_estimators=40, max_features=33,\n min_samples_split=50, n_jobs=4),\n ExtraTreesRegressor(n_estimators=40, max_features=33,\n min_samples_split=50, n_jobs=4),\n ExtraTreesRegressor(n_estimators=40, max_features=33,\n min_samples_split=50, n_jobs=4)]\n \ntrainpreds = PredictModels(model1, False)\npandas.DataFrame(trainpreds[0]).to_csv(\"stack3model1X.txt\", index=False)\ndel trainpreds\ntestpreds = PredictModels(model1, True)\npandas.DataFrame(testpreds).to_csv(\"stack3model1XTest.txt\", index=False)\n \n \nmodel2 = dict()\nmodel2[\"S\"] = dict()\nmodel2[\"W\"] = dict()\nmodel2[\"K\"] = dict()\nmodel2[\"T\"] = dict()\nmodel2[\"S\"][\"Vect\"] = []\nmodel2[\"W\"][\"Vect\"]= []\nmodel2[\"K\"][\"Vect\"]= []\nmodel2[\"S\"][\"Clf\"] = []\nmodel2[\"W\"][\"Clf\"] = []\nmodel2[\"K\"][\"Clf\"] = []\nmodel2[\"T\"][\"Vect\"] = [CountVectorizer(max_features=200000, tokenizer=LemmaTokenizer(),\n ngram_range=(1,3)),\n CountVectorizer(max_features=200000, tokenizer=StemTokenizer(),\n ngram_range=(1,3)),\n HashingVectorizer(lowercase=False, binary=True),\n HashingVectorizer(lowercase=False, binary=False),\n 
TfidfVectorizer(max_features=200000, ngram_range=(1,2),\n smooth_idf=12000),\n TfidfVectorizer(max_features=1000, ngram_range=(1,2), smooth_idf=12000),\n TfidfVectorizer(max_features=1000, ngram_range=(1,2), smooth_idf=12000),\n TfidfVectorizer(max_features=1000, ngram_range=(1,2), smooth_idf=12000),\n CountVectorizer(max_features=1000, ngram_range=(1,3))]\nmodel2[\"T\"][\"Clf\"] = [linear_model.Ridge(alpha=31),\n linear_model.Ridge(alpha=31),\n linear_model.Ridge(alpha=1.175),\n linear_model.Ridge(alpha=1.175),\n linear_model.Ridge(alpha=31),\n ExtraTreesRegressor(n_estimators=40, max_features=10,\n min_samples_split=50, n_jobs=4),\n ExtraTreesRegressor(n_estimators=40, max_features=20,\n min_samples_split=50, n_jobs=4),\n ExtraTreesRegressor(n_estimators=40, max_features=\"sqrt\",\n min_samples_split=25, n_jobs=4),\n ExtraTreesRegressor(n_estimators=40, max_features=\"sqrt\",\n min_samples_split=50, n_jobs=4)]\n \n \ntrainpreds = PredictModels(model2, False)\ntestpreds = PredictModels(model2, True)\n \npandas.DataFrame(trainpreds[0]).to_csv(\"stack3model2X.txt\", index=False)\npandas.DataFrame(testpreds).to_csv(\"stack3model2XTest.txt\", index=False)\n \n \nmodel3 = dict()\nmodel3[\"S\"] = dict()\nmodel3[\"W\"] = dict()\nmodel3[\"K\"] = dict()\nmodel3[\"T\"] = dict()\nmodel3[\"S\"][\"Vect\"] = [TfidfVectorizer(min_df=1, strip_accents='unicode',\n max_features = 500000, token_pattern=r'\\w{1,}',\n ngram_range=(1, 3), smooth_idf=25000, sublinear_tf=1),\n CountVectorizer(ngram_range=(1,3)),\n TfidfVectorizer(max_features=200000, ngram_range=(1,2),\n smooth_idf=12000)]\nmodel3[\"W\"][\"Vect\"]= [TfidfVectorizer(min_df=1, strip_accents='unicode',\n max_features = 500000, token_pattern=r'\\w{1,}',\n ngram_range=(1, 3), smooth_idf=25000, sublinear_tf=1),\n CountVectorizer(ngram_range=(1,4), max_features=900000),\n TfidfVectorizer(max_features=200000, ngram_range=(1,2),\n smooth_idf=12000)]\nmodel3[\"K\"][\"Vect\"]= [TfidfVectorizer(min_df=1, strip_accents='unicode',\n max_features = 500000, token_pattern=r'\\w{1,}',\n ngram_range=(1, 4), smooth_idf=25000, sublinear_tf=1),\n CountVectorizer(ngram_range=(1,2), min_df=20),\n TfidfVectorizer(max_features=200000, ngram_range=(1,2),\n smooth_idf=12000)]\nmodel3[\"S\"][\"Clf\"] = [linear_model.Ridge(alpha=0.6),\n linear_model.Ridge(alpha=10),\n linear_model.Ridge(alpha=1.075)]\nmodel3[\"W\"][\"Clf\"] = [linear_model.Ridge(alpha=1.175),\n linear_model.Ridge(alpha=30),\n linear_model.Ridge(alpha=1.3)]\nmodel3[\"K\"][\"Clf\"] = [linear_model.Ridge(alpha=0.9),\n linear_model.Ridge(alpha=17),\n linear_model.Ridge(alpha=1.075)]\nmodel3[\"T\"][\"Vect\"] = [CountVectorizer(ngram_range=(1, 4))]\nmodel3[\"T\"][\"Clf\"] = [linear_model.SGDRegressor(loss='squared_loss', alpha=0.00001,\n n_iter= 1000)] \n \ntrainpreds = PredictModels(model3, False)\ntestpreds = PredictModels(model3, True)\n \npandas.DataFrame(trainpreds[0]).to_csv(\"stack3model3X.txt\", index=False)\npandas.DataFrame(testpreds).to_csv(\"stack3model3XTest.txt\", index=False)\npandas.DataFrame(trainpreds[1]).to_csv(\"stack3yAllModels.txt\", index=False)\n","sub_path":"Stack_algo.py","file_name":"Stack_algo.py","file_ext":"py","file_size_in_byte":11659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"303171866","text":"from enum import Enum\nimport wave\nimport itertools\nimport subprocess\n\nclass LetterKind(Enum):\n VOWEL = 0\n CONSONANT = 1\n COMPOUND = 2\n LEGACY = 3\n\nletters = {\n 0: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"a\", 
\"A\", \"A\" ] },\n 1: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"á\", \"Á\", \"Á\" ] },\n 8: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"e\", \"E\", \"E\" ] },\n 9: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"é\", \"É\", \"É\" ] },\n 14: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"i\", \"I\", \"I\" ] },\n 15: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"í\", \"Í\", \"Í\" ] },\n 23: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"o\", \"O\", \"O\" ] },\n 24: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"ó\", \"Ó\", \"Ó\" ] },\n 25: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"ö\", \"Ö\", \"Ö\" ] },\n 26: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"ő\", \"Ő\", \"Ő\" ] },\n 33: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"u\", \"U\", \"U\" ] },\n 34: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"ú\", \"Ú\", \"Ú\" ] },\n 35: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"ü\", \"Ü\", \"Ü\" ] },\n 36: { \"kind\": LetterKind.VOWEL, \"symbol\": [\"ű\", \"Ű\", \"Ű\" ] },\n 2: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"b\", \"B\", \"B\" ] },\n 3: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"c\", \"C\", \"C\" ] },\n 5: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"d\", \"D\", \"D\" ] },\n 10: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"f\", \"F\", \"F\" ] },\n 11: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"g\", \"G\", \"G\" ] },\n 13: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"h\", \"H\", \"H\" ] },\n 16: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"j\", \"J\", \"J\" ] },\n 17: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"k\", \"K\", \"K\" ] },\n 18: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"l\", \"L\", \"L\" ] },\n 20: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"m\", \"M\", \"M\" ] },\n 21: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"n\", \"N\", \"N\" ] },\n 27: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"p\", \"P\", \"P\" ] },\n 28: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"r\", \"R\", \"R\" ] },\n 29: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"s\", \"S\", \"S\" ] },\n 31: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"t\", \"T\", \"T\" ] },\n 37: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"v\", \"V\", \"V\" ] },\n 38: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"z\", \"Z\", \"Z\" ] },\n 4: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"cs\", \"Cs\", \"CS\" ], \"double_symbol\": [\"ccs\", \"Ccs\", \"CCS\" ] },\n 6: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"dz\", \"Dz\", \"DZ\" ], \"double_symbol\": [\"ddz\", \"Ddz\", \"DDZ\" ] },\n 7: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"dzs\", \"Dzs\", \"DZS\"], \"double_symbol\": [\"ddzs\", \"Ddzs\", \"DDZS\"] },\n 12: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"gy\", \"Gy\", \"GY\" ], \"double_symbol\": [\"ggy\", \"Ggy\", \"GGY\" ] },\n 19: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"ly\", \"Ly\", \"LY\" ], \"double_symbol\": [\"lly\", \"Lly\", \"LLY\" ] },\n 22: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"ny\", \"Ny\", \"NY\" ], \"double_symbol\": [\"nny\", \"Nny\", \"NNY\" ] },\n 30: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"sz\", \"Sz\", \"SZ\" ], \"double_symbol\": [\"ssz\", \"Ssz\", \"SSZ\" ] },\n 32: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"ty\", \"Ty\", \"TY\" ], \"double_symbol\": [\"tty\", \"Tty\", \"TTY\" ] },\n 39: { \"kind\": LetterKind.CONSONANT, \"symbol\": [\"zs\", \"Zs\", \"ZS\" ], \"double_symbol\": [\"zzs\", \"Zzs\", \"ZZS\" ] },\n 100: { \"kind\": LetterKind.COMPOUND, \"symbol\": [\"q\", \"Q\", \"Q\" ], 
\"same_as\": [17, 37] }, # k v\n 101: { \"kind\": LetterKind.COMPOUND, \"symbol\": [\"x\", \"X\", \"X\" ], \"same_as\": [17, 30] }, # k sz\n 200: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"th\", \"Th\", \"TH\" ], \"same_as\": 31 }, # h\n 201: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"cz\", \"Cz\", \"CZ\" ], \"same_as\": 3 }, # c\n 202: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"ch\", \"Ch\", \"CH\" ], \"same_as\": 4 }, # cs\n 203: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"ch\", \"Ch\", \"CH\" ], \"same_as\": 17 }, # k\n 204: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"ch\", \"Ch\", \"CH\" ], \"same_as\": 13 }, # h\n 205: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"y\", \"Y\", \"Y\" ], \"same_as\": 14 }, # i\n 206: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"ew\", \"Ew\", \"EW\" ], \"same_as\": 25 }, # ö\n 207: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"eö\", \"Eö\", \"EÖ\" ], \"same_as\": 25 }, # ö\n 208: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"ph\", \"Ph\", \"PH\" ], \"same_as\": 10 }, # f\n 209: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"sch\", \"Sch\", \"SCH\"], \"same_as\": 29 }, # s\n 210: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"gh\", \"Gh\", \"GH\" ], \"same_as\": 11 }, # g\n 211: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"w\", \"W\", \"W\" ], \"same_as\": 37 }, # v\n #212: { \"kind\": LetterKind.LEGACY, \"symbol\": [\"ts\", \"Ts\", \"TS\" ], \"same_as\": 4 }, # Causes troubles with assimilations\n}\n\nassimilations = {\n 1: { \"from\": [31, 31, 29], \"to\": [ 4, 4] }, # t t s -> cs cs\n 2: { \"from\": [31, 29, 5], \"to\": [ 4, 5] }, # t s d -> cs d\n 3: { \"from\": [ 5, 30], \"to\": [ 3, 3] }, # d sz -> c c\n 4: { \"from\": [12, 16], \"to\": [12, 12] }, # gy j -> gy gy\n 5: { \"from\": [32, 16], \"to\": [32, 32] }, # ty j -> ty ty\n 6: { \"from\": [31, 16], \"to\": [32, 32] }, # t j -> ty ty\n 7: { \"from\": [ 5, 16], \"to\": [12, 12] }, # d j -> gy gy\n 8: { \"from\": [30, 29], \"to\": [29, 29] }, # sz s -> s s\n 9: { \"from\": [31, 3], \"to\": [ 3, 3] }, # t c -> c c\n 10: { \"from\": [31, 29], \"to\": [ 4, 4 ] }, # t s -> cs cs\n 11: { \"from\": [ 2, 13], \"to\": [27, 13] }, # b h -> p h\n 12: { \"from\": [38, 31], \"to\": [30, 31] }, # z t -> sz t\n 13: { \"from\": [21, 2], \"to\": [20, 2] }, # n b -> m b\n 14: { \"from\": [ 5, 3], \"to\": [ 3, 3] }, # d c -> c c\n 15: { \"from\": [18, 16], \"to\": [16, 16] }, # l j -> j j\n 16: { \"from\": [31, 3], \"to\": [ 3, 3] }, # t c -> c c\n 17: { \"from\": [12, 4], \"to\": [ 4, 4] }, # gy cs -> cs cs\n 18: { \"from\": [30, 39], \"to\": [39, 39] }, # sz zs -> zs zs\n 19: { \"from\": [18, 28], \"to\": [28, 28] }, # l r -> r r\n 20: { \"from\": [21, 20], \"to\": [20, 20] }, # n m -> m m\n 21: { \"from\": [38, 29], \"to\": [29, 29] }, # z s -> s s\n 22: { \"from\": [ 5, 29], \"to\": [ 4, 4] }, # d s -> cs cs\n 23: { \"from\": [ 5, 31], \"to\": [31, 31] }, # d t -> t t\n 24: { \"from\": [38, 27], \"to\": [30, 27] }, # z p -> sz p\n 25: { \"from\": [31, 30], \"to\": [ 3, 3] }, # t sz -> c c\n 26: { \"from\": [22, 16], \"to\": [22, 22] }, # ny j -> ny ny\n 27: { \"from\": [12, 29], \"to\": [ 4, 4] }, # gy s -> cs cs\n 28: { \"from\": [11, 31], \"to\": [17, 31] }, # g t -> k t\n 29: { \"from\": [ 3, 30], \"to\": [ 3, 3] }, # c sz -> c c\n}\n\n# TODO: Consonant + Vowel pairs\nvoice_samples = {\n 0: { \"wavefile\": \"a.wav\", \"letters\": [ 0] }, # a\n 1: { \"wavefile\": \"aacute.wav\", \"letters\": [ 1] }, # á\n 2: { \"wavefile\": \"b.wav\", \"letters\": [ 2] }, # b\n 3: { \"wavefile\": \"c.wav\", 
\"letters\": [ 3] }, # c\n 4: { \"wavefile\": \"cs.wav\", \"letters\": [ 4] }, # cs\n 5: { \"wavefile\": \"d.wav\", \"letters\": [ 5] }, # d\n 6: { \"wavefile\": \"dz.wav\", \"letters\": [ 6] }, # dz\n 7: { \"wavefile\": \"dzs.wav\", \"letters\": [ 7] }, # dzs\n 8: { \"wavefile\": \"e.wav\", \"letters\": [ 8] }, # e\n 9: { \"wavefile\": \"eacute.wav\", \"letters\": [ 9] }, # é\n 10: { \"wavefile\": \"f.wav\", \"letters\": [10] }, # f\n 11: { \"wavefile\": \"g.wav\", \"letters\": [11] }, # g\n 12: { \"wavefile\": \"gy.wav\", \"letters\": [12] }, # gy\n 13: { \"wavefile\": \"h.wav\", \"letters\": [13] }, # h\n 14: { \"wavefile\": \"i.wav\", \"letters\": [14] }, # i\n 15: { \"wavefile\": \"iacute.wav\", \"letters\": [15] }, # í\n 16: { \"wavefile\": \"j.wav\", \"letters\": [16] }, # j\n 17: { \"wavefile\": \"k.wav\", \"letters\": [17] }, # k\n 18: { \"wavefile\": \"l.wav\", \"letters\": [18] }, # l\n 19: { \"wavefile\": \"ly.wav\", \"letters\": [19] }, # ly\n 20: { \"wavefile\": \"m.wav\", \"letters\": [20] }, # m\n 21: { \"wavefile\": \"n.wav\", \"letters\": [21] }, # n\n 22: { \"wavefile\": \"ny.wav\", \"letters\": [22] }, # ny\n 23: { \"wavefile\": \"o.wav\", \"letters\": [23] }, # o\n 24: { \"wavefile\": \"oacute.wav\", \"letters\": [24] }, # ó\n 25: { \"wavefile\": \"oumlaut.wav\", \"letters\": [25] }, # ö\n 26: { \"wavefile\": \"odoubleacute.wav\", \"letters\": [26] }, # ő\n 27: { \"wavefile\": \"p.wav\", \"letters\": [27] }, # p\n 28: { \"wavefile\": \"r.wav\", \"letters\": [28] }, # r\n 29: { \"wavefile\": \"s.wav\", \"letters\": [29] }, # s\n 30: { \"wavefile\": \"sz.wav\", \"letters\": [30] }, # sz\n 31: { \"wavefile\": \"t.wav\", \"letters\": [31] }, # t\n 32: { \"wavefile\": \"ty.wav\", \"letters\": [32] }, # ty\n 33: { \"wavefile\": \"u.wav\", \"letters\": [33] }, # u\n 34: { \"wavefile\": \"uacute.wav\", \"letters\": [34] }, # ú\n 35: { \"wavefile\": \"uumlaut.wav\", \"letters\": [35] }, # ü\n 36: { \"wavefile\": \"udoubleacute.wav\", \"letters\": [36] }, # ű\n 37: { \"wavefile\": \"v.wav\", \"letters\": [37] }, # v\n 38: { \"wavefile\": \"z.wav\", \"letters\": [38] }, # z\n 39: { \"wavefile\": \"zs.wav\", \"letters\": [39] }, # zs\n #1: { \"wavefile\": \"l_a.wav\", \"letters\": [18, 0] }, # l a\n #2: { \"wavefile\": \"b_eacute.wav\", \"letters\": [ 2, 9] }, # b é\n}\n\ndef tokenize(string):\n tokens = []\n ok = True\n\n while ok:\n matches = []\n\n # Match single letters\n for key, letter in letters.items():\n # Lowercase\n symbol = letter[\"symbol\"][0]\n if string[:len(symbol)] == symbol:\n matches.append((\"single\", key, len(symbol)));\n # Uppercase\n symbol = letter[\"symbol\"][1]\n if string[:len(symbol)] == symbol:\n matches.append((\"single\", key, len(symbol)));\n\n # Match double multigraphs\n for key, letter in letters.items():\n if \"double_symbol\" in letter:\n # Lowercase\n symbol = letter[\"double_symbol\"][0]\n if string[:len(symbol)] == symbol:\n matches.append((\"double\", key, len(symbol)));\n # Uppercase\n symbol = letter[\"double_symbol\"][1]\n if string[:len(symbol)] == symbol:\n matches.append((\"double\", key, len(symbol)));\n\n if len(matches) == 1:\n if matches[0][0] == \"single\":\n tokens.append(matches[0][1])\n elif matches[0][0] == \"double\":\n tokens.append(matches[0][1])\n tokens.append(matches[0][1])\n string = string[matches[0][2]:]\n ok = string != \"\"\n else:\n ok = False\n\n if string == \"\":\n return [tokens]\n elif len(matches) == 0:\n return []\n else:\n result_list = []\n for match_type, match_key, match_len in matches:\n for t in 
tokenize(string[match_len:]):\n new_tokens = tokens[:]\n if match_type == \"single\":\n new_tokens.append(match_key)\n elif match_type == \"double\":\n new_tokens.append(match_key)\n new_tokens.append(match_key)\n new_tokens.extend(t)\n result_list.append(new_tokens)\n return result_list\n\ndef modernize(tokens):\n def get_modern(key):\n if \"same_as\" in letters[key]:\n return letters[key][\"same_as\"]\n else:\n return key\n return list(map(lambda k: get_modern(k), tokens))\n\ndef unwrap_compounds(tokens):\n result = []\n for key in tokens:\n if letters[key][\"kind\"] == LetterKind.COMPOUND:\n result.extend(letters[key][\"same_as\"])\n else:\n result.append(key)\n return result\n\ndef process_assimilations(tokens):\n result = []\n while len(tokens) > 0:\n macthed_ass = None\n for key in assimilations:\n if tokens[:len(assimilations[key][\"from\"])] == assimilations[key][\"from\"]:\n macthed_ass = key\n break\n if macthed_ass is not None:\n result.extend(assimilations[macthed_ass][\"to\"])\n tokens = tokens[len(assimilations[macthed_ass][\"from\"]):]\n else:\n result.append(tokens[0])\n tokens = tokens[1:]\n\n return result\n\ndef get_best_one(tokens):\n scores = []\n for i in range(0, len(tokens)):\n scores.append({\n \"token\": tokens[i],\n \"score\": 0,\n });\n\n best_to_have = [\n # Ambigous \"ch\"\n [31, 8, 203], # t-e-ch\n [27, 30, 14, 204], # p-sz-i-ch\n # Settlement prefixes ending in \"s\"\n [0, 12, 0, 11, 23, 29], # a-gy-a-g-o-s\n [2, 9, 17, 9, 29], # b-é-k-é-s\n [11, 23, 20, 2, 23, 29], # g-o-m-b-o-s\n [17, 0, 27, 23, 29], # k-a-p-o-s\n [17, 8, 20, 8, 21, 8, 29], # k-e-m-e-n-e-s\n [17, 14, 29], # k-i-s\n [21, 8, 20, 8, 29], # n-e-m-e-s\n [28, 1, 17, 23, 29], # r-á-k-o-s\n # Other settlement thing\n [13, 8, 19], # h-e-ly\n # Other things\n [17, 25, 38, 29, 9, 11], # k-ö-z-s-é-g\n [28, 23, 30, 30], # r-o-sz-sz\n ]\n\n for i in range(0, len(scores)):\n for o in range(0, len(scores[i][\"token\"])):\n for b in best_to_have:\n if scores[i][\"token\"][o:o+len(b)] == b:\n scores[i][\"score\"] += 1\n # Double letters\n #if len(letters[scores[i][\"token\"][o]][\"symbol\"][0]) > 1:\n # scores[i][\"score\"] += 1\n scores = sorted(scores, key=lambda m: (-m[\"score\"], len(m[\"token\"])))\n\n return scores[0][\"token\"]\n\ndef remove_consecutive_duplicates(tokens):\n return [k for k, g in itertools.groupby(tokens)]\n\ndef load_waves(voice_name):\n for key in voice_samples:\n filename = voice_name + \"/\" + voice_samples[key][\"wavefile\"]\n w = wave.open(filename, \"rb\")\n voice_samples[key][\"sampledata\"] = w.readframes(w.getnframes())\n w.close()\n\ndef write_voice(output, tokens):\n while len(tokens) > 0:\n macthed_sample = None\n for key in voice_samples:\n if tokens[:len(voice_samples[key][\"letters\"])] == voice_samples[key][\"letters\"]:\n macthed_sample = key\n break\n if macthed_sample is not None:\n output.writeframes(voice_samples[key][\"sampledata\"])\n tokens = tokens[len(voice_samples[key][\"letters\"]):]\n else:\n raise Exception(\"No sample found\")\n\n pass\n\n#test_sentence = \"Egész sokat dicsért technológiai haladásunk olyan mint fejsze egy beteges bűnöző kezében\"\n#test_sentence = \"A község a közigazgatási rendszer területi és szervezeti alapegysége a legtöbb európai és számos Európán kívüli országban\"\n#test_sentence = \"község vanmár balra vadászzsákmány hegycsúcs utca aljas Thewrewk Kemenesszentmárton egészségügyi átcipel hagyjátok bátyja gyógyítja mondjuk tanítsa bolondság hányja lábhoz tűztem különbség nádcukor központ\"\n#test_sentence = \"1 12 123 1234 12345 
123456 1234567 12345678 123456789\"\n#test_sentence = \"kemenesszentmárton horváth thewrewk község technológia madách széchenyi szombathely pszichológia\"\n#test_sentence = \"Megszentségteleníthetetlenségeskedéseitekért\"\n#test_sentence = \"árvíztűrő tükörfúrógép\"\ntest_sentence = \"Hát lehetne eggyel jobb de nem kegyetlenül kurva rossz hanem aránylag szar viszon még kéne bele egypár dolog\"\n#test_sentence = \"A bajkonuri űrrepülőtér Kazahsztánban Bajkonur város mellett található rakétaindító hely amelynek hatezer négyzetkilométeres körzetét ezerkilencszázkilencvenötben húsz évre Oroszország vette bérbe\"\n#test_sentence = \"Te hollóalkatú Voldemort képű Margit híd\"\n#test_sentence = \"Sokkal könnyebb hidat verni a Dunán mintsem a Marczal posványságán\"\n\n#test_sentence = \"ól sztár báj szmesh máuf\"\n#test_sentence = \"számbádi vánsz told mí dö vörd iz gona roll mí\"\n#test_sentence = \"áj éjnt dö shárpeszt túl in dö shed\"\n#test_sentence = \"sí voz lúking kájnd o dámb vit hör fingör end hör támb\"\n#test_sentence = \"in dö sép of ön el on hör fórhed\"\n\n\nload_waves(\"voice_tibor\")\noutput = wave.open(\"out.wav\", \"wb\")\noutput.setparams((1, 2, 44100, 0, \"NONE\", \"not compressed\"))\nfor word in test_sentence.split(\" \"):\n #word = resolve_numbers(word) # e.g.: 123 -> százhuszonhárom # before tokenization\n tokens = tokenize(word)\n #print(tokens)\n\n t = get_best_one(tokens)\n t = unwrap_compounds(t)\n t = modernize(t)\n t = process_assimilations(t)\n t = remove_consecutive_duplicates(t)\n print(\"-\".join(list(map(lambda x: letters[x][\"symbol\"][0], t))))\n write_voice(output, t)\n\noutput.close()\nsubprocess.Popen([\"vlc\", \"out.wav\"])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"11257219","text":"#=========================================================================\n# MinMax Unit Tests\n#=========================================================================\n\nfrom pymtl import *\nfrom pclib.test import TestVectorSimulator\n\nfrom MinMax import MinMax\n\n#-------------------------------------------------------------------------\n# Test basics\n#-------------------------------------------------------------------------\n\ndef test_basics( dump_vcd ):\n\n # Test vectors\n\n test_vectors = [\n # -- in -- -- out --\n [ 4, 3, 3, 4 ],\n [ 9, 6, 6, 9 ],\n [ 12, 16, 12, 16 ],\n [ 12, 16, 12, 16 ],\n ]\n\n # Instantiate and elaborate the model\n\n model = MinMax()\n model.vcd_file = dump_vcd\n model.elaborate()\n\n # Function to set the inputs on the model\n\n def tv_in( model, test_vector ):\n model.in0.value = test_vector[0]\n model.in1.value = test_vector[1]\n\n # Function to verify the outputs from the model\n\n def tv_out( model, test_vector ):\n if test_vector[2] != '?':\n assert model.min.value == test_vector[2]\n if test_vector[3] != '?':\n assert model.max.value == test_vector[3]\n\n # Create and run the test simulation\n\n sim = TestVectorSimulator( model, test_vectors, tv_in, tv_out )\n sim.run_test()\n\n","sub_path":"examples/sorter/MinMax_test.py","file_name":"MinMax_test.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"230173579","text":"\nimport numpy as np\nfrom gym import utils\nimport os\n\n\nimport pybullet\nimport pybullet_data\n\nfrom pybullet_envs.gym_locomotion_envs import WalkerBaseBulletEnv, Ant\nfrom 
pybullet_envs.robot_locomotors import WalkerBase\nfrom pybullet_envs.scene_abstract import Scene\n\nfrom robot_bases import BodyPart\n\nimport pdb\n\n# with 1/120 max speed at contact is 5 for E-W\nTIME_STEP_FIXED = 1/60 #1/120 # 0.0165\nFRAME_SKIP = 4 # 4\n\n_VEL_THRSH = .05\n_REW_THRSH = 0.2\n_EPS = 1e-06\n\n\n\n\ndef get_cube(_p, x, y, z):\n body = _p.loadURDF(os.path.join(os.path.join(os.path.dirname(__file__)), \n \"assets/wall.urdf\"), [x, y, z])\n _p.changeDynamics(body, -1, mass=1.2) #match Roboschool\n part_name, _ = _p.getBodyInfo(body)\n part_name = part_name.decode(\"utf8\")\n bodies = [body]\n return BodyPart(_p, part_name, bodies, 0, -1)\n\n\ndef get_sphere(_p, x, y, z):\n body = _p.loadURDF(os.path.join(os.path.join(os.path.dirname(__file__)), \n \"assets/ball_blue.urdf\"), [x, y, z])\n part_name, _ = _p.getBodyInfo(body)\n part_name = part_name.decode(\"utf8\")\n bodies = [body]\n return BodyPart(_p, part_name, bodies, 0, -1)\n\n\n\nclass TopCamera:\n \"\"\" Overwriting the visualisation angle to make it birds-eye view \"\"\"\n def __init__(self, env):\n self.env = env\n pass\n\n def move_and_look_at(self, i, j, k, x, y, z):\n # lookat = [x, y, z]\n lookat = self.env.camera_info['lookat']\n distance = self.env.camera_info['camera']['distance']-4.5\n yaw = self.env.camera_info['camera']['yaw']\n pitch = self.env.camera_info['camera']['pitch']\n # distance, yaw, pitch = 3, -90., -45.\n self.env._p.resetDebugVisualizerCamera(distance, yaw, pitch, lookat)\n\n\n\n\nclass BoundedStadiumScene(Scene):\n \"\"\" \n Custom-made playing field with walls and no reflection.\n \"\"\"\n zero_at_running_strip_start_line = True # if False, center of coordinates (0,0,0) will be at the middle of the stadium\n stadium_halflen = 105 * 0.25 # FOOBALL_FIELD_HALFLEN\n stadium_halfwidth = 50 * 0.25 # FOOBALL_FIELD_HALFWID\n stadiumLoaded = 0\n multiplayer = False\n\n def __init__(self, ball_pos, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.ball_pos = ball_pos\n\n def episode_restart(self, bullet_client):\n self._p = bullet_client\n Scene.episode_restart(self, bullet_client) # contains cpp_world.clean_everything()\n\n if (self.stadiumLoaded == 0):\n self.stadiumLoaded = 1\n # Add stadium with walls\n filename = os.path.join(os.path.dirname(__file__), \n \"assets/plane_bounded.sdf\")\n self.ground_plane_mjcf = self._p.loadSDF(filename)\n self._p.changeDynamics(0, -1, lateralFriction=.8, restitution=0.5, rollingFriction=0.005)\n # self._p.changeVisualShape(i, -1, rgbaColor=[1, 1, 1, 1])\n # self._p.configureDebugVisualizer(pybullet.COV_ENABLE_PLANAR_REFLECTION,i)\n for i in range(1, len(self.ground_plane_mjcf)):\n self._p.changeDynamics(i, -1, \n # lateralFriction=100, \n # linearDamping=100,\n # rollingFriction=0.1,\n # spinningFriction=0.03,\n restitution=1)\n # Add ball\n filename = os.path.join(os.path.dirname(__file__), \n \"assets/ball_blue.urdf\")\n ball_body = self._p.loadURDF(filename, self.ball_pos)\n self._p.changeDynamics(ball_body, -1, restitution=1, mass=2.5)#,rollingFriction=0.001)\n # Add Obstacle to scene\n # self.obstacle = get_cube(self._p, 3.25, 0, 0.25)\n self.ground_plane_mjcf += (ball_body, )\n # # Update bouncyness\n # for i in range(0, len(self.ground_plane_mjcf)):\n # print(\"===\", self._p.getDynamicsInfo(i, -1))\n\n\n\nclass NormalScene(Scene):\n \"\"\" \n Custom-made playing field with walls and no reflection.\n \"\"\"\n zero_at_running_strip_start_line = True # if False, center of coordinates (0,0,0) will be at the middle of the stadium\n stadium_halflen = 
105 * 0.25 # FOOBALL_FIELD_HALFLEN\n stadium_halfwidth = 50 * 0.25 # FOOBALL_FIELD_HALFWID\n stadiumLoaded = 0\n multiplayer = False\n\n def __init__(self, ball_pos, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.ball_pos = ball_pos\n\n def episode_restart(self, bullet_client):\n self._p = bullet_client\n Scene.episode_restart(self, bullet_client) # contains cpp_world.clean_everything()\n\n if (self.stadiumLoaded == 0):\n self.stadiumLoaded = 1\n # Add stadium floor\n filename = os.path.join(os.path.dirname(__file__), \n \"assets/plane_normal.sdf\")\n self.ground_plane_mjcf = self._p.loadSDF(filename)\n self._p.changeDynamics(0, -1, lateralFriction=.8, restitution=0.5, \n rollingFriction=0.005)\n # self._p.changeVisualShape(i, -1, rgbaColor=[1, 1, 1, 1])\n # self._p.configureDebugVisualizer(pybullet.COV_ENABLE_PLANAR_REFLECTION,i)\n # Add ball\n filename = os.path.join(os.path.dirname(__file__), \n \"assets/ball_blue.urdf\")\n ball_body = self._p.loadURDF(filename, self.ball_pos)\n self._p.changeDynamics(ball_body, -1, restitution=1, mass=5.)#,rollingFriction=0.001)\n # Add Obstacle to scene\n # self.obstacle = get_cube(self._p, 3.25, 0, 0.25)\n self.ground_plane_mjcf += (ball_body, )\n # # Update bouncyness\n # for i in range(0, len(self.ground_plane_mjcf)):\n # print(\"===\", self._p.getDynamicsInfo(i, -1))\n\n\n\nclass Quadruped(Ant):\n \"\"\"\n same as ant added ball repositioning\n \"\"\"\n\n def __init__(self):\n WalkerBase.__init__(self, \"ant.xml\", \"torso\", \n action_dim=8, obs_dim=34, power=2.5)\n self.walk_target_x = 0 \n self.walk_target_y = 0\n self.init_ball_pos = [0, 0, .25]\n self.init_robot_pos = [0, -1.5, .5]\n\n\n def robot_specific_reset(self, bullet_client):\n WalkerBase.robot_specific_reset(self, bullet_client)\n # Robot to initial position\n self.robot_body.reset_position(self.init_robot_pos)\n\n\n def calc_state(self):\n # standard_state = super().calc_state()\n\n j = np.array([j.current_relative_position() \\\n for j in self.ordered_joints], dtype=np.float32).flatten()\n self.joint_speeds = j[1::2]\n self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)\n body_pose = self.robot_body.pose()\n self.body_real_xyz = body_pose.xyz()\n self.body_xyz = body_pose.xyz()\n self.body_rpy = body_pose.rpy()\n z = self.body_xyz[2]\n if self.initial_z == None:\n self.initial_z = z\n r, p, yaw = self.body_rpy\n self.walk_target_theta = np.arctan2(self.walk_target_y-self.body_xyz[1],\n self.walk_target_x-self.body_xyz[0])\n self.walk_target_dist = np.linalg.norm(\n [self.walk_target_y - self.body_xyz[1], \n self.walk_target_x - self.body_xyz[0]])\n angle_to_target = self.walk_target_theta - yaw\n # rotate speed back to body point of view\n rot_speed = np.array([[np.cos(-yaw), -np.sin(-yaw), 0], \n [np.sin(-yaw), np.cos(-yaw), 0], [0, 0, 1]])\n vx, vy, vz = np.dot(rot_speed, self.robot_body.speed()) \n\n more = np.array(\n [\n z - self.initial_z,\n np.sin(angle_to_target),\n np.cos(angle_to_target),\n 0.3 * vx,\n 0.3 * vy,\n 0.3 * vz, # 0.3 is just scaling typical speed into -1..+1, no physical sense here\n r,\n p\n ],\n dtype=np.float32)\n\n standard_state = np.clip(np.concatenate([more] + \\\n [j] + \\\n [self.feet_contact]), -5, +5)\n # Add Ball position and velocity\n if 'ball_blue' in self.parts.keys():\n ball_body = self.parts['ball_blue']\n augmented_state = np.concatenate([standard_state, \n ball_body.get_position()[:2], \n ball_body.speed()[:2]])\n else:\n augmented_state = np.concatenate([standard_state, \n self.init_ball_pos[:2], \n [0, 0]])\n # 
Add Robot hull position\n augmented_state = np.concatenate([augmented_state, \n self.robot_body.pose().xyz()[:2]])\n\n return augmented_state\n\n\n\nclass QuadrupedKickerBaseEnv(WalkerBaseBulletEnv):\n \"\"\"\n Quadruped Ant agent\n foot_list = ['front_left_foot', 'front_right_foot', 'left_back_foot', 'right_back_foot']\n\n \"\"\"\n MAX_AGENT_STEPS = 100\n\n def __init__(self, render=False):\n self.init = True\n self.init_body = np.zeros(2)\n # self.init_ball_pos = [0.5, -0.5, .25]\n self.init_ball_pos = [0, 0, .25]\n self.init_robot_pos = [0, -1.5, .5]\n\n self.robot = Quadruped()\n WalkerBaseBulletEnv.__init__(self, self.robot, render)\n self.param_ranges = np.vstack([self.action_space.low,\n self.action_space.high]).T\n _offset = 0.25/2\n self.ball_ranges = np.array([[ -6.+_offset, 6.-_offset ],\n [ -3.+_offset, 9-_offset ]]) \n self.env_info = dict(\n num_targets=1,\n num_obstacles=0,\n wall_geoms=[0, 1, 2, 3],\n ball_geom=5,\n target_info= [{'xy': (-0.5, 1.), 'radius': 0.25 }] ,\n striker_ranges=self.param_ranges,\n ball_ranges=self.ball_ranges)\n # self.camera_info = {'camera': {'distance': 10,\n # 'yaw': -0,\n # 'pitch': -69},\n # 'lookat': [0, 0, 0]}\n self.camera_info = {'camera': {'distance': 12, 'yaw': -0, 'pitch': -89},\n 'lookat': [0, 3, 0]}\n self.camera = TopCamera(self)\n self._render_width = 240\n self._render_height = 240\n self.init = False\n\n\n\n def reset(self):\n self.nstep_internal = -1\n self.contact_objects = []\n r = super().reset()\n self.prev_ball_vx = 0\n self.prev_ball_vy = 0\n # self.parts['ball_blue'].reset_velocity(linearVelocity=[10, 10,0]) ##################\n return r\n\n\n def render(self, mode='human', close=False):\n \n if mode == \"human\":\n self.isRender = True\n if self.physicsClientId>=0:\n self.camera_adjust()\n\n if mode != \"rgb_array\":\n return np.array([])\n\n # base_pos = [0, 0, 0]\n # if (hasattr(self, 'robot')):\n # if (hasattr(self.robot, 'body_real_xyz')):\n # base_pos = self.robot.body_real_xyz\n if (self.physicsClientId>=0):\n view_matrix = self._p.computeViewMatrixFromYawPitchRoll(\n cameraTargetPosition=self.camera_info['lookat'],\n roll=0,\n upAxisIndex=2,\n **self.camera_info['camera'])\n proj_matrix = self._p.computeProjectionMatrixFOV(\n fov=60,\n aspect=float(self._render_width) / self._render_height,\n nearVal=0.1,\n farVal=100.0)\n (_, _, px, _, _) = self._p.getCameraImage(\n width=self._render_width,\n height=self._render_height,\n viewMatrix=view_matrix,\n projectionMatrix=proj_matrix,\n renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)\n self._p.configureDebugVisualizer(\n self._p.COV_ENABLE_SINGLE_STEP_RENDERING, 1)\n else:\n px = np.array([[[255,255,255,255]]*self._render_width]\\\n *self._render_height, dtype=np.uint8)\n rgb_array = np.array(px, dtype=np.uint8)\n rgb_array = np.reshape(np.array(px), \n (self._render_height, self._render_width, -1))\n rgb_array = rgb_array[:, :, :3]\n return rgb_array\n\n\n def _get_info_dict(self, state=None, action=np.zeros(8)):\n ball_position = self.robot.parts['ball_blue'].get_position()[:2]\n hull_position = self.robot.body_xyz\n hull_angles = self.robot.body_rpy\n hull_pose = np.append(hull_position, hull_angles)\n info_dict = dict(position=ball_position,\n position_aux=np.concatenate([hull_position, \n hull_angles,\n action]),\n # position_aux=hull_pose,\n velocity_info = self.robot_body.speed(),\n # final_dist=np.linalg.norm(vec), \n # final_ctrl=np.linalg.norm(action),\n contact_objects=self.contact_objects)\n return info_dict\n\n\n def _get_done(self, action, state):\n # episode 
is done when the ball stops, or complete miss\n ball_position = self.robot.parts['ball_blue'].get_position()[:2]\n ball_velocity = self.robot.parts['ball_blue'].speed()[:2]\n ball_pos = np.linalg.norm(ball_position) \n ball_vel = np.linalg.norm(ball_velocity)\n strk_vel = np.linalg.norm(self.unwrapped.robot_body.speed()) \\\n + np.linalg.norm(action) \n # Termination conditions\n done = ball_vel<=_VEL_THRSH and ball_pos>_EPS or \\\n ball_vel<=_VEL_THRSH and strk_vel<=_VEL_THRSH\n # and np.isclose(ball_pos, 0., atol=_EPS)\n # print(\"\\n===\", self.nstep_internal, ball_vel, strk_vel, action)\n # print(\"===\", ball_vel<=_VEL_THRSH , ball_pos>_EPS)\n # print(\"===\", strk_vel<=_VEL_THRSH ,ball_vel<=_VEL_THRSH, np.isclose(ball_pos, 0., atol=_EPS))\n # print(\"===\", done)\n return done and not self.init\n\n\n def _get_reward(self, state):\n # Reward vector contains: distances to targets, ball coordinates (x,y)\n # target_coms = [self.get_body_com(n)[:2] \\\n # for n in self.unwrapped.model.body_names \\\n # if 'target' in n]\n # target_dist = [np.linalg.norm(tc - self.get_body_com(\"ball\")[:2]) \\\n # for tc in target_coms]\n hull_pos = self.robot_body.pose().xyz()[:2]\n target_dist = -np.linalg.norm(hull_pos)\n return target_dist #+ [tuple(state[3:5])]\n\n\n\n def initialize(self, seed_task, **kwargs):\n # restart seed\n self.seed(seed_task)\n self.action_space.seed(seed_task)\n # standard reset\n state = self.reset()\n info_dict = self._get_info_dict(state)\n self.init_body = info_dict['position'][0:2]\n return state, info_dict['position'], info_dict['position_aux']\n\n\n def finalize(self, state, traj_aux, **kwargs):\n \"\"\" Define outcome: target index if within range, or -1 if failed \"\"\"\n reward = self._get_reward(state)\n # returns closest target index if within threshold, otherwise -1\n trial_outcome = -1\n # trial_outcome = np.argmin(reward[:-1]) \\\n # if np.sum(reward[-1])>0. 
and \\\n # np.min(reward[:-1])<=_REW_THRSH else -1\n trial_fitness = -len(np.unique(traj_aux.astype(np.float16), axis=0))\n return np.array([trial_outcome, trial_fitness])\n\n\n def _check_contacts(self, state):\n \"\"\"\n Hack to get proper contacts and ball bounces\n \"\"\"\n ball_x, ball_y = self.robot.parts['ball_blue'].get_position()[:2]\n ball_vx, ball_vy = self.robot.parts['ball_blue'].speed()[:2]\n # check wall vicinity\n wall_W, wall_E = np.isclose(ball_x, self.ball_ranges[0,:], atol=0.3)\n wall_S, wall_N = np.isclose(ball_y, self.ball_ranges[1,:], atol=0.3)\n # check change of direction\n dv_x = np.sign(ball_vx) != np.sign(self.prev_ball_vx)\n dv_y = np.sign(ball_vy) != np.sign(self.prev_ball_vy)\n # evaluate contacts\n contact = np.array([wall_S*dv_y, wall_E*dv_x, wall_N*dv_y, wall_W*dv_x])\n # make a proper bounce, keep 90% of velocity\n if contact.any() and self.nstep_internal>1:\n _, _, vz = self.parts['ball_blue'].speed()\n if dv_x: ball_vx = -0.9*self.prev_ball_vx\n if dv_y: ball_vy = -0.9*self.prev_ball_vy\n self.parts['ball_blue'].reset_velocity(linearVelocity=[ball_vx, \n ball_vy, vz]) \n # print(\"\\n====\", self.nstep_internal, contact, ball_vx, ball_vy)\n # update prev ball_xy\n self.prev_ball_vx = ball_vx\n self.prev_ball_vy = ball_vy\n # return wall indices\n return np.where(contact)[0]+1\n\n\n def step(self, action):\n if self.nstep_internal > self.MAX_AGENT_STEPS: \n action = 0 * action\n self.nstep_internal += 1\n state, rew, done, _ = super().step(action)\n assert len(state)==self.observation_space.shape[0]\n info_dict = self._get_info_dict(state, action)\n done = self._get_done(action, state)\n # # Add wall contacts\n # ball_contacts = []\n # if 'ball_blue' in self.parts.keys():\n # contact_list = self.parts['ball_blue'].contact_list()\n # if len(contact_list):\n # ball_contacts = [cc[2] for cc in contact_list if 'wall' \\\n # in self._p.getBodyInfo(cc[2])[0].decode(\"utf8\")]\n # if len(ball_contacts):\n # self.contact_objects.append(ball_contacts[0])\n # Backup contact estimation\n alt_contact = self._check_contacts(state)\n if len(alt_contact):\n self.contact_objects.append(alt_contact[0])\n return state, rew, done, info_dict\n\n\n\n\nclass AugmentedQuadruped(Ant):\n \"\"\"\n same as ant added ball repositioning\n \"\"\"\n\n def __init__(self, scale):\n WalkerBase.__init__(self, \"ant.xml\", \"torso\", \n action_dim=8, obs_dim=34, power=2.5)\n self.walk_target_x = 0 \n self.walk_target_y = 0\n self.init_ball_pos = [0, 0, .25]\n self.init_robot_pos = [0, -1.5, .5]\n self.SCALE = scale\n\n\n def robot_specific_reset(self, bullet_client):\n WalkerBase.robot_specific_reset(self, bullet_client)\n # Robot to initial position\n self.robot_body.reset_position(self.init_robot_pos)\n\n\n def calc_state(self):\n # standard_state = super().calc_state()\n\n j = np.array([j.current_relative_position() \\\n for j in self.ordered_joints], dtype=np.float32).flatten()\n self.joint_speeds = j[1::2]\n self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)\n body_pose = self.robot_body.pose()\n self.body_real_xyz = body_pose.xyz()\n self.body_xyz = body_pose.xyz()\n self.body_rpy = body_pose.rpy()\n z = self.body_xyz[2]\n if self.initial_z == None:\n self.initial_z = z\n r, p, yaw = self.body_rpy\n self.walk_target_theta = np.arctan2(self.walk_target_y-self.body_xyz[1],\n self.walk_target_x-self.body_xyz[0])\n self.walk_target_dist = np.linalg.norm(\n [self.walk_target_y - self.body_xyz[1], \n self.walk_target_x - self.body_xyz[0]])\n angle_to_target = 
self.walk_target_theta - yaw\n # rotate speed back to body point of view\n rot_speed = np.array([[np.cos(-yaw), -np.sin(-yaw), 0], \n [np.sin(-yaw), np.cos(-yaw), 0], [0, 0, 1]])\n vx, vy, vz = np.dot(rot_speed, self.robot_body.speed()) \n\n more = np.array(\n [\n z - self.initial_z,\n np.sin(angle_to_target),\n np.cos(angle_to_target),\n 0.3 * vx,\n 0.3 * vy,\n 0.3 * vz, # 0.3 is just scaling typical speed into -1..+1, no physical sense here\n r,\n p\n ],\n dtype=np.float32)\n\n standard_state = np.clip(np.concatenate([more] + \\\n [j] + \\\n [self.feet_contact]), -5, +5)\n # Add Ball position and velocity\n if 'ball_blue' in self.parts.keys():\n ball_body = self.parts['ball_blue']\n augmented_state = np.concatenate([\n standard_state, \n self.SCALE * ball_body.get_position()[:2], \n ball_body.speed()[:2]])\n else:\n augmented_state = np.concatenate([\n standard_state, \n self.SCALE * np.array(self.init_ball_pos[:2]), \n [0, 0]])\n # Add Robot hull position\n augmented_state = np.concatenate([\n augmented_state, \n self.SCALE * self.robot_body.pose().xyz()[:2]])\n\n return augmented_state\n\n\n\n\n\nclass QuadrupedKickerEnv(QuadrupedKickerBaseEnv):\n\n def create_single_player_scene(self, bullet_client):\n self.stadium_scene = NormalScene(\n bullet_client=bullet_client,\n ball_pos=self.init_ball_pos,\n gravity=9.8,\n timestep=TIME_STEP_FIXED / FRAME_SKIP,\n frame_skip=FRAME_SKIP)\n return self.stadium_scene\n\n\n\nclass QuadrupedAugmentedKickerEnv(QuadrupedKickerEnv):\n\n def __init__(self, render=False):\n self.init = True\n self.init_body = np.zeros(2)\n # self.init_ball_pos = [0.5, -0.5, .25]\n self.init_ball_pos = [0, 0, .25]\n self.init_robot_pos = [0, -1.5, .5]\n\n self.robot = AugmentedQuadruped(scale=1)\n WalkerBaseBulletEnv.__init__(self, self.robot, render)\n self.param_ranges = np.vstack([self.action_space.low,\n self.action_space.high]).T\n _offset = 0.25/2\n self.ball_ranges = np.array([[ -6.+_offset, 6.-_offset ],\n [ -3.+_offset, 9-_offset ]]) \n self.env_info = dict(\n num_targets=1,\n num_obstacles=0,\n wall_geoms=[0, 1, 2, 3],\n ball_geom=5,\n target_info= [{'xy': (-0.5, 1.), 'radius': 0.25 }] ,\n striker_ranges=self.param_ranges,\n ball_ranges=self.ball_ranges)\n # self.camera_info = {'camera': {'distance': 10,\n # 'yaw': -0,\n # 'pitch': -69},\n # 'lookat': [0, 0, 0]}\n self.camera_info = {'camera': {'distance': 12, 'yaw': -0, 'pitch': -89},\n 'lookat': [0, 3, 0]}\n self.camera = TopCamera(self)\n self._render_width = 240\n self._render_height = 240\n self.init = False\n\n\n\n\nclass QuadrupedKickerAugmentedMixScaleEnv(QuadrupedKickerEnv):\n\n def __init__(self, render=False):\n self.init = True\n self.init_body = np.zeros(2)\n # self.init_ball_pos = [0.5, -0.5, .25]\n self.init_ball_pos = [0, 0, .25]\n self.init_robot_pos = [0, -1.5, .5]\n\n self.robot = AugmentedQuadruped(scale=100)\n WalkerBaseBulletEnv.__init__(self, self.robot, render)\n self.param_ranges = np.vstack([self.action_space.low,\n self.action_space.high]).T\n _offset = 0.25/2\n self.ball_ranges = np.array([[ -6.+_offset, 6.-_offset ],\n [ -3.+_offset, 9-_offset ]]) \n self.env_info = dict(\n num_targets=1,\n num_obstacles=0,\n wall_geoms=[0, 1, 2, 3],\n ball_geom=5,\n target_info= [{'xy': (-0.5, 1.), 'radius': 0.25 }] ,\n striker_ranges=self.param_ranges,\n ball_ranges=self.ball_ranges)\n # self.camera_info = {'camera': {'distance': 10,\n # 'yaw': -0,\n # 'pitch': -69},\n # 'lookat': [0, 0, 0]}\n self.camera_info = {'camera': {'distance': 12, 'yaw': -0, 'pitch': -89},\n 'lookat': [0, 3, 0]}\n 
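# Identical to QuadrupedAugmentedKickerEnv above except for scale=100 in the robot:\n        # ball and hull coordinates enter the observation multiplied by 100 while the\n        # velocity features stay unscaled, giving the mixed-scale variant that the\n        # class name suggests.\n        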
self.camera = TopCamera(self)\n self._render_width = 240\n self._render_height = 240\n self.init = False\n\n\n###############################################################################\n###############################################################################\n###############################################################################\n\n\nclass QuadrupedKickerBoundedEnv(QuadrupedKickerBaseEnv):\n\n def create_single_player_scene(self, bullet_client):\n self.stadium_scene = BoundedStadiumScene(\n bullet_client=bullet_client,\n ball_pos=self.init_ball_pos,\n gravity=9.8,\n timestep=TIME_STEP_FIXED / FRAME_SKIP,\n frame_skip=FRAME_SKIP)\n return self.stadium_scene\n","sub_path":"sac/envs/pybullet/env_quadruped_kicker.py","file_name":"env_quadruped_kicker.py","file_ext":"py","file_size_in_byte":26046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"105733780","text":"# -*- coding:utf-8 -*-\n\nfrom flask import Blueprint, request, render_template\nfrom flask_user import current_user, login_required, roles_required\nfrom ..services import cart_service\nfrom ..helpers.flask_helper import json_response\nfrom ..models import ProductItem\n\n\nbp = Blueprint('user_cart', __name__, url_prefix='/carts')\n\n\n@bp.route('/', methods=['GET'])\n@roles_required('user')\ndef cart_page():\n user_id = None\n if current_user.is_authenticated():\n user_id = current_user._get_current_object().id\n cart_items= cart_service.get_items(user_id)\n product_items_with_quantity = [(ProductItem.from_cache_by_id(product_item_id), quantity) for product_item_id, quantity in cart_items]\n return render_template('cart.html', product_items_with_quantity=product_items_with_quantity)\n\n\n@bp.route('/save', methods=['POST'])\ndef save_cart_items():\n user_id = None\n if current_user.is_authenticated():\n user_id = current_user._get_current_object().id\n\n cart_items = request.json\n cart_items = filter(_check_cart_item, cart_items)\n cart_service.save_items(cart_items, user_id=user_id)\n return json_response(success=True)\n\n\n@bp.route('/list')\ndef list_cart_items():\n user_id = None\n if current_user.is_authenticated():\n user_id = current_user._get_current_object().id\n cart_items= cart_service.get_items(user_id)\n return json_response(items=[(ProductItem.from_cache_by_id(product_item_id).__json__(include_keys=['product.name']), quantity) for product_item_id, quantity in cart_items])\n\n\ndef _check_cart_item(cart_item):\n product_item_id = cart_item[0]\n quantity = cart_item[1]\n\n try:\n return int(product_item_id) > 0 and int(quantity) > 0\n except ValueError:\n return False\n","sub_path":"ca/frontend/carts.py","file_name":"carts.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"423677727","text":"#############################################################################\n# File : MixedFileOwnerships.py\n# Package : rpmlint\n# Author : Malte Kraus\n# Purpose : Check for files which have a parent with insecure owner.\n#############################################################################\n\nfrom AbstractCheck import AbstractCheck\nfrom Filter import addDetails, printError\n\n\nclass MixedFileOwnerships(AbstractCheck):\n def __init__(self):\n super().__init__(\"MixedFileOwnerships\")\n\n def check(self, pkg):\n if pkg.isSource():\n return\n\n files = pkg.files()\n for path, info in files.items():\n parent = path.rpartition(\"/\")[0]\n if parent 
not in files:\n                # can't figure out who owns the parent directory if it's part of another RPM :(\n                continue\n\n            parent_owner = files[parent].user\n\n            # root user is trusted\n            if info.user != parent_owner and parent_owner not in ('root', '0'):\n                printError(pkg, 'file-parent-ownership-mismatch', path, \"owned by\", info.user,\n                           \"is stored in directory owned by different user\", parent_owner)\n\n\ncheck = MixedFileOwnerships()\n\naddDetails(\"file-parent-ownership-mismatch\",\n           \"\"\"A file or directory is stored in a directory owned by another unprivileged user.\n           This is a security issue since the owner of the parent directory can replace this\n           file/directory with a different one.\"\"\")\n","sub_path":"MixedFileOwnerships.py","file_name":"MixedFileOwnerships.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"439651849","text":"import re\n\nrx_please = re.compile(r'^[Pp]lease')\nrx_thanks = re.compile(r'([Tt]hanks)|([Tt]hank you)(\\.)?$')\n\nu_inp1 = input(\"If you don't say please first, I won't listen.\\n~> \")\nresult1 = rx_please.search(u_inp1)\n\nif result1:\n    print(\"Alright, I heard you.\")\nelse:\n    print(\"What was that? Sounded like an ungrateful mouse.\")\n    \nu_inp2 = input(\"If you don't say thank you last, I'll scream.\\n~> \")\nresult2 = rx_thanks.search(u_inp2)\n\nif result2:\n    print(\"Phew, glad you listened.\")\nelse:\n    print(\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\")","sub_path":"010-regex-9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"195077282","text":"import numpy as np\nimport os, sys, csv, cv2, time\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib \nfrom skimage.util import view_as_windows\nfrom keras.models import load_model\n\ndef _show_countmaps(pred,true,diff):\n    f, ax = plt.subplots(1,3)\n    ax1, ax2, ax3 = ax.flatten()\n    ax1.imshow(pred)\n    ax1.set_title('pred')\n    ax2.imshow(true)\n    ax2.set_title('true')\n    ax3.imshow(diff)\n    ax3.set_title('diff')\n    plt.show()\n    \n    \nif __name__ == \"__main__\":\n    start_time = time.time()    \n    \n    which_dataset = 'mbm' #'adipocyte' #'vgg'\n    file_names = sorted(os.listdir('input/{}/clean_testset'.format(which_dataset)))\n    \n    models = ['random46.h5','random99.h5','random27.h5']\n    models = ['random46.h5']\n    models_mae = []\n    \n    for model_id in models:\n        mean_abs_error = []\n        model = load_model('checkpoints/{}/{}'.format(which_dataset,model_id))\n        patch_size = int(model.inputs[0].shape[1])    \n        \n        for i,filename in enumerate(file_names):\n            print()\n            print('Model {}, Processing image {} (number {})'.format(model_id,filename,i))\n            print()\n            image_time = time.time()\n            \n            clean_filename = 'input/{}/clean_testset/{}'.format(which_dataset,filename)\n            blackdotted_filename = 'input/{}/blackdotted_testset/{}'.format(which_dataset,filename)\n            \n            clean_image = cv2.imread(clean_filename)    \n            clean_image = cv2.cvtColor(clean_image, cv2.COLOR_BGR2RGB)\n            clean_image = clean_image/255\n            blackdotted_image = cv2.imread(blackdotted_filename,0)\n            \n            #print(clean_image.shape)\n            #print(blackdotted_image.shape)\n            #_show_patch(clean_image,blackdotted_image)\n            #print(np.max(blackdotted_image)) #check if threshold it's the same\n            #continue\n            \n            #blackdotted_image = cv2.threshold(blackdotted_image, 75, 255, cv2.THRESH_BINARY)[1]    \n            blackdotted_image = blackdotted_image/255\n            \n            total_counts = 
np.sum(blackdotted_image)\n \n #print('PRE PADDING CLEAN {}'.format(clean_image.shape)) \n #print('PRE PADDING BLACKDOTTED {}'.format(blackdotted_image.shape)) \n #print() \n \n clean_image = np.pad(clean_image, ((patch_size-1,patch_size-1),(patch_size-1,patch_size-1),(0,0)), 'constant', constant_values=(0,0))\n blackdotted_image = np.pad(blackdotted_image, ((patch_size-1,patch_size-1),(patch_size-1,patch_size-1)), 'constant', constant_values=(0,0))\n \n #print('POST PADDING CLEAN {}'.format(clean_image.shape)) \n #print('POST PADDING BLACKDOTTED {}'.format(blackdotted_image.shape))\n #print() \n \n clean_patches = view_as_windows(clean_image,(patch_size,patch_size,3))\n clean_patches = clean_patches.reshape((clean_patches.shape[0],clean_patches.shape[1],patch_size,patch_size,3))\n blackdotted_patches = view_as_windows(blackdotted_image,(patch_size,patch_size))\n \n #print('NUMBER OF CLEAN PATCHES {}'.format(clean_patches.shape))\n #print('NUMBER OF BLACKDOTTED PATCHES {}'.format(blackdotted_patches.shape))\n #print()\n\n clean_patches = clean_patches.reshape(clean_patches.shape[0]*clean_patches.shape[1],clean_patches.shape[2],clean_patches.shape[3],clean_patches.shape[4])\n \n #print('FLATTEN OF CLEAN PATCHES {}'.format(clean_patches.shape))\n #print()\n \n pred_countmap = model.predict(clean_patches).reshape((blackdotted_patches.shape[0],blackdotted_patches.shape[1])) \n true_countmap = np.zeros((blackdotted_patches.shape[0],blackdotted_patches.shape[1]))\n \n for r in range(blackdotted_patches.shape[0]):\n for c in range(blackdotted_patches.shape[1]):\n true_countmap[r,c] = np.sum(blackdotted_patches[r,c,:,:])\n \n pred_countmap = pred_countmap/(patch_size*patch_size)\n true_countmap = true_countmap/(patch_size*patch_size)\n \n #print('PRED COUNTMAP SHAPE {}'.format(pred_countmap.shape))\n #print('TRUE COUNTMAP SHAPE {}'.format(true_countmap.shape))\n #print()\n \n true_count = np.sum(true_countmap)\n pred_count = np.sum(pred_countmap)\n mean_abs_error.append(abs(true_count-pred_count))\n \n print('TOTAL COUNTS IN IMAGE {}'.format(total_counts))\n print('TRUE_COUNTMAP: TOTAL {} | MAX {} | MIN {}'.format(true_count,true_countmap.max(),true_countmap.min()))\n print('PRED_COUNTMAP: TOTAL {} | MAX {} | MIN {}'.format(pred_count,pred_countmap.max(),pred_countmap.min()))\n print()\n \n #_show_countmaps(pred_countmap,true_countmap,np.abs(pred_countmap - true_countmap))\n \n elapsed = (time.time() - image_time)\n print('Image Processed in {} seconds'.format(elapsed))\n print('-'*30)\n print()\n \n models_mae.append(np.mean(mean_abs_error)) \n print('MEAN ABSOLUTE ERROR OF {} ON TEST SET: {}'.format(model_id,np.mean(mean_abs_error)))\n print() \n \n print('MAE LIST {}'.format(models_mae))\n print('MAE MEAN {}'.format(np.mean(models_mae)))\n print('MAE STD {}'.format(np.std(models_mae)))\n print() \n \n elapsed = (time.time() - start_time)/60\n print()\n print('Total Time: {}'.format(elapsed)) \n \n \n \n \n","sub_path":"eval_count_maps.py","file_name":"eval_count_maps.py","file_ext":"py","file_size_in_byte":5724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"205894976","text":"# (C) Copyright NuoDB, Inc. 
2017-2020\n#\n# This source code is licensed under the MIT license found in the LICENSE\n# file in the root directory of this source tree.\n\nimport logging\nimport os\nimport re\nimport socket\nimport threading\nimport time\nimport datetime\n\nfrom copy import deepcopy\nfrom pynuoca.nuoca_plugin import NuocaMPInputPlugin\nfrom pynuoca.nuoca_util import nuoca_log\n\ntry:\n from pynuoadmin import nuodb_mgmt\nexcept ImportError:\n import nuodb_mgmt\n\n# NuoAdminNuoMonitor plugin\n#\n# NOTE: This is an Alpha version of a NuoCA plugin that can collect NuoDB\n# Engine metrics (NuoMon) from the NuoDB new Admin layer (which is a Beta\n# feature for NuoDB release 3.2). This NuoCA plugin has minimal testing and\n# is expected to change (or be incorporated into the NuoCA NuoMon plugin)\n#\n# Example NuoAdminNuoMonitor plugin configuration:\n#\n# - NuoAdminNuoMonitor:\n# description : Collection from internal nuoAdminNuoMonitor tool\n# database_regex_pattern: dbt2\n# api_server: localhost:8888\n# client_key: /opt/nuodb/tls-config/keys/nuocmd.pem\n# server_cert: None\n# server_id: nuoadmin0\n\nclass BaseCollector(object):\n\n def message_received(self, root):\n pass\n\n def invalid_message(self, message):\n pass\n\n def closed(self):\n pass\n\n\nclass BaseMetricsCollector(BaseCollector):\n def __init__(self):\n super(BaseMetricsCollector, self).__init__()\n self.__first = True\n self.__process = None\n\n @property\n def process(self):\n return self.__process\n\n @process.setter\n def process(self, p):\n self.__process = p\n\n def __get_item(self, attrs):\n units = [\"COUNT\", \"MILLISECONDS\", \"STATE\",\n \"NUMBER\", \"PERCENT\", \"IDENTIFIER\",\n \"DELTA\"]\n return {\"unit\": units[int(attrs['units']) - 1],\n \"description\": attrs['header']}\n\n # @trace\n def message_received(self, root):\n def parseStr(x):\n try:\n return int(x)\n except:\n return x\n\n items = {}\n if root.tag == \"Items\":\n for child in root:\n items[child.attrib['name']] = self.__get_item(child.attrib)\n items['Database'] = dict(unit=\"IDENTIFIER\", description=\"Database Name\")\n items['Region'] = dict(unit=\"IDENTIFIER\", description=\"Region Name\")\n self.onStart(items)\n return None\n elif root.tag == 'Status':\n new_values = dict([(k, parseStr(v)) for k, v in root.attrib.iteritems()])\n if self.__first:\n new_values['Database'] = self.process.db_name\n new_values['Region'] = self.process.region_name\n self.__first = False\n new_values['TimeStamp'] = int(time.time() * 1000.0)\n self.onChange(new_values)\n return None\n\n def closed(self):\n self.onEnd()\n pass\n\n\nclass MetricsCollector(BaseMetricsCollector):\n \"\"\" Base class for metrics collection.\n Remembers previous values\"\"\"\n\n def __init__(self):\n super(MetricsCollector, self).__init__()\n self.__metrics = {}\n self.__values = {}\n pass\n\n @property\n def metrics(self):\n return self.__metrics\n\n @property\n def values(self):\n return self.__values\n\n def init(self, args):\n return self\n\n def onStart(self, metrics):\n \"\"\" remembers metrics \"\"\"\n self.__metrics = metrics\n\n def onChange(self, metrics):\n \"\"\" remembers previous values \"\"\"\n self.__values.update(metrics)\n\n def onEnd(self):\n \"\"\" zero all values \"\"\"\n zeroMetrics = {}\n for k, v in self.__values.iteritems():\n if v != 0 and type(v) is int:\n zeroMetrics[k] = 0\n zeroMetrics['TimeStamp'] = int(time.time() * 1000.0)\n self.onChange(zeroMetrics)\n pass\n\n\nclass NuoAdminNuoMonMessageConsumer(object):\n \"\"\" NuoAdminNuoMonMessageConsumer\"\"\"\n\n _conn = None\n 
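# Class-level defaults; __init__ overwrites both: _conn is the pynuoadmin\n    # connection used to stream engine stats, and _nuo_monitor_obj is the owning\n    # plugin whose collect queue received messages are appended to.\n    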
_nuo_monitor_obj = None\n\n def __init__(self, conn, nuo_monitor_obj):\n self._conn = conn\n self._nuo_monitor_obj = nuo_monitor_obj\n self._process_metrics_dict = {}\n\n def get_stats(self, db_name=None, start_id=None, server_id=None):\n while self._nuo_monitor_obj._enabled:\n try:\n for process_msg in self._get_messages(None, db_name,\n start_id, server_id):\n if process_msg and 'msg' in process_msg:\n self._nuo_monitor_obj.nuoAdminNuoMonitor_collect_queue.append(\n deepcopy(process_msg['msg']))\n except Exception as e:\n nuoca_log(logging.ERROR, \"Exception in NuoAdminNuoMonMessage\"\n \"ConsumerNuoAdminNuoMon.get_stats: %s\"\n % str(e))\n time.sleep(10)\n\n def _get_messages(self, log_options, db_name, start_id, server_id,\n include_process=False):\n\n # message generator is for a single process if `start_id` is specified;\n # otherwise, it is aggregated\n if start_id is not None:\n msg_stream = self._conn.monitor_process(start_id, log_options)\n else:\n msg_stream = self._conn.monitor_database(db_name, log_options,\n keep_open=True)\n # filter messages by name based on whether we are streaming stats or\n # log messages\n message_name = 'Status' if log_options is None else 'LogMessage'\n for process, xml_message in msg_stream:\n if server_id:\n if process.server_id != server_id:\n continue\n if process.start_id not in self._process_metrics_dict:\n mc = MetricsCollector()\n self._process_metrics_dict[process.start_id] = mc\n mc.process = process\n else:\n mc = self._process_metrics_dict[process.start_id]\n mc.message_received(xml_message)\n items = mc.values\n json_msg = items\n if len(json_msg) != 0:\n # add timestamp to message attributes; TODO: in the future, we\n # may want to include timestamp on the sending side\n if 'Time' not in json_msg:\n json_msg['Time'] = self._get_timestamp()\n # combine process and message; if `include_process`, return all\n # process attributes; otherwise just return start ID\n\n if include_process:\n process._dict['msg'] = json_msg\n yield process\n else:\n yield dict(startId=process.start_id, msg=json_msg)\n\n @staticmethod\n def _get_timestamp(time_sec=None):\n if time_sec is None:\n time_sec = time.time()\n dt = datetime.datetime.fromtimestamp(time_sec)\n return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')\n\n\nclass NuoAdminNuoMonitorPlugin(NuocaMPInputPlugin):\n DEFAULT_API_SERVER = 'localhost:8888'\n\n def __init__(self, parent_pipe):\n super(NuoAdminNuoMonitorPlugin, self).__init__(parent_pipe, 'NuoAdminNuoMon')\n self._config = None\n self._nuocaCollectionName = None\n self._api_server = NuoAdminNuoMonitorPlugin.DEFAULT_API_SERVER\n self._server_id = None\n self._client_key = None\n self._server_cert = None\n self._enabled = False\n self._numon_handler_ready = False\n self._domain = None\n self._domain_metrics = None\n self._domain_metrics_host = None\n self._database_regex_pattern = '.*'\n self._host_uuid_shortname = False\n self._thread = None\n self._nuoAdminNuoMonitor_collect_queue = []\n self._collection_interval = 30\n\n def _get_admin_conn(self):\n client_key = self._client_key\n if client_key is not None and ',' in client_key:\n client_key = client_key.split(',')\n if len(client_key) != 2:\n raise ValueError(\n 'Expected at most two tokens for --client-key')\n server_cert = self._server_cert\n api_server = self._api_server\n if (not api_server.startswith('http://') and\n not api_server.startswith('https://')): # noqa\n if client_key is None and server_cert is None:\n api_server = 'http://' + api_server\n else:\n api_server = 'https://' + 
api_server\n verify = server_cert\n if not verify:\n verify = False\n\n return nuodb_mgmt.AdminConnection(api_server, client_key, verify)\n\n def wait_for_terminate(self):\n if self._domain:\n self._domain.waitForTerminate()\n\n @property\n def nuoAdminNuoMonitor_collect_queue(self):\n return self._nuoAdminNuoMonitor_collect_queue\n\n def _NuoAdminNuoMon_handler_thread(self):\n self._numon_handler_ready = True\n self._domain_metrics.get_stats(server_id=self._server_id)\n\n def startup_NuoAdminNuoMon(self):\n try:\n self._numon_handler_ready = False\n self._conn = self._get_admin_conn()\n self._enabled = True\n self._domain_metrics = NuoAdminNuoMonMessageConsumer(self._conn, self)\n self._thread = threading.Thread(target=self._NuoAdminNuoMon_handler_thread)\n self._thread.daemon = True\n self._thread.start()\n try_count = 0\n while not self._numon_handler_ready and try_count < 5:\n try_count += 1\n time.sleep(1)\n return self._numon_handler_ready\n except Exception as e:\n nuoca_log(logging.ERROR, \"NuoAdminNuoMon Startup error: %s\" % str(e))\n return False\n\n def startup(self, config=None):\n try:\n self._config = config\n\n # Validate the configuration.\n required_config_items = ['api_server']\n if not self.has_required_config_items(config, required_config_items):\n return False\n\n # Don't reveal the domain password in the NuoCA log file.\n display_config = {}\n display_config.update(config)\n display_config['domain_password'] = ''\n nuoca_log(logging.INFO, \"NuoAdminNuoMon: plugin config: %s\" %\n str(display_config))\n\n self._api_server = os.path.expandvars(config['api_server'])\n if 'server_id' in config:\n self._server_id = os.path.expandvars(config['server_id'])\n if 'client_key' in config:\n self._client_key = os.path.expandvars(config['client_key'])\n if 'server_cert' in config:\n self._server_cert = os.path.expandvars(config['server_cert'])\n if 'nuocaCollectionName' in config:\n self._nuocaCollectionName = config['nuocaCollectionName']\n if 'domain_metrics_host' in config:\n self._domain_metrics_host = os.path.expandvars(config['domain_metrics_host'])\n if self._domain_metrics_host == 'localhost':\n self._domain_metrics_host = socket.gethostname()\n if 'database_regex_pattern' in config:\n self._database_regex_pattern = config['database_regex_pattern']\n if 'host_uuid_shortname' in config:\n self._host_uuid_shortname = config['host_uuid_shortname']\n self.startup_NuoAdminNuoMon()\n return self._numon_handler_ready\n except Exception as e:\n nuoca_log(logging.ERROR, \"NuoAdminNuoMon Plugin Startup error: %s\" % str(e))\n return False\n\n def shutdown(self):\n self._enabled = False\n pass\n\n def collect(self, collection_interval):\n uuid_hostname_regex = \\\n '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}-'\n rval = None\n self._collection_interval = collection_interval\n try:\n nuoca_log(logging.DEBUG, \"NuoAdminNuoMon: collect()\")\n base_values = super(NuoAdminNuoMonitorPlugin, self).collect(collection_interval)\n collection_count = len(self._nuoAdminNuoMonitor_collect_queue)\n nuoca_log(logging.DEBUG, \"NuoAdminNuoMon: collection_count %s\" %\n str(collection_count))\n if not collection_count:\n return rval\n\n rval = []\n for i in range(collection_count):\n collected_dict = self._nuoAdminNuoMonitor_collect_queue.pop(0)\n if self._domain_metrics_host and 'Hostname' in collected_dict:\n if collected_dict['Hostname'] != self._domain_metrics_host:\n continue\n if self._nuocaCollectionName:\n collected_dict['nuocaCollectionName'] = self._nuocaCollectionName\n 
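# Filter rows by the configured database regex; when host_uuid_shortname is set,\n        # also derive a short host label from the part of the hostname after the\n        # 36-character UUID prefix, tagged with node type (TE/SM) and process id.\n        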
if 'Database' in collected_dict:\n          m = re.search(self._database_regex_pattern, collected_dict['Database'])\n          if m:\n            if self._host_uuid_shortname:\n              m2 = re.search(uuid_hostname_regex, collected_dict['Hostname'])\n              if m2:\n                shortid = collected_dict['Hostname'][37:]\n                if 'NodeType' in collected_dict:\n                  if collected_dict['NodeType'] == 'Transaction':\n                    shortid += \"(TE)\"\n                  elif collected_dict['NodeType'] == 'Storage':\n                    shortid += \"(SM)\"\n                shortid_with_pid = shortid + str(collected_dict['ProcessId'])\n                collected_dict['HostShortID'] = shortid\n                collected_dict['HostShortIDwithPID'] = shortid_with_pid\n            rval.append(collected_dict)\n          else:\n            rval.append(collected_dict)\n\n    except Exception as e:\n      nuoca_log(logging.ERROR, str(e))\n    return rval\n","sub_path":"pynuoca/plugins/input/NuoAdminNuoMonitorPlugin.py","file_name":"NuoAdminNuoMonitorPlugin.py","file_ext":"py","file_size_in_byte":14197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"530202779","text":"# Creating a blockchain (2nd test)\n\n\nimport datetime\nimport hashlib\nimport json # to encode the blocks before hashing them\nfrom flask import Flask,jsonify \n\n# 1 - Building the chain of blocks itself\nclass Blockchain:\n    def __init__(self):\n        self.chain=[]\n        self.create_block(proof=1, previous_hash='0') # creation of the genesis block\n    def create_block(self,proof,previous_hash):\n        # build the block dictionary\n        block={\n            'index': len(self.chain)+1,\n            'timestamp': str(datetime.datetime.now()),\n            'proof': proof,\n            'previous_hash':previous_hash\n        }\n        self.chain.append(block)\n        return block\n\n    def get_previous_block(self):\n        return self.chain[-1] # with -1 we get the last block of the chain\n\n    def proof_of_work(self,previous_proof): # the number (piece of data) that miners search for in order to mine, i.e. add a block to the chain\n        new_proof=1 \n        check_proof= False\n        \n        while check_proof is False:\n            # define the operation; since this is a simple example it is: the square of new_proof minus the square of previous_proof\n            # then everything is hashed with sha256\n            hash_operation=hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()\n            # difficulty level -> the first 4 characters must be zeros (4 here because this is an example; current coins use 20 or more)\n            if hash_operation[:4]=='0000':\n                check_proof=True\n            else:\n                new_proof+=1\n        return new_proof\n    def hash(self, block):\n        encode_block = json.dumps(block, sort_keys=True).encode() # block dict sorted by its keys and encoded for sha256\n        return hashlib.sha256(encode_block).hexdigest()\n    def is_chain_valid (self, chain):\n        previous_block = chain[0] \n        block_index = 1\n        while block_index < len(chain):\n            block = chain[block_index]\n            if block['previous_hash'] != self.hash(previous_block):\n                return False\n            previous_proof = previous_block['proof'] # previous proof\n            proof = block['proof'] # current proof\n            hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()\n            if hash_operation[:4]!= '0000':\n                return False\n            \n            previous_block = block\n            block_index += 1 \n        return True\n\n# 2 - Mining the blockchain \n# Flask quickstart\napp=Flask(__name__) \nblockchain = Blockchain()\n# mining a new block\n@app.route('/mine_block', methods=['GET'])\n\ndef mine_block ():\n    previous_block = blockchain.get_previous_block()\n    previous_proof = previous_block['proof']\n    proof = blockchain.proof_of_work(previous_proof)\n    previous_hash = blockchain.hash(previous_block)\n    block = blockchain.create_block(proof, previous_hash)\n\n    response = {'message': 'Congratulations, you have mined a block!',\n                'index': block['index'],\n                'timestamp': block['timestamp'],\n                'proof':block['proof'],\n                'previous_hash':block['previous_hash']\n                }\n    return jsonify(response), 200 # using HTTP status code 200 as an example; another one could be used\n\n\n# getting the full chain\n@app.route('/get_chain', methods=['GET'])\n\ndef get_chain():\n    response={ 'chain': blockchain.chain,\n               'length': len(blockchain.chain)\n             }  \n    return jsonify(response), 200\n\n# checking the validity of the chain of blocks\n@app.route('/is_valid', methods=['GET'])\n\ndef is_valid():\n    is_valid = blockchain.is_chain_valid(blockchain.chain)\n    if is_valid:\n        response = {'message' : 'All good, the blockchain is valid' }\n    else:\n        response = {'message' : 'Error, the blockchain is NOT valid' }\n    return jsonify(response), 200\n\n# running the app\nif __name__ == '__main__':\n    app.run( host='0.0.0.0', port='5000', debug=True, use_reloader=False)  ","sub_path":"Modulo 1/Blockchain_Test.py","file_name":"Blockchain_Test.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"327183244","text":"'''\r\nThis file stores objects that parse and store raw data from an Animas Vibe OUS Spreadsheet.\r\n'''\r\n\r\nimport os, pythoncom, win32com.client\r\nfrom pywintypes import Time\r\n\r\nclass ReportCell():\r\n    '''\r\n    This class represents one cell from one line from an Animas-Dexcom Report Spreadsheet.\r\n    '''\r\n    def __init__(self, background, foreground, value):\r\n        '''\r\n        background = (r,g,b)\r\n        foreground = (r,g,b)\r\n        value = string\r\n        '''\r\n        self.background = background\r\n        self.foreground = foreground\r\n        self.value = value\r\n\r\nclass ReportLine():\r\n    '''\r\n    This class represents one line from an Animas-Dexcom Report Spreadsheet.\r\n    '''\r\n    def __init__(self, data):\r\n        '''\r\n        data = a list of dict objects. 
Each dict object has the following keys which store a ReportCell object: header_1, header_2, data\r\n '''\r\n pass\r\n \r\n\r\nclass Report():\r\n '''\r\n This class represents one Animas-Dexcom Report Spreadsheet.\r\n '''\r\n def __init__(self, file_path):\r\n self.HEADER_1_ROW = 5 # The row number of the 1st header\r\n self.HEADER_2_ROW = 6 # The row number of the 2nd header (with the column names)\r\n self.COLUMN_START = 1 # The column number of the 1st column\r\n self.COLUMN_END = 55 # The column number of the last column\r\n \r\n self.excel = None\r\n \r\n self.load_data_from_file(file_path)\r\n \r\n def load_data_from_file(self, file_path):\r\n # Open the file\r\n self.open_excel_file(file_path)\r\n e = self.excel\r\n \r\n # Output arrays\r\n self.header_1, self.header_2 = self.load_headers()\r\n self.rows = self.load_rows()\r\n \r\n # Close the workbook\r\n e.ActiveWorkbook.Close(False)\r\n \r\n def open_excel_file(self, file_path):\r\n '''\r\n Opens the report in Microsoft Excel.\r\n \r\n file_path = full path to .xlsx file\r\n '''\r\n if self.excel is None:\r\n win32com.client.gencache.is_readonly = False\r\n win32com.client.gencache.GetGeneratePath()\r\n self.excel = win32com.client.gencache.EnsureDispatch('Excel.Application')\r\n self.excel.Visible = True\r\n \r\n try:\r\n self.excel.Windows(os.path.basename(file_path)).Activate()\r\n except:\r\n self.excel.Workbooks.Open(file_path)\r\n \r\n def float_to_rgb(self, fColor):\r\n '''\r\n This function converts the color values from Excel (type = float) to a hex value (stored as a string)\r\n '''\r\n red = int(fColor) & 255\r\n green = (int(fColor) >> 8) & 255\r\n blue = (int(fColor) >> 16) & 255\r\n \r\n return (red, green, blue)\r\n \r\n def load_headers(self):\r\n e = self.excel\r\n \r\n header_1 = []\r\n header_2 = []\r\n \r\n # It is faster to get the values of a range of cells at once\r\n header_2_values = e.Range(e.Cells(self.HEADER_2_ROW, self.COLUMN_START), e.Cells(self.HEADER_2_ROW, self.COLUMN_END)).Value[0]\r\n \r\n # Load Header Cells\r\n for i in xrange(self.COLUMN_START, self.COLUMN_END+1):\r\n # Get Header 1 Cell data, create a ReportCell object, and add it to header_1\r\n header_1_background = self.float_to_rgb(e.Cells(self.HEADER_1_ROW, i).Interior.Color)\r\n header_1_foreground = self.float_to_rgb(e.Cells(self.HEADER_1_ROW, i).Font.Color)\r\n header_1_value = '' \r\n header_1_cell = ReportCell(header_1_background, header_1_foreground, header_1_value) \r\n header_1.append(header_1_cell)\r\n \r\n # Get Header 2 Cell data, create a ReportCell object, and add it to header_2\r\n header_2_background = self.float_to_rgb(e.Cells(self.HEADER_2_ROW, i).Interior.Color)\r\n header_2_foreground = self.float_to_rgb(e.Cells(self.HEADER_2_ROW, i).Font.Color)\r\n header_2_value = self.convert_to_unicode(header_2_values[i-1])\r\n header_2_cell = ReportCell(header_2_background, header_2_foreground, header_2_value) \r\n header_2.append(header_2_cell)\r\n \r\n return header_1, header_2\r\n \r\n def load_rows(self):\r\n e = self.excel\r\n \r\n e.Cells(self.HEADER_2_ROW, 2).Select()\r\n last_row, last_col = e.Selection.End(-4121).Row, e.Selection.End(-4161).Column # -4121 = xlBottom, -4161 = xlRight\r\n \r\n rows = []\r\n \r\n rows_values = e.Range(e.Cells(self.HEADER_2_ROW+1, self.COLUMN_START), e.Cells(last_row, last_col)).Value\r\n for i, values in enumerate(rows_values):\r\n row = []\r\n for j, value in enumerate(values):\r\n data_cell_background = None\r\n data_cell_foreground = None\r\n data_cell_value = 
self.convert_to_unicode(value)\r\n                data_cell = ReportCell(data_cell_background, data_cell_foreground, data_cell_value)\r\n                row.append(data_cell)\r\n            rows.append(row)\r\n        \r\n        return rows\r\n    \r\n    def convert_to_unicode(self, value):\r\n        if value is None:\r\n            return u''\r\n        if type(value) == unicode:\r\n            return value.strip()\r\n        if type(value) == float:\r\n            return unicode(int(value))\r\n        if type(value) == type(Time(0)):\r\n            return '{0:02d}/{1:02d}/{2}'.format(value.month, value.day, value.year)\r\n        else:\r\n            return u'UNABLE TO PARSE'","sub_path":"widget/animas_dexcom_report_viewer/obj/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"472453661","text":"\"\"\"\n* For normal attacks, use only the sword with the largest a (max_a)\n* For throwing, use only swords whose b is greater than max_a\n* After the throws, it is optimal to attack the remaining HP with max_a\n\"\"\"\nN, H = map(int, input().split())\nA = []\nB = []\nfor i in range(N):\n    a, b = map(int, input().split())\n    A.append(a)\n    B.append(b)\nA.sort(reverse=True)\nB.sort(reverse=True)\n\nmax_a = A[0]\n\nans = 0\ni = 0\n# Throw\nT = [b for b in B if b > max_a]\nfor t in T:\n    H -= t\n    ans += 1\n    if H <= 0:\n        print(ans)\n        exit()\n# ceiling division (round up)\nans += -(-H//max_a)\nprint(ans)\n","sub_path":"abc/085/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"293823373","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport six\nimport sqlalchemy.dialects.postgresql as pg\nfrom .app import db, app\n\n\nclass Authors(db.Model):\n    __tablename__ = 'authors'\n\n    # hgname, bzname\n    hgname = db.Column(db.String(512), primary_key=True)\n    bzname = db.Column(db.String(256))\n\n    def __init__(self, hgname, bzname):\n        self.hgname = hgname\n        self.bzname = bzname\n\n    def __repr__(self):\n        s = ''\n        return s.format(self.hgname,\n                        self.bzname)\n\n    @staticmethod\n    def post(data):\n        # data is a dict: {'command': 'update' or 'create',\n        #                  'data': {'toinsert': hgname => bzname,\n        #                           'torm': [...]}}\n        cmd = data['command']\n        toinsert = data['data']['toinsert']\n        if toinsert:\n            for hgname, bzname in toinsert.items():\n                if cmd == 'create':\n                    db.session.add(Authors(hgname, bzname))\n                else:\n                    ins = pg.insert(Authors).values(hgname=hgname,\n                                                    bzname=bzname)\n                    upd = ins.on_conflict_do_update(index_elements=['hgname'],\n                                                    set_=dict(bzname=bzname))\n                    db.session.execute(upd)\n            db.session.commit()\n\n        torm = data['data']['torm']\n        if torm:\n            query = db.session.query(Authors)\n            persons = query.filter(Authors.hgname.in_(torm))\n            persons.delete(synchronize_session=False)\n            db.session.expire_all()\n            db.session.commit()\n\n        return {'error': ''}\n\n    @staticmethod\n    def get(hgnames=[]):\n        if not hgnames:\n            persons = db.session.query(Authors).all()\n            res = {p.hgname: p.bzname for p in persons}\n            return {'bznames': res,\n                    'error': ''}\n\n        if isinstance(hgnames, dict):\n            if 'persons' in hgnames:\n                hgnames = hgnames['persons']\n            else:\n                return {'bznames': {},\n                        'error': 'A dictionary with key \\'persons\\' expected'}\n\n        # hgname is a list of string or a single string\n        if not isinstance(hgnames, list):\n            hgnames = [hgnames]\n\n        for name in hgnames:\n            if not isinstance(name, six.string_types):\n                return {'bznames': {},\n                        'error': 'Strings expected'}\n\n        persons = db.session.query(Authors)\n        persons = 
persons.filter(Authors.hgname.in_(hgnames)).all()\n        res = {p.hgname: p.bzname for p in persons}\n        return {'bznames': res,\n                'error': ''}\n\n\nclass FilesStats(db.Model):\n    __tablename__ = 'filesstats'\n\n    filename = db.Column(db.String(512), primary_key=True)\n    author = db.Column(db.String(256), primary_key=True)\n    score = db.Column(db.Float)\n\n    def __init__(self, filename, author, score):\n        self.filename = filename\n        self.author = author\n        self.score = score\n\n    def __repr__(self):\n        s = '<FilesStats {} {} {}>'\n        return s.format(self.filename,\n                        self.author,\n                        self.score)\n\n    @staticmethod\n    def post(data):\n        # data is a dict: {'command': 'update' or 'create',\n        #                  'data': filename => {author => score}}\n        cmd = data['command']\n        data = data['data']\n        for filename, scores in data.items():\n            for person, score in scores.items():\n                if cmd == 'create':\n                    db.session.add(FilesStats(filename, person, score))\n                else:\n                    ins = pg.insert(FilesStats).values(filename=filename,\n                                                       author=person,\n                                                       score=score)\n                    upd = ins.on_conflict_do_update(index_elements=['filename',\n                                                                    'author'],\n                                                    set_=dict(score=score))\n                    db.session.execute(upd)\n        db.session.commit()\n        return {'error': ''}\n\n    @staticmethod\n    def get(filenames):\n        if not filenames:\n            return {'stats': {},\n                    'error': 'No filenames specified'}\n\n        if isinstance(filenames, dict):\n            if 'filenames' in filenames:\n                filenames = filenames['filenames']\n            else:\n                error = 'A dictionary with key \\'filenames\\' expected'\n                return {'stats': {},\n                        'error': error}\n\n        # filenames is a list of strings or a single string\n        if not isinstance(filenames, list):\n            filenames = [filenames]\n\n        for name in filenames:\n            if not isinstance(name, six.string_types):\n                return {'stats': {},\n                        'error': 'Strings expected'}\n\n        files = db.session.query(FilesStats)\n        files = files.filter(FilesStats.filename.in_(filenames)).all()\n        res = {}\n        for f in files:\n            name = f.filename\n            if name not in res:\n                res[name] = {}\n            res[name][f.author] = f.score\n\n        return {'stats': res,\n                'error': ''}\n\n\ndef create():\n    e = db.get_engine(app)\n    d = e.dialect\n    if not d.has_table(e, 'authors') or not d.has_table(e, 'filesstats'):\n        db.create_all()\n","sub_path":"mozreviewers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"591169756","text":"import torch\nfrom torch import nn\nfrom utils import get_model\nfrom torchvision import models \nfrom utils import get_optimizer\n\ndef save_checkpoint(model, train_dataset, optimizer, arch, class_num, learn_rate, epochs, hidden_unit_num):\n    # Save the checkpoint \n    model.class_to_idx = train_dataset.class_to_idx\n    checkpoint = {'model': arch,\n                  'output_size': class_num,\n                  'classifier': model.classifier,\n                  'learnrate': learn_rate, # should get from optimizer later \n                  'epochs' : epochs,\n                  'hidden_units' : hidden_unit_num, \n                  'class_to_idx': model.class_to_idx,\n                  'device' : str(next(model.parameters()).device),\n                  'optimizer_state_dict': optimizer.state_dict(),\n                  'model_state_dict': model.state_dict()\n                 }\n\n    torch.save(checkpoint, arch + '_checkpoint.pth')\n    \n    \ndef load_checkpoint(filepath, device):\n    \n    # load on model: https://pytorch.org/tutorials/beginner/saving_loading_models.html#saving-loading-model-across-devices\n    if device == torch.device('cuda'):\n        checkpoint = torch.load(filepath)\n    else: # load on CPU\n        checkpoint = torch.load(filepath, map_location='cpu')\n    \n    arch = checkpoint['model']\n    # Load model\n    model = 
eval(get_model(arch)['load_command'])\n\n    # Freeze parameters\n    for param in model.parameters():\n        param.requires_grad = False\n\n    # new classifier for model\n    model.classifier = nn.Sequential(nn.Linear(get_model(arch)['classifier_input'], checkpoint['hidden_units']),\n                                     nn.ReLU(),\n                                     nn.Dropout(0.3),\n                                     nn.Linear(checkpoint['hidden_units'], checkpoint['output_size']),\n                                     nn.LogSoftmax(dim=1))\n    \n    model.load_state_dict(checkpoint['model_state_dict'])\n    \n    model.class_to_idx = checkpoint['class_to_idx']\n    \n    if device == torch.device('cuda'):\n        model.to(device)\n    \n    model.eval() # set to eval mode for inference\n    \n    optimizer = get_optimizer(model, checkpoint['learnrate'], checkpoint['optimizer_state_dict'])\n    \n    return (model, optimizer)","sub_path":"checkpoint_utils.py","file_name":"checkpoint_utils.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"507129105","text":"#!/usr/local/bin/python3\n\n# =============================================\n# ========= write your code below  ============\n# =============================================\n\ndef rc4(key, inputStream):\n    ''' returns the RC4 encoding of inputStream based on the key\n    (bytes, bytes) -> bytes\n    '''\n    S = ksa(key)\n    output = prga(S, len(inputStream))\n    res = bytearray()\n    for i in range(len(inputStream)):\n        res.append(inputStream[i] ^ output[i])\n    return res\n\ndef ksa(key):\n    S = bytearray()\n    j = 0\n    for i in range(256):\n        S.append(i)\n    for i in range(256):\n        j = (j + S[i] + key[i % len(key)]) % 256\n        S[i], S[j] = S[j], S[i]\n    return S\n\ndef prga(S, length):\n    # keep i and j across iterations instead of shadowing the loop variable\n    i = 0\n    j = 0\n    output = bytearray()\n    for _ in range(length):\n        i = (i + 1) % 256\n        j = (j + S[i]) % 256\n        S[i], S[j] = S[j], S[i]\n        K = S[(S[i] + S[j]) % 256]\n        output.append(K)\n    return output\n\n# =============================================\n# ===== do not modify the code below ==========\n# =============================================\n    \nif __name__ == \"__main__\":\n    import os, sys, getopt\n    def usage():\n        print ('Usage: ' + os.path.basename(__file__) + ' options input_file ')\n        print ('Options:')\n        print ('\\t -k key_file, --key=key_file')\n        print ('\\t -o output_file, --output=output_file')\n        sys.exit(2)\n    try:\n        opts, args = getopt.getopt(sys.argv[1:],\"hk:o:\",[\"help\", \"key=\", \"output=\"])\n    except getopt.GetoptError as err:\n        print(err)\n        usage()\n    # extract parameters\n    keyFile = None\n    outputFile = None\n    inputFile = args[0] if len(args) > 0 else None\n    for opt, arg in opts:\n        if opt in (\"-h\", \"--help\"):\n            usage()\n        elif opt in (\"-k\", \"--key\"):\n            keyFile = arg\n        elif opt in (\"-o\", \"--output\"):\n            outputFile = arg\n    # check arguments\n    if (keyFile is None):\n        print('key option is missing\\n')\n        usage()\n    if (outputFile is None):\n        print('output option is missing\\n')\n        usage()\n    if (inputFile is None):\n        print('input_file is missing\\n')\n        usage()\n    # run the command\n    with open(keyFile, \"rb\") as keyStream:\n        key = keyStream.read()\n    with open(inputFile, \"rb\") as inputStream:\n        data = inputStream.read()\n    output = rc4(key, data)\n    with open(outputFile, \"wb\") as outputStream:\n        outputStream.write(output)","sub_path":"SecLab/Lab2/rc4.py","file_name":"rc4.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"180742616","text":"import os,sys\nfrom pprint import pprint\nfrom selenium import webdriver\n\nfrom config import *\nfrom time import 
sleep\n\nfrom po_helloworld import *\n\nimport line_up_page\nimport item_add_page\n\ndef setupLocalChrome():\n caps = webdriver.DesiredCapabilities.CHROME.copy()\n\n chrome_options = webdriver.ChromeOptions()\n\n mobile_emulation = { \"deviceName\": \"Nexus 5\" }\n chrome_options.add_experimental_option(\"mobileEmulation\", mobile_emulation)\n # chrome_options.add_argument(\"--headless\")\n\n caps=chrome_options.to_capabilities()\n caps['acceptInsecureCerts'] = True\n\n browser = webdriver.Chrome('drivers/chrome/86/chromedriver', desired_capabilities=caps)\n return browser\n\n\ndef po_helloworld_test():\n po_helloworld()\n\n\ndef try_add_food(po):\n po.addFood()\n po.addFood()\n po.addFood()\n po.addFood()\n po.addFood()\n po.addFood()\n po.addFood()\n\ndef try_remove_food(po):\n po.removeFood()\n po.removeFood()\n po.removeFood()\n\ndef try_close(po):\n po.tapCrossButton()\n\ndef try_locate_element():\n browser = setupLocalChrome()\n browser.get('http://menymeny.com/food/%E3%82%84%E3%81%8D%E3%81%A8%E3%82%8A/5f6205657f11c030c1ddf6f2')\n\n item_add_po=item_add_page.FirstTimeLanding(browser)\n item_add_po.tapAcceptAndContinueButton()\n\n item_add_po=item_add_page.Main(browser)\n try_add_food(item_add_po)\n try_remove_food(item_add_po)\n\n try_close(item_add_po)\n\n browser.quit()\n","sub_path":"tests/po_tests/self_test_item_add_page.py","file_name":"self_test_item_add_page.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"452956191","text":"# vim: set ts=4 sw=4 et: coding=UTF-8\n\n# We basically extend rpmcheck\nfrom .rpmcheck import RpmCheck\n\n\nclass RpmBuild(RpmCheck):\n\n \"\"\"\n Replace various troublemakers in build phase\n \"\"\"\n\n def add(self, line):\n # we do not want to run suseupdateconfig, deprecated\n if self.reg.re_suseupdateconfig.search(line):\n return\n\n # if user uses cmake/configure directly just recommend him using the macros\n if not self.minimal and self.previous_line:\n if not self.previous_line.startswith('#') and \\\n line.startswith('./configure'):\n self.lines.append('# FIXME: you should use the %%configure macro')\n if not self.previous_line.startswith('# FIXME') and \\\n line.startswith('cmake'):\n self.lines.append('# FIXME: you should use %%cmake macros')\n\n RpmCheck.add(self, line)\n","sub_path":"spec_cleaner/rpmbuild.py","file_name":"rpmbuild.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"296496343","text":"from Products.Archetypes import DisplayList\nfrom Products.Archetypes.Field import StringField, ImageField, IntegerField, \\\n ReferenceField\nfrom Products.Archetypes import atapi\nfrom Products.CMFCore.interfaces import IContentish\nfrom Products.CMFCore.permissions import ModifyPortalContent\nfrom Products.validation import V_REQUIRED\nfrom Products.Archetypes.atapi import AnnotationStorage\nfrom archetypes.referencebrowserwidget import ReferenceBrowserWidget\nfrom zope.component import adapts\nfrom zope.i18nmessageid import MessageFactory\nfrom zope.interface import implements\nfrom archetypes.schemaextender.interfaces import IBrowserLayerAwareExtender\nfrom squircle.theme.interfaces import IThemeSpecific\n\nfrom archetypes.schemaextender.field import ExtensionField\n\n_ = MessageFactory('squircle.theme')\n\nMARK_PAGE = DisplayList((\n ('', _(u'--')),\n ('Frontpage', _(u'Frontpage')),\n ('Dossier', _(u'Dossier')),\n ('Theme', _(u'Theme')),\n 
('Campaign', _(u'Campaign'))\n))\n\nTHEME_STYLE = DisplayList((\n ('', _(u'--')),\n ('theme-1', _(u'Theme 1')),\n ('theme-2', _(u'Theme 2')),\n ('theme-3', _(u'Theme 3')),\n ('theme-4', _(u'Theme 4')),\n ('theme-5', _(u'Theme 5')),\n ('theme-6', _(u'Theme 6'))\n))\n\n\nclass ExtStringField(ExtensionField, StringField):\n \"\"\"A trivial field.\"\"\"\n\n\nclass ExtImageField(ExtensionField, ImageField):\n \"\"\"A trivial field.\"\"\"\n\n\nclass ExtIntegerField(ExtensionField, IntegerField):\n \"\"\"A trivial field.\"\"\"\n\n\nclass ExtReferenceField(ExtensionField, ReferenceField):\n \"\"\"A trivial field.\"\"\"\n\n\nclass PageExtender(object):\n adapts(IContentish)\n implements(IBrowserLayerAwareExtender)\n layer = IThemeSpecific\n\n fields = [\n ExtImageField('ogImage',\n required=False,\n schemata=\"default\",\n storage=AnnotationStorage(migrate=True),\n languageIndependent=True,\n sizes={'large': (768, 768),\n 'preview': (400, 400),\n 'mini': (200, 200),\n 'thumb': (128, 128),\n 'tile': (64, 64),\n 'icon': (32, 32),\n 'listing': (16, 16),\n },\n validators=(('isNonEmptyFile', V_REQUIRED)),\n widget=atapi.ImageWidget(\n description=_(u'help_og_image',\n default=u'Minimum size 600x315, keep aspect ratio as close to 1.91:1 as possible'),\n label=_(u'label_og_image',\n default=u'Large Facebook sharing image'),\n show_content_type=False)\n ),\n\n ExtStringField('pageScript',\n required=False,\n schemata=\"default\",\n searchable=True,\n widget=atapi.TextAreaWidget(\n description=_(u'help_page_script',\n default=u'Use this to add some additional scripts (e.g. tracking code scripts). Be careful with this!'),\n label=_(u'label_page_script',\n default=u'Page script'),\n size=40,\n cols=4)\n ),\n\n ExtStringField(\"markedAs\",\n vocabulary=MARK_PAGE,\n schemata=\"squircle\",\n widget=atapi.SelectionWidget(\n label=_(u'label_marked_as', default=u'Mark page as'),\n description=_(u'help_marked_as',\n default=u'Indicate type of page'))\n ),\n\n ExtImageField('posterImage',\n required=False,\n schemata=\"squircle\",\n storage=AnnotationStorage(migrate=True),\n languageIndependent=True,\n sizes={'large': (768, 768),\n 'preview': (400, 400),\n 'mini': (200, 200),\n 'thumb': (128, 128),\n 'tile': (64, 64),\n 'icon': (32, 32),\n 'listing': (16, 16),\n },\n validators=(('isNonEmptyFile', V_REQUIRED)),\n widget=atapi.ImageWidget(\n description=_(u'help_poster_image',\n default=u'Background when shown in poster (2560x800)'),\n label=_(u'label_poster_image',\n default=u'Poster image'),\n show_content_type=False)\n ),\n\n ExtStringField('posterTitle',\n required=False,\n schemata=\"squircle\",\n searchable=True,\n widget=atapi.StringWidget(\n description=_(u'help_poster_title',\n default=u'Title when shown as a poster'),\n label=_(u'label_poster_title',\n default=u'Poster title'),\n size=40)\n ),\n\n ExtStringField('posterButton',\n required=False,\n schemata=\"squircle\",\n searchable=True,\n widget=atapi.StringWidget(\n description=_(u'help_poster_button',\n default=u'Text in button when shown as a poster'),\n label=_(u'label_poster_title',\n default=u'Poster button'),\n size=40)\n ),\n\n ExtStringField('posterThumbTitle',\n required=False,\n schemata=\"squircle\",\n searchable=True,\n widget=atapi.StringWidget(\n description=_(u'help_poster_thumbtitle',\n default=u'Title when shown as a thumbnailed poster'),\n label=_(u'label_poster_thumbnail',\n default=u'Poster thumbnail title'),\n size=40)\n ),\n\n ExtImageField('themeImage',\n required=False,\n schemata=\"squircle\",\n 
storage=AnnotationStorage(migrate=True),\n languageIndependent=True,\n sizes={'large': (768, 768),\n 'preview': (400, 400),\n 'mini': (200, 200),\n 'thumb': (128, 128),\n 'tile': (64, 64),\n 'icon': (32, 32),\n 'listing': (16, 16),\n },\n validators=(('isNonEmptyFile', V_REQUIRED)),\n widget=atapi.ImageWidget(\n description=_(u'help_theme_image',\n default=u'Background when shown as theme in frontpage (280x400)'),\n label=_(u'label_poster_image',\n default=u'Theme image'),\n show_content_type=False)\n ),\n\n ExtStringField('themeTitle',\n required=False,\n schemata=\"squircle\",\n searchable=True,\n widget=atapi.StringWidget(\n description=_(u'help_theme_title',\n default=u'Title when shown as theme in frontpage'),\n label=_(u'label_theme_title',\n default=u'Theme title'),\n size=40)\n ),\n\n ExtStringField('themeDescription',\n required=False,\n schemata=\"squircle\",\n searchable=True,\n widget=atapi.StringWidget(\n description=_(u'help_theme_description',\n default=u'Description when shown as theme in frontpage'),\n label=_(u'label_theme_description',\n default=u'Theme description'),\n size=40)\n ),\n\n ExtIntegerField('highlightNumItems',\n required=False,\n schemata=\"squircle\",\n mode=\"rw\",\n default=0,\n widget=atapi.IntegerWidget(\n label=_(u'label_highlightnumitems',\n default=u'Highlight first n items'),\n description=_(u'help_highlightnumitems',\n default=u'Highlight first n items when show within theme')\n ),\n ),\n\n ExtImageField('themePageImage',\n required=False,\n schemata=\"squircle\",\n storage=AnnotationStorage(migrate=True),\n languageIndependent=True,\n sizes={'large': (768, 768),\n 'preview': (400, 400),\n 'mini': (200, 200),\n 'thumb': (128, 128),\n 'tile': (64, 64),\n 'icon': (32, 32),\n 'listing': (16, 16),\n },\n validators=(('isNonEmptyFile', V_REQUIRED)),\n widget=atapi.ImageWidget(\n description=_(u'help_theme_page_image',\n default=u'Background when shown as a theme (3000x192)'),\n label=_(u'label_theme_page_image',\n default=u'Theme page image'),\n show_content_type=False)\n ),\n\n ExtStringField(\"themeStyle\",\n vocabulary=THEME_STYLE,\n schemata=\"squircle\",\n widget=atapi.SelectionWidget(\n label=_(u'label_theme_style',\n default=u'Theme style'),\n description=_(u'help_theme_style',\n default=u'Indicate style of theme'))\n ),\n\n ExtStringField('highlightTitle',\n required=False,\n schemata=\"squircle\",\n searchable=True,\n widget=atapi.StringWidget(\n description=_(u'help_highlight_title',\n default=u'Title when highlighted'),\n label=_(u'label_highlight_title',\n default=u'Highlight title'),\n size=40)\n ),\n\n ExtStringField('highlightDescription',\n required=False,\n schemata=\"squircle\",\n searchable=True,\n widget=atapi.StringWidget(\n description=_(u'help_highlight_description',\n default=u'Description when highlighted'),\n label=_(u'label_highlight_description',\n default=u'Highlight description'),\n size=40)\n ),\n\n ExtImageField('highlightImage',\n required=False,\n schemata=\"squircle\",\n storage=AnnotationStorage(migrate=True),\n languageIndependent=True,\n sizes={'large': (768, 768),\n 'preview': (400, 400),\n 'mini': (200, 200),\n 'thumb': (128, 128),\n 'tile': (64, 64),\n 'icon': (32, 32),\n 'listing': (16, 16),\n },\n validators=(('isNonEmptyFile', V_REQUIRED)),\n widget=atapi.ImageWidget(\n description=_(u'help_highlight_image',\n default=u'Background image when highlighted (730x410)'),\n label=_(u'label_highlight_image',\n default=u'Highlight image'),\n show_content_type=False)\n ),\n\n ExtReferenceField('itemsInPoster',\n 
relationship='itemsInPoster',\n multiValued=True,\n schemata=\"squircle\",\n isMetadata=True,\n languageIndependent=False,\n index='KeywordIndex',\n referencesSortable=True,\n keepReferencesOnCopy=True,\n write_permission=ModifyPortalContent,\n widget=ReferenceBrowserWidget(\n allow_search=True,\n allow_browse=True,\n allow_sorting=True,\n show_indexes=False,\n force_close_on_insert=True,\n label=_(u'label_items_in_poster',\n default=u'Items in poster'),\n description=_(u'help_items_in_poster',\n default=u'Items to show in poster (frontpage)'),\n visible={'edit': 'visible', 'view': 'invisible'}\n )\n ),\n\n ExtReferenceField('itemsInTheme',\n relationship='itemsInTheme',\n multiValued=True,\n schemata=\"squircle\",\n isMetadata=True,\n languageIndependent=False,\n index='KeywordIndex',\n referencesSortable=True,\n keepReferencesOnCopy=True,\n write_permission=ModifyPortalContent,\n widget=ReferenceBrowserWidget(\n allow_search=True,\n allow_browse=True,\n allow_sorting=True,\n show_indexes=False,\n force_close_on_insert=True,\n label=_(u'label_items_in_theme',\n default=u'Items in theme'),\n description=_(u'help_items_in_theme',\n default=u'Items to show in theme'),\n visible={'edit': 'visible', 'view': 'invisible'}\n )\n )\n ]\n\n def __init__(self, context):\n self.context = context\n\n def getFields(self):\n return self.fields\n","sub_path":"src/squircle/theme/content/extender.py","file_name":"extender.py","file_ext":"py","file_size_in_byte":15453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"602842588","text":"#!/usr/bin/python3\n#Credentials for AMP Reborn api testing\n\nHOME = \"www.amp.com.au\"\nHTTP_BASIC_AUTH_USERNAME:\"honeyjar\"\n#HTTP_BASIC_AUTH_PASSWORD:\"\"\nHTTP_BASIC_AUTH_PASSWORD_B64:\"dGhpcyBpcyBub3QgYSByZWFsIHJlcG8=\"\nENDPOINTS = {\n \"PROD\" : {\n \"URI\":\"reborn.amp.com.au:3337/api/v1/\",\n \"USER\":\"honeyjar\",\n \"KEY\":\"dGhhdCdzIHJpZ2h0LCB3ZSdyZSB0ZXN0aW5nIGluIHByb2R1Y3Rpb24h\"\n },\n \"DEV\" : {\n \"URI\":\"reborn-dev.amp.com.au:3337/api/v1\",\n \"USER\":\"winnie\",\n \"KEY\":\"eW91IGp1c3QgZm91bmQgdGhlIG5pbmph\"\n }\n }\nDATABASES = {\n \"PROD\" : {\n \"DB_HOST\":\"reborn0.postgresql.internal.amp.com.au\",\n \"PORT\":\"5432\",\n \"USER\":\"root\",\n \"KEY\":\"m8&cCdL35!*6\"\n },\n \"DEV\" : {\n \"DB_HOST\":\"reborn1.postgresql.internal.amp.com.au\",\n \"PORT\":\"5432\",\n \"USER\":\"root\",\n \"KEY\":\"root\"\n }\n }\n\n","sub_path":"dev/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"494685490","text":"import pytest\nimport read_xls\nfrom openpyxl import load_workbook\n\n\ndef test_workSheetInMemory():\n wb = load_workbook(filename=\"sampledata.xlsx\")\n assert read_xls.read_data(wb) == {\n \"customer\": [\n {\n \"customer_id\": 1,\n \"first_name\": \"Rachel\",\n \"last_name\": \"Smith\",\n \"address\": \"123 Riverbend SE\",\n \"phone\": \"403-324-3455\",\n \"email\": \"rachel@telus.ca\",\n },\n {\n \"customer_id\": 2,\n \"first_name\": \"Bobby\",\n \"last_name\": \"Smith\",\n \"address\": \"435 Crest Place NE\",\n \"phone\": \"587-321-4564\",\n \"email\": \"bobby@gmail.com\",\n },\n {\n \"customer_id\": 3,\n \"first_name\": \"Rebecca\",\n \"last_name\": \"Wang\",\n \"address\": \"231 Crest Place NE\",\n \"phone\": \"403-132-5798\",\n \"email\": \"rebecca@hotmail.com\",\n },\n {\n \"customer_id\": 4,\n \"first_name\": \"Tim\",\n \"last_name\": \"Cormier\",\n 
\"address\": \"435 Redstone NE\",\n \"phone\": \"403-432-4532\",\n \"email\": \"tim@gmail.com\",\n },\n {\n \"customer_id\": 5,\n \"first_name\": \"Phil\",\n \"last_name\": \"Murray\",\n \"address\": \"568 NoseHill\",\n \"phone\": \"587-091-0934\",\n \"email\": \"phil@shaw.ca\",\n },\n {\n \"customer_id\": 6,\n \"first_name\": \"Shane\",\n \"last_name\": \"James\",\n \"address\": \"987 Brightton SE\",\n \"phone\": \"587-765-7898\",\n \"email\": \"shane@gmail.com\",\n },\n {\n \"customer_id\": 7,\n \"first_name\": \"John\",\n \"last_name\": \"Sandhu\",\n \"address\": \"356 Willow Park\",\n \"phone\": \"587-321-2343\",\n \"email\": \"john@telus.ca\",\n },\n {\n \"customer_id\": 8,\n \"first_name\": \"Dave\",\n \"last_name\": \"Hynes\",\n \"address\": \"1213 Brightton SE\",\n \"phone\": \"403-554-2321\",\n \"email\": \"dave@hotmai.com\",\n },\n {\n \"customer_id\": 9,\n \"first_name\": \"Cristina\",\n \"last_name\": \"Davey\",\n \"address\": \"451 Kensington\",\n \"phone\": \"587-090-2931\",\n \"email\": \"cristina@gmail.com\",\n },\n {\n \"customer_id\": 10,\n \"first_name\": \"Dave\",\n \"last_name\": \"James\",\n \"address\": \"189 SunnySide\",\n \"phone\": \"403-432-7094\",\n \"email\": \"dave@shaw.ca\",\n },\n ],\n \"product\": [\n {\"prod_id\": 1, \"prod_name\": \"Dishwasher\", \"prod_price\": 599.99},\n {\"prod_id\": 2, \"prod_name\": \"Wine Cooler\", \"prod_price\": 199.99},\n {\"prod_id\": 3, \"prod_name\": \"Microwave\", \"prod_price\": 159.99},\n {\"prod_id\": 4, \"prod_name\": \"Mini Oven\", \"prod_price\": 299.99},\n {\"prod_id\": 5, \"prod_name\": \"Electric Stove\", \"prod_price\": 899.99},\n {\"prod_id\": 6, \"prod_name\": \"Gas Stove\", \"prod_price\": 1119.99},\n {\"prod_id\": 7, \"prod_name\": \"Blender\", \"prod_price\": 129.99},\n {\"prod_id\": 8, \"prod_name\": \"Portable Heater\", \"prod_price\": 119.99},\n {\"prod_id\": 9, \"prod_name\": \"Dining aTable Set\", \"prod_price\": 899.99},\n {\"prod_id\": 10, \"prod_name\": \"Washer\", \"prod_price\": 999.99},\n ],\n \"invoice\": [\n {\n \"invoice_id\": 1,\n \"customer_id\": 1,\n \"invoice_date\": \"12-01-2019\",\n \"sales_amount\": 2499.97,\n \"GST(5%)\": 124.9985,\n \"total_amount\": 2624.9685,\n },\n {\n \"invoice_id\": 2,\n \"customer_id\": 2,\n \"invoice_date\": \"12-05-2019\",\n \"sales_amount\": 2019.97,\n \"GST(5%)\": 100.9985,\n \"total_amount\": 4745.937,\n },\n {\n \"invoice_id\": 3,\n \"customer_id\": 3,\n \"invoice_date\": \"12-09-2019\",\n \"sales_amount\": 1419.97,\n \"GST(5%)\": 70.9985,\n \"total_amount\": 3611.937,\n },\n ],\n \"invoice_detail\": [\n {\"detail_id\": 1, \"invoice_id\": 1, \"prod_id\": 1, \"quantity\": 1},\n {\"detail_id\": 2, \"invoice_id\": 1, \"prod_id\": 10, \"quantity\": 1},\n {\"detail_id\": 3, \"invoice_id\": 1, \"prod_id\": 9, \"quantity\": 1},\n {\"detail_id\": 4, \"invoice_id\": 2, \"prod_id\": 8, \"quantity\": 1},\n {\"detail_id\": 5, \"invoice_id\": 2, \"prod_id\": 9, \"quantity\": 1},\n {\"detail_id\": 6, \"invoice_id\": 2, \"prod_id\": 10, \"quantity\": 1},\n {\"detail_id\": 7, \"invoice_id\": 3, \"prod_id\": 4, \"quantity\": 1},\n {\"detail_id\": 8, \"invoice_id\": 3, \"prod_id\": 8, \"quantity\": 1},\n {\"detail_id\": 9, \"invoice_id\": 3, \"prod_id\": 10, \"quantity\": 1},\n ],\n }\n\n","sub_path":"src/python/comm230/read_xls_test.py","file_name":"read_xls_test.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"573917096","text":"\"\"\"\nCopyright (c) 2019 razaqq\n\nPermission 
is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport os\nimport sys\nimport traceback\nfrom subprocess import call\n\n\nif __name__ == '__main__':\n    root = os.path.abspath(os.path.dirname(__file__))\n    main = os.path.join(root, 'potatoalert.py')\n    assets = os.path.join(root, 'assets')\n    icon = os.path.join(assets, 'potato.ico')\n    assets_sep = ':' if os.name == 'posix' else ';'\n\n    debug_flags = ['-F', '-y', '-c', '-d', 'imports', '-d', 'bootloader']\n    build_flags = ['-F', '-y', '-w']\n    excludes = ['--exclude-module', 'tkinter']\n\n    p = call([sys.executable, '-m', 'PyInstaller'] + build_flags + excludes +\n             ['-i', icon, '--add-data', f\"{assets}{assets_sep}assets/\", main])\n\n    try:\n        built_binary_file = 'potatoalert_x64' if os.name == 'posix' else 'potatoalert_x64.exe'\n        binary_file = 'potatoalert' if os.name == 'posix' else 'potatoalert.exe'\n        if os.path.exists(os.path.join(root, 'dist', built_binary_file)):\n            os.remove(os.path.join(root, 'dist', built_binary_file))\n        os.rename(os.path.join(root, 'dist', binary_file), os.path.join(root, 'dist', built_binary_file))\n        os.remove(os.path.join(root, 'potatoalert.spec'))\n    except FileNotFoundError as e:\n        print(traceback.format_exc())\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"13526817","text":"import numpy as np\nimport albumentations\nfrom albumentations.core.transforms_interface import DualTransform\nfrom albumentations.augmentations import functional as F\n\nclass GridMask(DualTransform):\n    \"\"\"GridMask augmentation for image classification and object detection.\n    Args:\n        num_grid (int): number of grid in a row or column.\n        fill_value (int, float, list of int, list of float): value for dropped pixels.\n        rotate ((int, int) or int): range from which a random angle is picked. If rotate is a single int\n            an angle is picked from (-rotate, rotate). 
Default: (-90, 90)\n        mode (int):\n            0 - cropout a quarter of the square of each grid (left top)\n            1 - reserve a quarter of the square of each grid (left top)\n            2 - cropout 2 quarters of the square of each grid (left top & right bottom)\n\n    Targets:\n        image, mask\n\n    Image types:\n        uint8, float32\n\n    Reference:\n    |  https://arxiv.org/abs/2001.04086\n    |  https://github.com/akuxcw/GridMask\n    |  https://albumentations.readthedocs.io/en/latest/\n    \"\"\"\n    def __init__(self, num_grid = 3, fill_value = 0, rotate = 0, mode = 0, always_apply = True, p = .5):\n        super().__init__(always_apply, p)\n        if isinstance(num_grid, int):\n            num_grid = (num_grid, num_grid)\n        if isinstance(rotate, int):\n            rotate = (-rotate, rotate)\n        self.num_grid = num_grid\n        self.fill_value = fill_value\n        self.rotate = rotate\n        self.mode = mode\n        self.masks = None\n        self.rand_h_max, self.rand_w_max = [], []\n    def init_masks(self, height, width):\n        if not self.masks:\n            self.masks = list()\n            n_masks = self.num_grid[1] - self.num_grid[0] + 1\n            for n, n_g in enumerate(range(self.num_grid[0], self.num_grid[1] + 1)):\n                grid_h, grid_w = height / n_g, width / n_g\n                this_mask = np.ones((int((n_g + 1) * grid_h), int((n_g + 1) * grid_w)), dtype = np.uint8)\n                for i in range(n_g + 1):\n                    for j in range(n_g + 1):\n                        this_mask[\n                            int(i * grid_h) : int(i * grid_h + grid_h / 2),\n                            int(j * grid_w) : int(j * grid_w + grid_w / 2)\n                        ] = self.fill_value\n                        if self.mode == 2:\n                            this_mask[\n                                int(i * grid_h + grid_h / 2) : int(i * grid_h + grid_h),\n                                int(j * grid_w + grid_w / 2) : int(j * grid_w + grid_w)\n                            ] = self.fill_value\n                if self.mode == 1:\n                    this_mask = 1 - this_mask\n                self.masks.append(this_mask)\n                self.rand_h_max.append(grid_h)\n                self.rand_w_max.append(grid_w)\n    def apply(self, image, mask, rand_h, rand_w, angle, **kwargs):\n        h, w = image.shape[:2]\n        mask = F.rotate(mask, angle) if self.rotate[1] > 0 else mask\n        mask = mask[..., np.newaxis] if image.ndim == 3 else mask\n        image *= mask[rand_h : rand_h + h, rand_w : rand_w + w].astype(image.dtype)\n        return image\n    def get_params_dependent_on_targets(self, params):\n        \"\"\"Return dict of dependent parameters of apply function to finish task.\n        Args:\n            params (dict): \n                list of keys - list of targets_as_params\n                list of values - list of values \n        Targets:\n            Dict\n        \"\"\"\n        img = params['image']\n        height, width = img.shape[:2]\n        self.init_masks(height, width)\n        mid = np.random.randint(len(self.masks))\n        mask = self.masks[mid]\n        rand_h = np.random.randint(self.rand_h_max[mid])\n        rand_w = np.random.randint(self.rand_w_max[mid])\n        angle = np.random.randint(self.rotate[0], self.rotate[1]) if self.rotate[1] > 0 else 0\n        return {\n            'mask': mask,\n            'rand_h': rand_h, \n            'rand_w': rand_w,\n            'angle': angle\n        }\n    @property\n    def targets_as_params(self):\n        return ['image']\n    def get_transform_init_args_names(self):\n        \"\"\"Return parameters of __init__ function.\n        Args:\n            No parameters - default\n        Targets:\n            list/tuple of parameters\n        \"\"\"\n        return ('num_grid', 'fill_value', 'rotate', 'mode')","sub_path":"Image Augmentation/GridMask.py","file_name":"GridMask.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"188809741","text":"\"\"\"Light obfuscation of a Python source file.\n\nReplaces content of docstrings and comments with white space. 
Does not\nalter function or variable names.\n\nA few limitations:\n\n * Unfortunately currently removes blank lines.\n\n * Does not work with vanilla astng, because it seems to contain bugs\n in handling of argument lists and except-clauses at least; l2tp-dev\n contains patches to fix astng for our purposes.\n\nNOTE!!!\n*** This module does not currently work because ASTNG has a number of bugs! ***\nNOTE!!!\n\nSee /usr/lib/python2.4/site-packages/pylint/lint.py for nice astng examples.\n\"\"\"\n\nimport os\nimport tempfile\nimport py_compile\nfrom logilab import astng\n\nclass Obfuscator:\n \"\"\"Light obfuscation of a Python source file.\"\"\"\n\n def __init__(self):\n pass\n\n def obfuscate(self, infile, outfile):\n # parse to astng\n mgr = astng.ASTNGManager()\n modname = None # XXX!\n ast = mgr.astng_from_file(infile, modname)\n\n # process\n self._recursive_process(ast)\n\n # write out\n f = open(outfile, 'wb')\n f.write(ast.as_string())\n f.close()\n\n def compile_pyc(self, infile, outfile):\n rc = py_compile.compile(infile, cfile=outfile, dfile=infile, doraise=True)\n\n def obfuscate_and_compile(self, infile, outfile):\n t = tempfile.mktemp(suffix='-obf')\n try:\n self.obfuscate(infile, t)\n self.compile_pyc(t, outfile)\n finally:\n if os.path.exists(t):\n os.unlink(t)\n \n def _obfuscate_docstring_preserve_length(self, old_doc):\n new_doc = ''\n for i in xrange(len(old_doc)):\n ch = old_doc[i]\n if ch in ['\\n', ' ', '\\t']:\n new_doc += ch\n else:\n new_doc += ' '\n return new_doc\n\n def _obfuscate_docstring(self, old_doc):\n return '.'\n\n def _recursive_process(self, x):\n # x may be a wide variety of things: None, int, string, astng objects, etc.\n\n # XXX: this is rather bruteforce checking but seems to work OK\n\n if hasattr(x, 'doc'):\n if hasattr(x, 'doc') and isinstance(x.doc, (str, unicode)):\n x.doc = self._obfuscate_docstring_preserve_length(x.doc)\n #x.doc = self._obfuscate_docstring(x.doc)\n \n if hasattr(x, 'getChildren'):\n for c in x.getChildren():\n self._recursive_process(c)\n\n\nif __name__ == '__main__':\n import sys\n infile = sys.argv[1]\n outfile = sys.argv[2]\n\n obf = Obfuscator()\n obf.obfuscate(infile, outfile)\n obf.obfuscate_and_compile(infile, outfile + 'c')\n","sub_path":"src/python/codebay/common/pymasker.py","file_name":"pymasker.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"258953300","text":"import sys\r\nimport os\r\n\r\ndef main():\r\n\r\n inname = './table/zh.txt'\r\n outname = './table/bg_zh.txt'\r\n \r\n fin_script = open(inname,'r')\r\n fout_script = open(outname,'w')\r\n fout_script.write('%-25s,%-25s,%-25s,%-25s,%-80s,%-25s,%-25s\\n'%('# Process','Final state','Cross section','Events expected','Path','Configuration','Filename tag'))\r\n\r\n for s_line in fin_script :\r\n l = [x.strip() for x in s_line.split()]\r\n\r\n dname = l[0]\r\n cs = float(l[1])\r\n exp = 5050 * cs\r\n path = '/cefs/data/DstData/CEPC240/CEPC_v4/higgs/E240.P%s.e0.p0.whizard195'%dname\r\n conf = 'CEPC_V4'\r\n tag = '%s.%s.%s'%(dname,'e0','p0')\r\n\r\n if dname.split('_')[1] == 'X' :\r\n fout_script.write('#%-25s,%-25s,%-25s,%-25s,%-80s,%-25s,%-25s\\n'%(dname,dname,cs,exp,path,conf,tag))\r\n continue\r\n \r\n fout_script.write('%-25s,%-25s,%-25s,%-25s,%-80s,%-25s,%-25s\\n'%(dname,dname,cs,exp,path,conf,tag))\r\n\r\n fin_script.close()\r\n fout_script.close()\r\n \r\n\r\nif __name__ == '__main__':\r\n 
main()","sub_path":"BDT/python/gen_table.py","file_name":"gen_table.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"574475091","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', PostListView.as_view(), name='news'),\n path('', PostDetailView.as_view(), name='post'),\n path('search/', PostSearchView.as_view(), name='post_search'),\n path('add/', PostAddView.as_view(), name='post_add'),\n path('/edit', PostUpdateView.as_view(), name='post_edit'),\n path('/delete', PostDeleteView.as_view(), name='post_delete'),\n path('upgrade/', upgrade_me, name='upgrade'),\n path('categories/', CategoriesListView.as_view(), name='categories_list'),\n path('categories/', PostsCategoryListView.as_view(), name='posts_category'),\n path('subscribe/', subscribe, name='subscribe'),\n]","sub_path":"NewsPaper/newsportal/news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"144815411","text":"import json\nfrom . models import *\n\n\ndef cookieCart(request):\n try:\n # parses the cookie and turns it into a python dictionary\n cart = json.loads(request.COOKIES['cart'])\n except:\n cart = {} # to prevent key error when cookie cart is deleted\n\n print('Cart:', cart)\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0}\n cartItems = order['get_cart_items']\n\n # displays cart items in nav bar\n for i in cart:\n try:\n cartItems += cart[i]['quantity']\n\n product = Product.objects.get(id=i)\n total = (product.price * cart[i]['quantity'])\n\n # cart total price plus price of an items's quantity when added to cart\n order['get_cart_total'] += total\n order['get_cart_items'] += cart[i]['quantity']\n\n # displays items of Anonymous user and stores in cookie\n item = {\n 'product': {\n 'id': product.id,\n 'name': product.name,\n 'price': product.price,\n # 'imageURL':product.imageURL\n },\n 'quantity': cart[i]['quantity'],\n 'get_total': total,\n }\n items.append(item)\n\n if product.stock >= 1:\n order['shipping'] = True\n except:\n pass\n return {'cartItems': cartItems, 'order': order, 'items': items}\n\n\ndef cartData(request):\n if request.user.is_authenticated:\n customer = request.user.profile\n order, created = Order.objects.get_or_create(\n customer=customer, complete=False)\n items = order.orderitem_set.all()\n cartItems = order.get_cart_items\n else:\n cookieData = cookieCart(request)\n cartItems = cookieData['cartItems']\n order = cookieData['order']\n items = cookieData['items']\n return {'cartItems': cartItems, 'order': order, 'items': items}\n\n\ndef guestOrder(request, data):\n print('User is not logged in')\n print('COOKIES:', request.COOKIES)\n name = data['form']['name']\n email = data['form']['email']\n\n cookieData = cookieCart(request)\n items = cookieData['items']\n\n customer, created = Profile.objects.get_or_create(\n email=email,)\n customer.name = name\n customer.save()\n\n order = Order.objects.create(\n customer=customer,\n complete=False,\n )\n\n for item in items:\n product = Product.objects.get(id=item['product']['id'])\n\n orderItem = OrderItem.objects.create(\n product=product,\n order=order,\n quantity=item['quantity']\n )\n return customer, 
order\n","sub_path":"pharma/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"275383456","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/vandorjw/Workspace/public/django-silent-auction/silent_auction/__init__.py\n# Compiled at: 2016-10-23 22:34:16\n# Size of source mod 2**32: 230 bytes\n\"\"\"Django Silent Auction\"\"\"\n__version__ = '0.1.3'\n__license__ = 'MIT'\n__author__ = 'friends-collaborating'\n__email__ = 'info@friends-collaborating.info'\n__url__ = 'https://github.com/friends-collaborating/django-silent-auction'","sub_path":"pycfiles/silent_auction-0.1.3-py3-none-any/__init__.cpython-35.py","file_name":"__init__.cpython-35.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"84392127","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 29 18:56:01 2021\r\n\r\n@author: akasa\r\n\"\"\"\r\n\r\nimport sympy as sym\r\n\r\n#原点Oを2次元座標平面の原点とし、A,P1,P2の座標から面積Sを求める。\r\ns=sym.Symbol('s')\r\nA_x=2\r\nA_y=0\r\nP1_x=5*sym.cos(2*s)\r\nP1_y=5*sym.sin(2*s)\r\nP2_x=10*sym.cos(s)\r\nP2_y=10*sym.sin(s)\r\n\r\nS=sym.simplify((1/2)*((P1_x-A_x)*(P2_y-A_y)-(P2_x-A_x)*(P1_y-A_y)))\r\n\r\n#微分して極値を求める\r\ndiff1=sym.diff(S,s)\r\nsolves=sym.solve(diff1,s)\r\n\r\n#極値から最大値を求める\r\nMax_S=0\r\nfor solve in solves:\r\n S_=abs(S.subs(s,solve))\r\n if S_>Max_S:\r\n Max_S=S_\r\n \r\nprint('最大値:',Max_S)","sub_path":"0527/2210104028/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"262602249","text":"## Copyright 2020 Martin J. Steil\n##\n## Permission is hereby granted, free of charge, to any person obtaining\n## a copy of this software and associated documentation files (the \"Software\"),\n## to deal in the Software without restriction, including without limitation\n## the rights to use, copy, modify, merge, publish, distribute, sublicense,\n## and/or sell copies of the Software, and to permit persons to whom the\n## Software is furnished to do so, subject to the following conditions:\n##\n## The above copyright notice and this permission notice shall be included\n## in all copies or substantial portions of the Software.\n##\n## THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n## SOFTWARE.\n\nimport sys\nimport glob\nimport numpy as np\n\n#import matplotlib\n#matplotlib.use('Agg') # use Agg backend to generate matplotlib graphs without a running X server (https://stackoverflow.com/a/4935945)\nfrom matplotlib import pyplot as plt\nfrom matplotlib import gridspec\nfrom pylab import rcParams\n\nplt.style.use('classic') \nrcParams['figure.figsize'] = 12, 6.5\nrcParams['legend.fontsize'] = 14\nrcParams['font.family'] = 'sans-serif'\nrcParams['mathtext.fontset'] = 'dejavusans'\n\n\n#plt.style.use('classic')\ncolors = ['r','g','b','c','m']\n\n\nif len(sys.argv)>1:\n\tfile_names = (sys.argv)[1:] # Load data from given CSV(s)\nelse:\n\tfile_names = glob.glob('*.csv') # Load data from CSV\n\nfor file_name in file_names:\n\t\n\tfig = plt.figure(figsize=(8, 16.66)) \n\tgs = gridspec.GridSpec(5, 1) #, width_ratios=[1, 1.25]\n\n\tname=np.genfromtxt(file_name,delimiter='\\t',skip_header=0,max_rows=1,comments=\"[]\",dtype=\"|U\")[1]\n\tprint('Plotting {:s} ...'.format(name))\n\tdiscretization=np.genfromtxt(file_name,delimiter='\\t',skip_header=1,max_rows=1,comments=\"[]\")[1]\n\t\n\tx0x1=np.genfromtxt(file_name,delimiter='\\t',skip_header=2,max_rows=1,comments=\"[]\")[1:]\n\txi=np.genfromtxt(file_name,delimiter='\\t',skip_header=3,max_rows=1,comments=\"[]\")[1]\n\txRef=[x0x1[0],xi,xi,x0x1[1]]\n\t\n\tgamma=np.genfromtxt(file_name,delimiter='\\t',skip_header=4,max_rows=1,comments=\"[]\")[1]\n\tuL=np.genfromtxt(file_name,delimiter='\\t',skip_header=5,max_rows=1,comments=\"[]\")[1:]\n\tuR=np.genfromtxt(file_name,delimiter='\\t',skip_header=6,max_rows=1,comments=\"[]\")[1:]\n\tt1=np.genfromtxt(file_name,delimiter='\\t',skip_header=7,max_rows=1,comments=\"[]\")[1]\n\t\n\tpRef=[uL[2],uL[2],uR[2],uR[2]]\n\tvRef=[uL[1],uL[1],uR[1],uR[1]]\n\t\n\t# Convert inital condition [\\rho,v,p] to [\\rho,\\mu,\\epsilon]\n\tuL[1]=uL[0]*uL[1]\n\tuL[2]=uL[2]/(gamma-1.0)+0.5*uL[1]*uL[1]/uL[0]\n\t\n\tuR[1]=uR[0]*uR[1]\n\tuR[2]=uR[2]/(gamma-1.0)+0.5*uR[1]*uR[1]/uR[0]\n\t\n\trhoRef=[uL[0],uL[0],uR[0],uR[0]]\n\tmuRef=[uL[1],uL[1],uR[1],uR[1]]\n\tepsilonRef=[uL[2],uL[2],uR[2],uR[2]]\n\n\t\n\tdata = np.genfromtxt(file_name,delimiter='\\t',skip_header=9)\n\t\n\tax0 = plt.subplot(gs[0])\n\t\n\tplt.title(('{:s} shock tube problem, $\\gamma='.format(name))+'{:<.1f}'.format(gamma)+'$')\n\t\n\tax0.margins(0.01,0.05)\n\tplt.grid(True)\n\tplt.ylabel(\"$\\\\rho\\,[(\\mathrm{kg})/\\mathrm{m}^{3}]$\")\n\tax0.plot(xRef,rhoRef,color=colors[0],linewidth=1.5,linestyle='--')\n\tax0.plot(data[:,0],data[:,1],color=colors[0],linewidth=1.5,linestyle='-')\n\t\n\tax0.plot([],[],color='0',linewidth=1.5,linestyle='--',label='$t='+'{:<.3f}'.format(0)+'\\mathrm{s}$')\n\tax0.plot([],[],color='0',linewidth=1.5,linestyle='-',label='$t='+'{:<.3f}'.format(t1)+'\\mathrm{s}$')\n\tplt.legend(numpoints=3,loc=1)\n\t\n\tax1 = plt.subplot(gs[1])\n\tax1.margins(0.01,0.05)\n\tplt.grid(True)\n\tplt.ylabel(\"$\\mu\\,[(\\mathrm{kg}\\mathrm{m}\\mathrm{s}^{-1})/\\mathrm{m}^{3}]$\")\n\tax1.plot(data[:,0],data[:,2],color=colors[1],linewidth=1.5,linestyle='-')\n\tax1.plot(xRef,muRef,color=colors[1],linewidth=1.5,linestyle='--')\n\t\n\tax2 = 
plt.subplot(gs[2])\n\tax2.margins(0.01,0.05)\n\tplt.grid(True)\n\tplt.xlabel(\"$x\\,[\\mathrm{m}]$\")\n\tplt.ylabel(\"$\\epsilon\\,[(\\mathrm{kg}\\mathrm{m}^2\\mathrm{s}^{-2})/\\mathrm{m}^{3}]$\")\n\tax2.plot(data[:,0],data[:,3],color=colors[2],linewidth=1.5,linestyle='-')\n\tax2.plot(xRef,epsilonRef,color=colors[2],linewidth=1.5,linestyle='--')\n\t\n\tax3 = plt.subplot(gs[3])\n\tax3.margins(0.01,0.05)\n\tplt.grid(True)\n\tplt.xlabel(\"$x\\,[\\mathrm{m}]$\")\n\tplt.ylabel(\"$p\\,[(\\mathrm{kg}\\mathrm{m}\\mathrm{s}^{-2})/\\mathrm{m}^{2}]$\")\n\tax3.plot(data[:,0],data[:,4],color=colors[3],linewidth=1.5,linestyle='-')\n\tax3.plot(xRef,pRef,color=colors[3],linewidth=1.5,linestyle='--')\n\t\n\tax4 = plt.subplot(gs[4])\n\tax4.margins(0.01,0.05)\n\tplt.grid(True)\n\tplt.xlabel(\"$x\\,[\\mathrm{m}]$\")\n\tplt.ylabel(\"$v\\,[\\mathrm{m}\\mathrm{s}^{-1}]$\")\n\tax4.plot(data[:,0],data[:,6],color=colors[4],linewidth=1.5,linestyle='-')\n\tax4.plot(xRef,vRef,color=colors[4],linewidth=1.5,linestyle='--')\n\t\n\tplt.tight_layout()\n\tplt.subplots_adjust(wspace=0.2)\n\tplt.savefig('{:s}_t{:.5e}_exactPv.pdf'.format(name,t1), bbox_inches='tight', pad_inches=0.1, dpi=600,facecolor='w', edgecolor='w')\n\tplt.savefig('{:s}_t{:.5e}_exactPv.png'.format(name,t1), bbox_inches='tight', pad_inches=0.1, dpi=75,facecolor='w', edgecolor='w')\n\tplt.close()\n","sub_path":"output/plot_Pv.py","file_name":"plot_Pv.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"541337274","text":"import os\nimport sqlite3\nfrom os.path import join\n\n\nclass DatabaseAPI:\n    def __init__(self, db_folder):\n        self.db_path = join(db_folder, 'InstaPy', 'db', 'instapy.db')\n\n    def insert_profile(self, name, bio, bio_url, alias_name, posts_num, follower, following, is_private):\n        query = \"INSERT INTO crawled_profile (name, bio, bio_url, alias_name, posts_num, follower, following, is_private) \" \\\n                \"VALUES ('{}', '{}', '{}', '{}', {}, {}, {}, {});\"\\\n            .format(name, bio, bio_url, alias_name, posts_num, follower, following, is_private)\n\n        connection = sqlite3.connect(self.db_path)\n        with connection:\n            connection.row_factory = sqlite3.Row\n            cursor = connection.cursor()\n            try:\n                cursor.execute(query)\n            except Exception as e:\n                # profile already exists: update its counters instead\n                query = \"UPDATE crawled_profile SET posts_num={}, follower={}, following={}, is_private={} WHERE name='{}';\"\\\n                    .format(posts_num, follower, following, is_private, name)\n                connection.row_factory = sqlite3.Row\n                cursor = connection.cursor()\n                cursor.execute(query)\n\n\n    def insert_post(self, profile_name, link, crawling_order, preview_image_url, image_url, likes, comments):\n        query = \"INSERT INTO post (profile_name, link, crawling_order, preview_image_url, image_url, likes, comments, is_crawled) \" \\\n                \"VALUES ('{}', '{}', {}, '{}', '{}', {}, {}, 0);\"\\\n            .format(profile_name, link, crawling_order, preview_image_url, image_url, likes, comments)\n\n        connection = sqlite3.connect(self.db_path)\n        with connection:\n            connection.row_factory = sqlite3.Row\n            cursor = connection.cursor()\n            try:\n                cursor.execute(query)\n            except Exception as e:\n                print(e)\n\n    def insert_liker(self, name, post_link):\n        query = \"INSERT INTO liker (name, post_link) VALUES ('{}', '{}');\".format(name, post_link)\n\n        connection = sqlite3.connect(self.db_path)\n        with connection:\n            connection.row_factory = sqlite3.Row\n            cursor = connection.cursor()\n            cursor.execute(query)\n\n    def insert_commenter(self, name, post_link, comment):\n        query = \"INSERT INTO commenter (name, 
post_link, comment) VALUES ('{}', '{}', '{}');\".format(name, post_link, comment)\n\n        connection = sqlite3.connect(self.db_path)\n        with connection:\n            connection.row_factory = sqlite3.Row\n            cursor = connection.cursor()\n            cursor.execute(query)\n\n    def load_posts_links(self, profile, posts_number, is_crawled = 0):\n        query = \"SELECT link FROM post WHERE profile_name = '{}' AND is_crawled = {} ORDER BY crawling_order LIMIT {};\"\\\n            .format(profile, is_crawled, posts_number)\n\n        results = []\n\n        connection = sqlite3.connect(self.db_path)\n        with connection:\n            connection.row_factory = sqlite3.Row\n\n            try:\n                results = connection.execute(query).fetchall()\n                results = [dict(row_proxy)['link'] for row_proxy in results]\n            except Exception as e:\n                print(e)\n\n        return results\n\n    def update_post(self, link, image_url, likes, comments):\n        query = \"UPDATE post SET image_url='{}', likes={}, comments={}, is_crawled=1 WHERE link='{}';\"\\\n            .format(image_url, likes, comments, link)\n\n        connection = sqlite3.connect(self.db_path)\n        with connection:\n            connection.row_factory = sqlite3.Row\n            cursor = connection.cursor()\n            cursor.execute(query)\n","sub_path":"util/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"270092882","text":"#-------------------------------------------------------------------------------\n# Name:        module1\n# Purpose:\n#\n# Author:      Administrator\n#\n# Created:     05/01/2013\n# Copyright:   (c) Administrator 2013\n# Licence:     <your licence>\n#-------------------------------------------------------------------------------\n\nfrom scipy.stats import norm\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\n# read data from a text file. One number per line\narch = [1,2,3,4,5,56,6,7,88,3,2,2,4,4,6,7,8,8,4,3,55,6,7,8]\n##datos = []\n##for item in open(arch,'r'):\n##    item = item.strip()\n##    if item != '':\n##        try:\n##            datos.append(float(item))\n##        except ValueError:\n##            pass\n\ndatos = arch\n# best fit of data\n(mu, sigma) = norm.fit(datos)\n\n# the histogram of the data\nn, bins, patches = plt.hist(datos, 60, normed=1, facecolor='green', alpha=0.75)\n\n# add a 'best fit' line\ny = mlab.normpdf( bins, mu, sigma)\nl = plt.plot(bins, y, 'r--', linewidth=2)\n\n#plot\nplt.xlabel('Smarts')\nplt.ylabel('Probability')\nplt.title(r'$\\mathrm{Histogram\\ of\\ IQ:}\\ \\mu=%.3f,\\ \\sigma=%.3f$' %(mu, sigma))\nplt.grid(True)\n\nplt.show()","sub_path":"Fit Normal Distribution to Histogram.py","file_name":"Fit Normal Distribution to Histogram.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"384146553","text":"input = open(\"candy_input1.txt\",\"r\")\r\n#input = open(\"candy_input2.txt\",\"r\")\r\ncandy = input.readlines()\r\ninput.close()\r\nmycandy = []\r\nfor i in range(len(candy)):\r\n\tmycandy.append(candy[i].split(','))\r\n\r\nfor j in range(len(mycandy)):\r\n\tmycandy[j][len(mycandy[0])-1] = mycandy[j][len(mycandy[0])-1].strip()\r\nkey = True\r\nwhile key:\r\n\tm = 0\r\n\tlol = []\r\n\t# Check horizontal matches\r\n\twhile m < (len(mycandy)):\r\n\t\tn = 0\r\n\t\twhile n < (len(mycandy[0])-2):\r\n\t\t\tif (mycandy[m][n] != \"0\") and (mycandy[m][n] == mycandy[m][n+1] == mycandy[m][n+2]):\r\n\t\t\t\tlol.append([m,n])\r\n\t\t\t\tlol.append([m,n+1])\r\n\t\t\t\tlol.append([m,n+2])\r\n\t\t\tn = n + 1\r\n\t\tm = m + 1\r\n\t# Check vertical matches\r\n\tn = 0\r\n\twhile n < (len(mycandy[0])):\r\n\t\tm = 0\r\n\t\twhile m < 
(len(mycandy)-2):\r\n\t\t\tif (mycandy[m][n] != \"0\") and (mycandy[m][n] == mycandy[m+1][n] == mycandy[m+2][n]):\r\n\t\t\t\tlol.append([m,n])\r\n\t\t\t\tlol.append([m+1,n])\r\n\t\t\t\tlol.append([m+2,n])\r\n\t\t\tm = m + 1\r\n\t\tn = n + 1\r\n\tif len(lol)==0:\r\n\t\tkey = False\r\n\t\tbreak\r\n\t# Remove duplicates\r\n\taoa = sorted(lol)\r\n\tnow = aoa[0]\r\n\tk = 1\r\n\twhile k < len(aoa):\r\n\t\tif aoa[k] == now:\r\n\t\t\tlol.remove(aoa[k])\r\n\t\telse:\r\n\t\t\tnow = aoa[k]\r\n\t\tk = k + 1\r\n\taoa = sorted(lol)\r\n\t# Mark the cells to be cleared as -1\r\n\tfor x in aoa:\r\n\t\tmycandy[x[0]][x[1]] = \"-1\"\r\n\t# Count the number of zeros to fill in\r\n\tp = 0\r\n\tss = [] # number of cells to clear in each column\r\n\twhile p < len(mycandy[0]):\r\n\t\tq = 0\r\n\t\tcount = 0\r\n\t\twhile q < len(mycandy):\r\n\t\t\tif mycandy[q][p] == \"-1\":\r\n\t\t\t\tcount = count + 1\r\n\t\t\tq = q + 1\r\n\t\tss.append(count)\r\n\t\tp = p + 1\r\n\t# Sort the positions to clear by column\r\n\tpop = []\r\n\tfor h in range(len(mycandy[0])):\r\n\t\tfor g in aoa:\r\n\t\t\tif g[1] == h:\r\n\t\t\t\tpop.append(g) \r\n\t#crush\r\n\tr = 0\r\n\twhile r < len(mycandy[0]):\r\n\t\tif ss[r] > 0:\r\n\t\t\tkk = []\r\n\t\t\tfor s in pop:\r\n\t\t\t\tif s[1] == r:\r\n\t\t\t\t\tkk.append(s) # coordinates to clear in this column\r\n\t\t\tt = 0\r\n\t\t\twhile t < ss[r]:\r\n\t\t\t\ttarget = kk[t]\r\n\t\t\t\tu = target[0]\r\n\t\t\t\twhile u > 0:\r\n\t\t\t\t\tmycandy[u][r] = mycandy[u-1][r]\r\n\t\t\t\t\tu = u - 1\r\n\t\t\t\tt = t + 1\r\n\t\t\tv = 0\r\n\t\t\twhile v < ss[r]:\r\n\t\t\t\tmycandy[v][r] = \"0\"\r\n\t\t\t\tv = v + 1\r\n\t\tr = r + 1\r\nprint(mycandy)\r\nfile_name = \"candy_output1.txt\"\r\n#file_name = \"candy_output2.txt\"\r\nmyfile = open(file_name,\"w\")\r\nmyformate=\"%s,%s,%s,%s,%s\\n\"\r\nz=0\r\nwhile z < len(mycandy):\r\n\tmyfile.writelines(myformate%(mycandy[z][0],mycandy[z][1],mycandy[z][2],mycandy[z][3],mycandy[z][4]))\r\n\tz+=1","sub_path":"hw11.py","file_name":"hw11.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"436083420","text":"\"\"\"\n    FDV2 classification dataset.\n\"\"\"\n\nfrom .fdv1_cls_dataset import FDV1MetaInfo\n\n\nclass FDV2MetaInfo(FDV1MetaInfo):\n    \"\"\"\n    Descriptor of FDV2 dataset.\n    \"\"\"\n    def __init__(self):\n        super(FDV2MetaInfo, self).__init__()\n        self.label = \"FDV2\"\n        self.short_label = \"fdv2\"\n        self.root_dir_name = \"fdv2\"\n","sub_path":"gluon/datasets/fdv2_cls_dataset.py","file_name":"fdv2_cls_dataset.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"424665834","text":"import sys\nBIN = '../../'\nsys.path.append(BIN)\nimport os.path\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle\nimport time\nimport datetime\n\nimport torch\nimport torch.nn as nn\n# import torch.optim as optim\nimport torch.utils.data\n\nfrom torch.utils.data import TensorDataset\nfrom fastai.callbacks.tracker import SaveModelCallback\n\nimport my_matplotlib_style as ms\n\nfrom fastai import basic_train, basic_data\nfrom fastai.callbacks import ActivationStats\nfrom fastai import train as tr\n\nfrom my_nn_modules import AE_basic, AE_bn, AE_LeakyReLU, AE_bn_LeakyReLU, AE_big, AE_3D_50, AE_3D_50_bn_drop, AE_3D_50cone, AE_3D_100, AE_3D_100_bn_drop, AE_3D_100cone_bn_drop, AE_3D_200, AE_3D_200_bn_drop, AE_3D_500cone_bn\nfrom my_nn_modules import get_data, RMSELoss\nfrom utils import plot_activations\n\nimport matplotlib as mpl\nmpl.rc_file(BIN + 'my_matplotlib_rcparams')\n\nprint('torch.cuda.is_available(): '
+ str(torch.cuda.is_available()))\n\nlr = 1e-3\nwds = 1e-5\npp = 0\n\nsave_dict = {}\n\n# Load data\n# train = pd.read_pickle(BIN + 'processed_data/aod/scaled_all_jets_partial_train.pkl')\n# test = pd.read_pickle(BIN + 'processed_data/aod/scaled_all_jets_partial_test.pkl')\n# train = pd.read_pickle(BIN + 'processed_data/aod/scaled_all_jets_partial_train_10percent.pkl') # Smaller dataset fits in memory on Kebnekaise\n# test = pd.read_pickle(BIN + 'processed_data/aod/scaled_all_jets_partial_test_10percent.pkl')\ntrain = pd.read_pickle(BIN + 'processed_data/aod/custom_normalized_train_10percent.pkl')\ntest = pd.read_pickle(BIN + 'processed_data/aod/custom_normalized_test_10percent.pkl')\n\nbs = 2048\n# Create TensorDatasets\ntrain_ds = TensorDataset(torch.tensor(train.values, dtype=torch.float), torch.tensor(train.values, dtype=torch.float))\nvalid_ds = TensorDataset(torch.tensor(test.values, dtype=torch.float), torch.tensor(test.values, dtype=torch.float))\n# Create DataLoaders\ntrain_dl, valid_dl = get_data(train_ds, valid_ds, bs=bs)\n# Return DataBunch\ndb = basic_data.DataBunch(train_dl, valid_dl)\n\n# loss_func = RMSELoss()\nloss_func = nn.MSELoss()\n\nbn_wd = False # Don't use weight decay for batchnorm layers\ntrue_wd = True # wd will be used for all optimizers\n\n\n# Figures setup\nplt.close('all')\nunit_list = ['[GeV]', '[rad]', '[rad]', '[GeV]']\nvariable_list = [r'$p_T$', r'$\\eta$', r'$\\phi$', r'$E$']\nline_style = ['--', '-']\ncolors = ['orange', 'c']\nmarkers = ['*', 's']\n\n\ndef get_unnormalized_reconstructions(model, df, train_mean, train_std, idxs=None):\n if idxs is not None:\n data = torch.tensor(df[idxs[0]:idxs[1]].values)\n else:\n data = torch.tensor(df.values)\n pred = model(data).detach().numpy()\n pred = np.multiply(pred, train_std.values)\n pred = np.add(pred, train_mean.values)\n data = np.multiply(data, train_std.values)\n data = np.add(data, train_mean.values)\n return pred, data\n\n\ndef train_model(model, epochs, lr, wd, module_string):\n plt.close('all')\n learn = basic_train.Learner(data=db, model=model, loss_func=loss_func, wd=wd, callback_fns=ActivationStats, bn_wd=bn_wd, true_wd=true_wd)\n start = time.perf_counter()\n learn.fit_one_cycle(epochs, max_lr=lr, wd=wd, callbacks=[SaveModelCallback(learn, every='improvement', monitor='valid_loss', name='best_%s_bs%s_lr%.0e_wd%.0e' % (module_string, bs, lr, wd))])\n end = time.perf_counter()\n delta_t = end - start\n return learn, delta_t\n\n\ndef get_mod_folder(module_string, lr, pp, wd):\n if pp is None:\n curr_mod_folder = '%s_bs%d_lr%.0e_wd%.0e_ppNA/' % (module_string, bs, lr, wd)\n else:\n curr_mod_folder = '%s_bs%d_lr%.0e_wd%.0e_p%.0e/' % (module_string, bs, lr, wd, pp)\n return curr_mod_folder\n\n\ndef save_plots(learn, module_string, lr, wd, pp):\n # Make and save figures\n curr_mod_folder = get_mod_folder(module_string, lr, pp, wd)\n curr_save_folder = curr_mod_folder\n if not os.path.exists(curr_save_folder):\n os.mkdir(curr_save_folder)\n\n # Weight activation stats\n plot_activations(learn, save=curr_save_folder + 'weight_activation')\n\n # Plot losses\n batches = len(learn.recorder.losses)\n epos = len(learn.recorder.val_losses)\n val_iter = (batches / epos) * np.arange(1, epos + 1, 1)\n loss_name = str(loss_func).split(\"(\")[0]\n plt.figure()\n plt.plot(learn.recorder.losses, label='Train')\n plt.plot(val_iter, learn.recorder.val_losses, label='Validation', color='orange')\n plt.yscale(value='log')\n plt.legend()\n plt.ylabel(loss_name)\n plt.xlabel('Batches processed')\n fig_name = 'losses'\n 
plt.savefig(curr_save_folder + fig_name)\n plt.figure()\n plt.plot(learn.recorder.val_losses, label='Validation', color='orange')\n plt.title('Validation loss')\n plt.legend()\n plt.ylabel(loss_name)\n plt.yscale('log')\n plt.xlabel('Epoch')\n # for i_val, val in enumerate(learn.recorder.val_losses):\n # plt.text(i_val, val, str(val), horizontalalignment='center')\n fig_name = 'losses_val'\n plt.savefig(curr_save_folder + fig_name + '.png')\n with open(curr_save_folder + 'losses.txt', 'w') as f:\n for i_val, val in enumerate(learn.recorder.val_losses):\n f.write('Epoch %d Validation %s: %e Training %s: %e\\n' % (i_val, loss_name, val, loss_name, learn.recorder.losses[(i_val + 1) * (int(batches / epos - 1))]))\n\n # Uncomment this in order to plot histograms and residuals at the end of training\n # Histograms\n # idxs = (0, 100000) # Choose events to compare\n # pred, data = get_unnormalized_reconstructions(learn.model, df=test_x, idxs=idxs, train_mean=train_mean, train_std=train_std)\n # data = test[0:100000].values\n # pred = learn.model(torch.tensor(data, dtype=torch.float))\n #\n # alph = 0.8\n # n_bins = 80\n # for kk in np.arange(27):\n # plt.figure()\n # n_hist_data, bin_edges, _ = plt.hist(data[:, kk], color=colors[1], label='Input', alpha=1, bins=n_bins)\n # n_hist_pred, _, _ = plt.hist(pred[:, kk], color=colors[0], label='Output', alpha=alph, bins=bin_edges)\n # plt.suptitle(train.columns[kk])\n # # plt.xlabel(variable_list[kk] + ' ' + unit_list[kk])\n # plt.xlabel(train.columns[kk])\n # plt.ylabel('Number of events')\n # plt.yscale('log')\n # fig_name = 'hist_%s' % train.columns[kk]\n # plt.savefig(curr_save_folder + fig_name)\n\n # # Plot input on top of output\n # idxs = (0, 100) # Choose events to compare\n # pred, data = get_unnormalized_reconstructions(learn.model, df=test_x, idxs=idxs, train_mean=train_mean, train_std=train_std)\n #\n # for kk in np.arange(4):\n # plt.figure()\n # plt.plot(data[:, kk], color=colors[1], label='Input', linestyle=line_style[1], marker=markers[1])\n # plt.plot(pred[:, kk], color=colors[0], label='Output', linestyle=line_style[0], marker=markers[0])\n # plt.suptitle(train.columns[kk])\n # plt.xlabel('Event')\n # plt.ylabel(variable_list[kk] + ' ' + unit_list[kk])\n # plt.legend()\n # ms.sciy()\n # fig_name = 'plot_%s' % train_x.columns[kk]\n # plt.savefig(curr_save_folder + fig_name)\n\n # alph = 0.8\n # n_bins = 50\n # for kk in np.arange(4):\n # plt.figure()\n # n_hist_data, bin_edges, _ = plt.hist(data[:, kk], color=colors[1], label='Input', alpha=1, bins=n_bins)\n # n_hist_pred, _, _ = plt.hist(pred[:, kk], color=colors[0], label='Output', alpha=alph, bins=bin_edges)\n # plt.suptitle(train_x.columns[kk])\n # plt.xlabel(variable_list[kk] + ' ' + unit_list[kk])\n # plt.ylabel('Number of events')\n # ms.sciy()\n # plt.legend()\n # fig_name = 'lowpt_hist_%s' % train_x.columns[kk]\n # plt.savefig(curr_save_folder + fig_name)\n\n return curr_mod_folder\n\n\ndef train_and_save(model, epochs, lr, wd, pp, module_string, save_dict):\n if pp is None:\n curr_param_string = 'bs%d_lr%.0e_wd%.0e_ppNA' % (bs, lr, wd)\n else:\n curr_param_string = 'bs%d_lr%.0e_wd%.0e_pp%.0e' % (bs, lr, wd, pp)\n\n learn, delta_t = train_model(model, epochs=epochs, lr=lr, wd=wd, module_string=module_string)\n time_string = str(datetime.timedelta(seconds=delta_t))\n curr_mod_folder = save_plots(learn, module_string, lr, wd, pp)\n\n val_losses = learn.recorder.val_losses\n train_losses = learn.recorder.losses\n min_val_loss = np.min(val_losses)\n min_epoch = 
np.argmin(val_losses)\n\n save_dict[module_string].update({curr_param_string: {}})\n save_dict[module_string][curr_param_string].update({'val_losses': val_losses, 'train_losses': train_losses, 'hyper_parameter_names': [\n 'bs', 'lr', 'wd', 'pp'], 'hyper_parameters': [bs, lr, wd, pp], 'training_time_seconds': delta_t})\n curr_save_folder = get_mod_folder(module_string, lr, pp, wd)\n with open(curr_save_folder + 'save_dict%s.pkl' % curr_param_string, 'wb') as f:\n pickle.dump(save_dict, f, protocol=pickle.HIGHEST_PROTOCOL)\n learn.save(curr_mod_folder.split('/')[0])\n with open(curr_save_folder + 'summary.txt', 'w') as f:\n f.write('%s Minimum validation loss: %e epoch: %d lr: %.1e wd: %.1e p: %s Training time: %s\\n' % (module_string, min_val_loss, min_epoch, lr, wd, pp, time_string))\n\n\none_epochs = 1\none_lr = 1e-2\none_wd = 1e-2\none_pp = None\none_module = AE_bn_LeakyReLU\n\n\ndef one_run(module, epochs, lr, wd, pp):\n module_string = str(module).split(\"'\")[1].split(\".\")[1]\n save_dict[module_string] = {}\n if pp is not None:\n print('Training %s with lr=%.1e, p=%.1e, wd=%.1e ...' % (module_string, lr, pp, wd))\n curr_model_p = module(dropout=pp)\n train_and_save(curr_model_p, epochs, lr, wd, pp, module_string, save_dict)\n print('...done')\n else:\n print('Training %s with lr=%.1e, p=None, wd=%.1e ...' % (module_string, lr, wd))\n curr_model = module([27, 200, 200, 200, 14, 200, 200, 200, 27])\n train_and_save(curr_model, epochs, lr, wd, pp, module_string, save_dict)\n print('...done')\n\n\none_run(module=one_module, epochs=one_epochs, lr=one_lr, wd=one_wd, pp=one_pp)\n","sub_path":"examples/27D/27D_train.py","file_name":"27D_train.py","file_ext":"py","file_size_in_byte":10049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"280653203","text":"from django.test import TestCase\n\nfrom builds.models import Version\nfrom builds.version_slug import VersionSlugField\nfrom projects.models import Project\n\n\nclass VersionSlugFieldTests(TestCase):\n fixtures = [\"eric\", \"test_data\"]\n\n def setUp(self):\n self.pip = Project.objects.get(slug='pip')\n\n def test_saving(self):\n version = Version.objects.create(\n verbose_name='1.0',\n project=self.pip)\n self.assertEqual(version.slug, '1.0')\n\n def test_normalizing(self):\n version = Version.objects.create(\n verbose_name='1%0',\n project=self.pip)\n self.assertEqual(version.slug, '1-0')\n\n def test_normalizing_slashes(self):\n version = Version.objects.create(\n verbose_name='releases/1.0',\n project=self.pip)\n self.assertEqual(version.slug, 'releases-1.0')\n\n def test_uniqueness(self):\n version = Version.objects.create(\n verbose_name='1!0',\n project=self.pip)\n self.assertEqual(version.slug, '1-0')\n\n version = Version.objects.create(\n verbose_name='1%0',\n project=self.pip)\n self.assertEqual(version.slug, '1-0_a')\n\n version = Version.objects.create(\n verbose_name='1?0',\n project=self.pip)\n self.assertEqual(version.slug, '1-0_b')\n\n def test_uniquifying_suffix(self):\n field = VersionSlugField(populate_from='foo')\n self.assertEqual(field.uniquifying_suffix(0), '_a')\n self.assertEqual(field.uniquifying_suffix(25), '_z')\n self.assertEqual(field.uniquifying_suffix(26), '_ba')\n self.assertEqual(field.uniquifying_suffix(52), '_ca')\n","sub_path":"readthedocs/rtd_tests/tests/test_version_slug.py","file_name":"test_version_slug.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"404406633","text":"#!/usr/bin/env python3\n\nfrom struct import Struct, pack, unpack\nfrom io import BytesIO\nfrom sftp.constants import *\nimport errno, sys, os\n\nerrno_to_portable = {\n 0: SSH2_FX_OK,\n errno.ENOENT: SSH2_FX_NO_SUCH_FILE,\n errno.ENOTDIR: SSH2_FX_NO_SUCH_FILE,\n errno.EBADF: SSH2_FX_NO_SUCH_FILE,\n errno.ELOOP: SSH2_FX_NO_SUCH_FILE,\n errno.EPERM: SSH2_FX_PERMISSION_DENIED,\n errno.EACCES: SSH2_FX_PERMISSION_DENIED,\n errno.EFAULT: SSH2_FX_PERMISSION_DENIED,\n errno.ENAMETOOLONG: SSH2_FX_BAD_MESSAGE,\n errno.EINVAL: SSH2_FX_BAD_MESSAGE,\n errno.ENOSYS: SSH2_FX_OP_UNSUPPORTED,\n -1: SSH2_FX_FAILURE\n}\n\nclass Protocol(object):\n Buffer = Struct('>IB')\n Int = Struct('>I')\n Int64 = Struct('>Q')\n\n def __init__(_, rsock, wsock):\n _.r, _.w = rsock, wsock\n _.__handles = []\n\n def __send(_, type, payload=b''):\n # + 1 for type.\n buf = _.Buffer.pack(len(payload) + 1, type) + payload\n print(\"<\", repr(buf), file=sys.stderr)\n _.w.write(buf)\n\n def __recv(_, len):\n return _.r.read(len)\n \n def __handle_new(_, use, name, fd, dirp):\n entry = { 'use': use, 'name': name, 'fd': fd, 'dirp': dirp }\n for i, handle in enumerate(_.__handles):\n if handle is None:\n _.__handles[i] = entry\n return i\n i = len(_.__handles)\n _.__handles.append(entry)\n return i\n\n def _process_init(_):\n client_version = unpack('>I', _.__buf.read(4))[0]\n print(\"Client version:\", client_version, file=sys.stderr)\n buf = pack('>I', SSH2_FILEXFER_VERSION)\n #buf += _.__put_string('') # posix-rename@openssh.com\n #buf += _.__put_string('') # statvfs@openssh.com\n #buf += _.__put_string('') # fstatvfs@openssh.com\n _.__send(SSH2_FXP_VERSION, buf)\n\n def _process_opendir(_):\n id, name = _.__get_int(), _.__get_string()\n try:\n handle = _.__handle_new('HANDLE_DIR', name, -1, 0)\n _.__send(SSH2_FXP_HANDLE, _.Int.pack(id) + _.__put_string(_.Int.pack(handle)))\n status = SSH2_FX_OK\n except OSError as e:\n status = errno_to_portable.get(e.errno, errno_to_portable[-1])\n if status != SSH2_FX_OK:\n _.__send(SSH2_FXP_STATUS, _.Int.pack(id) + _.Int.pack(status))\n\n def _process_readdir(_):\n status = SSH2_FX_FAILURE\n id, handle = _.__get_int(), _.__get_string()\n handle = _.Int.unpack(handle)[0]\n if 0 <= handle and handle < len(_.__handles):\n handle = _.__handles[handle]\n contents = os.listdir(handle['name'])\n if len(contents) <= handle['dirp']:\n status = SSH2_FX_EOF\n else:\n partial = contents[handle['dirp']:handle['dirp'] + 10]\n handle['dirp'] += len(partial)\n names = []\n for name in partial:\n long_name = os.path.join(handle['name'], name)\n names.append((name, long_name, os.lstat(long_name)))\n _.__send_names(id, *names)\n status = SSH2_FX_OK\n if status != SSH2_FX_OK:\n _.__send(SSH2_FXP_STATUS, _.Int.pack(id) + _.Int.pack(status))\n\n def _process_open(_):\n id, name, pflags = _.__get_int(), _.__get_string(), _.__get_int()\n a = _.__get_attrib()\n flags = _.__flags_from_portable(pflags)\n mode = a['perm'] if 'perm' in a else 0o666\n try:\n fd = os.open(name, flags, mode)\n handle = _.__handle_new('HANDLE_FILE', name, fd, None)\n _.__send(SSH2_FXP_HANDLE, _.Int.pack(id) + _.__put_string(_.Int.pack(handle)))\n status = SSH2_FX_OK\n except OSError as e:\n status = errno_to_portable.get(e.errno, errno_to_portable[-1])\n if status != SSH2_FX_OK:\n _.__send(SSH2_FXP_STATUS, _.Int.pack(id) + _.Int.pack(status))\n\n def _process_close(_):\n id, handle = _.__get_int(), _.__get_string()\n handle = _.Int.unpack(handle)[0]\n status = SSH2_FX_OK\n if 0 <= handle and handle < 
len(_.__handles):\n try:\n if _.__handles[handle]['use'] == 'HANDLE_FILE':\n os.close(_.__handles[handle]['fd'])\n elif _.__handles[handle]['use'] == 'HANDLE_DIR':\n pass\n except OSError as e:\n status = errno_to_portable.get(e.errno, errno_to_portable[-1])\n _.__handles[handle] = None\n _.__send(SSH2_FXP_STATUS, _.Int.pack(id) + _.Int.pack(status))\n\n def _process_read(_):\n status = SSH2_FX_FAILURE\n id, handle, off, leng = \\\n _.__get_int(), _.__get_string(), _.__get_int64(), _.__get_int()\n print(\"read\", id, repr(handle), off, leng, file=sys.stderr)\n handle = _.Int.unpack(handle)[0]\n if 0 <= handle and handle < len(_.__handles):\n handle = _.__handles[handle]\n try:\n os.lseek(handle['fd'], off, 0)\n buf = os.read(handle['fd'], leng)\n if buf == b'' or buf is None:\n status = SSH2_FX_EOF\n else:\n status = SSH2_FX_OK\n _.__send(SSH2_FXP_DATA, _.Int.pack(id) + _.__put_string(buf))\n except OSError as e:\n status = errno_to_portable.get(e.errno, errno_to_portable[-1])\n if status != SSH2_FX_OK:\n _.__send(SSH2_FXP_STATUS, _.Int.pack(id) + _.Int.pack(status))\n\n def _process_realpath(_):\n id, path = _.__get_int(), _.__get_string()\n if path == b'.':\n path = os.getcwd()\n _.__send_names(id, (path, path, os.stat(path)))\n\n def __send_names(_, id, *p):\n buf = _.__put_int(id) + _.__put_int(len(p))\n for name, long_name, attr in p:\n buf += _.__put_string(name) + _.__put_string(long_name) \\\n + _.__encode_attrib(attr)\n _.__send(SSH2_FXP_NAME, buf)\n\n def _process_stat(_):\n status = SSH2_FX_OK\n id, path = _.__get_int(), _.__get_string()\n try:\n st = os.stat(path)\n buf = _.__encode_attrib(st)\n _.__send(SSH2_FXP_ATTRS, _.Int.pack(id) + buf)\n except OSError as e:\n status = errno_to_portable.get(e.errno, errno_to_portable[-1])\n if status != SSH2_FX_OK:\n _.__send(SSH2_FXP_STATUS, _.Int.pack(id) + _.Int.pack(status))\n\n def _process_lstat(_):\n status = SSH2_FX_OK\n id, path = _.__get_int(), _.__get_string()\n try:\n st = os.lstat(path)\n buf = _.__encode_attrib(st)\n _.__send(SSH2_FXP_ATTRS, _.Int.pack(id) + buf)\n except OSError as e:\n status = errno_to_portable.get(e.errno, errno_to_portable[-1])\n if status != SSH2_FX_OK:\n _.__send(SSH2_FXP_STATUS, _.Int.pack(id) + _.Int.pack(status))\n\n def _process_fstat(_):\n status = SSH2_FX_FAILURE\n id, handle = _.__get_int(), _.__get_string()\n handle = _.Int.unpack(handle)[0]\n if 0 <= handle and handle < len(_.__handles):\n handle = _.__handles[handle]\n try:\n st = os.fstat(handle['fd'])\n buf = _.__encode_attrib(st)\n _.__send(SSH2_FXP_ATTRS, _.Int.pack(id) + buf)\n status = SSH2_FX_OK\n except OSError as e:\n status = errno_to_portable.get(e.errno, errno_to_portable[-1])\n if status != SSH2_FX_OK:\n _.__send(SSH2_FXP_STATUS, _.Int.pack(id) + _.Int.pack(status))\n\n def __encode_attrib(_, st):\n flags = 0\n buf = b''\n if hasattr(st, 'st_size'):\n flags |= SSH2_FILEXFER_ATTR_SIZE\n buf += _.__put_int64(st.st_size)\n if hasattr(st, 'st_uid') and hasattr(st, 'st_gid'):\n flags |= SSH2_FILEXFER_ATTR_UIDGID\n buf += _.__put_int(st.st_uid) + _.__put_int(st.st_gid)\n if hasattr(st, 'st_mode'):\n flags |= SSH2_FILEXFER_ATTR_PERMISSIONS\n buf += _.__put_int(st.st_mode)\n if hasattr(st, 'st_atime') and hasattr(st, 'st_mtime'):\n flags |= SSH2_FILEXFER_ATTR_ACMODTIME\n buf += _.__put_int(st.st_atime) + _.__put_int(st.st_mtime)\n return _.__put_int(flags) + buf\n\n Handlers = {\n SSH2_FXP_INIT: '_process_init',\n SSH2_FXP_OPEN: '_process_open',\n SSH2_FXP_CLOSE: '_process_close',\n SSH2_FXP_READ: '_process_read',\n SSH2_FXP_REALPATH: 
'_process_realpath',\n SSH2_FXP_OPENDIR: '_process_opendir',\n SSH2_FXP_READDIR: '_process_readdir',\n SSH2_FXP_LSTAT: '_process_lstat',\n SSH2_FXP_FSTAT: '_process_fstat',\n SSH2_FXP_STAT: '_process_stat',\n }\n\n def read(_):\n buf = _.__recv(_.Buffer.size)\n print(\">\", repr(buf), file=sys.stderr)\n if buf == b'':\n raise EOFError('End of stream')\n if len(buf) != _.Buffer.size:\n raise EOFError('Short packet header')\n buf_len, type = _.Buffer.unpack(buf)\n print(buf_len, type, file=sys.stderr)\n buf_len -= 1 # Includes type which we've read already.\n if buf_len > 0:\n buf = _.__recv(buf_len)\n print(\">\", repr(buf), file=sys.stderr)\n if len(buf) != buf_len:\n raise EOFError('Short packet data')\n _.__buf = BytesIO(buf)\n else:\n _.__buf = BytesIO(b'')\n if type in Protocol.Handlers:\n return getattr(_, Protocol.Handlers[type])()\n return None\n\n def __get_int(_):\n return _.Int.unpack(_.__buf.read(_.Int.size))[0]\n\n def __put_int(_, v):\n if type(v) == float:\n v = int(v)\n return _.Int.pack(v)\n\n def __get_int64(_):\n return _.Int64.unpack(_.__buf.read(_.Int64.size))[0]\n\n def __put_int64(_, v):\n return _.Int64.pack(v)\n\n def __get_string(_):\n return _.__buf.read(_.__get_int())\n\n def __put_string(_, v):\n if type(v) == str:\n v = v.encode('utf-8')\n return _.__put_int(len(v)) + v\n\n def __get_attrib(_):\n a = { 'flags': _.__get_int() }\n if a['flags'] & SSH2_FILEXFER_ATTR_SIZE:\n a['size'] = _.__get_int64()\n if a['flags'] & SSH2_FILEXFER_ATTR_UIDGID:\n a['uid'] = _.__get_int()\n a['gid'] = _.__get_int()\n if a['flags'] & SSH2_FILEXFER_ATTR_PERMISSIONS:\n a['perm'] = _.__get_int()\n if a['flags'] & SSH2_FILEXFER_ATTR_ACMODTIME:\n a['atime'] = _.__get_int()\n a['mtime'] = _.__get_int()\n if a['flags'] & SSH2_FILEXFER_ATTR_EXTENDED:\n a['extended'] = {}\n count = _.__get_int()\n for i in range(count):\n type, data = _.__get_string(), _.__get_string()\n a['extended'][type] = data\n return a\n\n def __flags_from_portable(_, pflags):\n flags = 0\n if pflags & SSH2_FXF_READ and pflags & SSH2_FXF_WRITE:\n flags = os.O_RDWR\n elif pflags & SSH2_FXF_READ:\n flags = os.O_RDONLY\n elif pflags & SSH2_FXF_WRITE:\n flags = os.O_WRONLY\n if pflags & SSH2_FXF_CREAT:\n flags |= os.O_CREAT\n if pflags & SSH2_FXF_TRUNC:\n flags |= os.O_TRUNC\n if pflags & SSH2_FXF_EXCL:\n flags |= os.O_EXCL\n return flags\n","sub_path":"python3/sftp/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":11400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"588239380","text":"''' Build Python wrapper for RIPE benchmark\n\n@author Marcela S. 
Melara\n'''\n\nfrom distutils.core import setup, Extension\n\nmodule1 = Extension('ripe_attack_generator_py',\n include_dirs = ['source'],\n libraries = ['ripe64'],\n library_dirs = ['/home/pyronia/cpython/RIPE/build'],\n sources = ['python/ripe_attack_generator_py.c'],\n)\n\nsetup (name = 'RIPE for Python',\n version = '0.1',\n description = 'Python wrapper for the RIPE security benchmark',\next_modules = [module1])\n","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"461527838","text":"from django.conf.urls import patterns, url, include\nfrom django.views.generic import TemplateView\nfrom rest_framework import viewsets, routers\nfrom django.contrib.auth.models import User, Group\nfrom seotester.web.views import GroupViewSet, UserViewSet, CrawlList, CrawlDetail\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', UserViewSet)\nrouter.register(r'groups', GroupViewSet)\n\nurlpatterns = patterns(\n 'seotester.web.views',\n url(r'^$', 'index', name='index'),\n url(r'^check_links/(?P[0-9]+)/(?P[0-9]+)$', 'check_links', name='check_links'),\n url(r'^link/(?P[0-9]+)$', 'link_detail', name='link_detail'),\n url(r'^crawl/(?P[0-9]+)$', 'crawl_detail', name='crawl_detail'),\n url(r'^crawl/(?P[0-9]+)/links$', 'crawl_links', name='crawl_links'),\n url(r'^url/stats$', 'get_stats_for_url', name='get_stats_for_url'),\n)\n\nurlpatterns += patterns('',\n url(r'^', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n)\n\ncbv_urlpatterns = patterns('',\n url(r'^crawls/$', CrawlList.as_view()),\n url(r'^crawls/(?P[0-9]+)/$', CrawlDetail.as_view(), name=\"crawl-detail\"),\n)\n\nurlpatterns += format_suffix_patterns(cbv_urlpatterns)\n","sub_path":"seotester/seotester/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"99065439","text":"import os\nCSRF_ENABLED = False\nSECRET_KEY = os.environ['SECRET-KEY']\n\nADS_API_KEY= os.environ['KEY']\n\n''''from string environment variables creating a list of them\n then use that list to create a dictionary named (di)\n since i need a dictionary not string\n'''\nAUTHOR=os.environ['AUTHORS']\ndi={}\nfor name in (AUTHOR.split('&')):\n author=name.split(': ')\n di[author[0]]= author[1]\nAUTHORS=di\n\n\nFROM_EMAIL_ADDRESS= os.environ['FROM_EMAIL']\nKEYWORDS=['SAAO', 'KELT', 'Infrared Survey']\n\n''''from string environment variables making a list'''\nLIBRARIAN_EMAIL_ADDRESSES=os.environ['LIBRARIANS_EMAIL']\nemail =LIBRARIAN_EMAIL_ADDRESSES.split('&')\nLIBRARIAN_EMAIL_ADDRESSES=email\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"429693151","text":"import os \nimport os.path as osp\nimport cv2\nimport numpy as np\nimport glob\nimport copy\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\nfrom utils.bbox_transform import forward_convert\nfrom utils.bbox_transform import backward_convert\nfrom utils.bbox_transform import get_best_begin_point\nfrom utils.bbox_transform import reorder_vertexes_point\n\n\nclass_list = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field',\n 'small-vehicle', 'large-vehicle', 'ship',\n 
'tennis-court', 'basketball-court',\n 'storage-tank', 'soccer-ball-field',\n 'roundabout', 'harbor', 'swimming-pool', 'helicopter',\n 'container-crane',\n 'airport', 'helipad']\n\nclass_name_all = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field',\n 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',\n 'basketball-court', 'storage-tank', 'soccer-ball-field',\n 'roundabout', 'harbor', 'swimming-pool', 'helicopter']\nclass_id_less = [2,4,5,6,12]\nclass_name_less = [class_name_all[id] for id in class_id_less]\n\ndef makedirs(path):\n if not os.path.exists(path):\n os.makedirs(path)\n \ndef format_label(txt_list):\n format_data = []\n for i in txt_list:\n if len(i.split(' ')) < 9:\n continue\n if 'turntable' in i:\n i = i.replace('turntable', 'roundabout')\n # if i.split(' ')[-1].split('\\n')[0] == '1':\n # print(i)\n # continue\n format_data.append(\n [float(xy) for xy in i.split(' ')[:8]] + [class_list.index(i.split(' ')[8])]\n )\n\n if i.split(' ')[8] not in class_list:\n print('warning found a new label :', i.split(' ')[8])\n exit()\n return np.array(format_data)\n\n\ndef clip_image(save_dir, file_idx, image, boxes_all, width, height, stride_w, stride_h):\n min_pixel = 2\n \n # boxes_all_5 = backward_convert(boxes_all[:,:8], False)\n # boxes_all_oc = forward_convert(boxes_all_5,False)\n\n # boxes_all_5 = backward_convert(boxes_all, True)\n # boxes_all_oc = forward_convert(boxes_all_5,True)\n\n # print(boxes_all.shape)\n # print(boxes_all_oc.shape)\n\n # cond = boxes_all[np.logical_or(boxes_all_5[:, 2] <= min_pixel, boxes_all_5[:, 3] <= min_pixel), :]\n # cond = boxes_all_oc[np.logical_or(boxes_all_5[:, 2] <= min_pixel, boxes_all_5[:, 3] <= min_pixel), :]\n\n # if len(cond)>0:\n # print(file_idx)\n # print(len(cond))\n\n # boxes_all = boxes_all[np.logical_and(boxes_all_5[:, 2] > min_pixel, boxes_all_5[:, 3] > min_pixel), :]\n # boxes_all = boxes_all_oc[np.logical_and(boxes_all_5[:, 2] > min_pixel, boxes_all_5[:, 3] > min_pixel), :]\n\n if boxes_all.shape[0] > 0:\n shape = image.shape\n for start_h in range(0, shape[0], stride_h):\n for start_w in range(0, shape[1], stride_w):\n boxes = copy.deepcopy(boxes_all)\n box = np.zeros_like(boxes_all)\n start_h_new = start_h\n start_w_new = start_w\n if start_h + height > shape[0]:\n start_h_new = shape[0] - height\n if start_w + width > shape[1]:\n start_w_new = shape[1] - width\n top_left_row = max(start_h_new, 0)\n top_left_col = max(start_w_new, 0)\n bottom_right_row = min(start_h + height, shape[0])\n bottom_right_col = min(start_w + width, shape[1])\n\n subImage = image[top_left_row:bottom_right_row, top_left_col: bottom_right_col]\n\n box[:, 0] = boxes[:, 0] - top_left_col\n box[:, 2] = boxes[:, 2] - top_left_col\n box[:, 4] = boxes[:, 4] - top_left_col\n box[:, 6] = boxes[:, 6] - top_left_col\n\n box[:, 1] = boxes[:, 1] - top_left_row\n box[:, 3] = boxes[:, 3] - top_left_row\n box[:, 5] = boxes[:, 5] - top_left_row\n box[:, 7] = boxes[:, 7] - top_left_row\n box[:, 8] = boxes[:, 8]\n\n box_5 = backward_convert(box, True)\n box_w_o_c = forward_convert(box_5[:,:5],False)\n\n box_w_o_c = get_best_begin_point(box_w_o_c)\n # box_w_o_c = reorder_vertexes_point(box_w_o_c)\n # print(box_w_o_c.shape)\n # box_w_o_c = box_w_o_c[np.logical_and(box_5[:, 2] > min_pixel, box_5[:, 3] > min_pixel), :]\n center_y = box_5[:,1]\n center_x = box_5[:,0]\n box[:,:8] = box_w_o_c\n # center_y = 0.25 * (box[:, 1] + box[:, 3] + box[:, 5] + box[:, 7])\n # center_x = 0.25 * (box[:, 0] + box[:, 2] + box[:, 4] + box[:, 6])\n\n cond1 = 
np.intersect1d(np.where(center_y[:] > 0)[0], np.where(center_x[:] > 0)[0])\n cond2 = np.intersect1d(np.where(center_y[:] < (bottom_right_row - top_left_row))[0],\n np.where(center_x[:] < (bottom_right_col - top_left_col))[0])\n idx = np.intersect1d(cond1, cond2)\n if len(idx) > 0 and (subImage.shape[0] > 5 and subImage.shape[1] > 5):\n makedirs(os.path.join(save_dir, 'images'))\n img = os.path.join(save_dir, 'images',\n \"%s_%04d_%04d.png\" % (file_idx, top_left_row, top_left_col))\n cv2.imwrite(img, subImage)\n\n makedirs(os.path.join(save_dir, 'labelTxt'))\n makedirs(os.path.join(save_dir, 'labelTxt_r'))\n quad_8 = os.path.join(save_dir, 'labelTxt',\n \"%s_%04d_%04d.txt\" % (file_idx, top_left_row, top_left_col))\n quad_5 = os.path.join(save_dir, 'labelTxt_r',\n \"%s_%04d_%04d.txt\" % (file_idx, top_left_row, top_left_col))\n \n with open(quad_8, 'w') as f:\n f.write('\\n'.join(f'{\" \".join(map(str, row[:8]))} {class_list[int(row[8])]}'\n for row in box[idx, :]))\n with open(quad_5, 'w') as f:\n f.write('\\n'.join(f'{\" \".join(map(str, row[:5]))} {class_list[int(row[5])]}'\n for row in box_5[idx, :]))\n # with open(xml, 'w') as f:\n # f.writelines(label_tile)\ndef split_single(img,source_path,destination_path,img_w, img_h, stride_w, stride_h):\n\n image_path = os.path.join(source_path, 'images' )\n lable_path = os.path.join(source_path, 'labelTxt')\n\n img_data = cv2.imread(os.path.join(image_path, img))\n\n txt_data = open(os.path.join(lable_path, img.replace('png', 'txt')), 'r').readlines()\n box = format_label(txt_data)\n\n if box.shape[0] > 0:\n clip_image(destination_path,img.strip('.png'), img_data, box, img_w, img_h, stride_w, stride_h) \n\nclass DOTAImageSplitTool(object):\n def __init__(self,\n in_root,\n out_root,\n tile_overlap,\n tile_shape,\n num_process=8,\n with_class_less=False\n ):\n self.in_images_dir = osp.join(in_root, 'images/')\n self.in_labels_dir = osp.join(in_root, 'labelTxt/')\n self.out_images_dir = osp.join(out_root, 'images/')\n self.out_labels_dir = osp.join(out_root, 'labelTxt/')\n self.with_class_less = with_class_less\n\n assert isinstance(tile_shape, tuple), f'argument \"tile_shape\" must be tuple but got {type(tile_shape)} instead!'\n assert isinstance(tile_overlap,\n tuple), f'argument \"tile_overlap\" must be tuple but got {type(tile_overlap)} instead!'\n self.tile_overlap = tile_overlap\n self.tile_shape = tile_shape\n images = glob.glob(self.in_images_dir + '*.png')\n labels = glob.glob(self.in_labels_dir + '*.txt')\n image_ids = [*map(lambda x: osp.splitext(osp.split(x)[-1])[0], images)]\n label_ids = [*map(lambda x: osp.splitext(osp.split(x)[-1])[0], labels)]\n assert set(image_ids) == set(label_ids)\n self.image_ids = image_ids\n if not osp.isdir(out_root):\n os.mkdir(out_root)\n if not osp.isdir(self.out_images_dir):\n os.mkdir(self.out_images_dir)\n if not osp.isdir(self.out_labels_dir):\n os.mkdir(self.out_labels_dir)\n self.num_process = num_process\n\n def _parse_annotation_single(self, image_id):\n label_dir = osp.join(self.in_labels_dir, image_id + '.txt')\n with open(label_dir, 'r') as f:\n s = f.readlines()\n header = s[:2]\n objects = []\n s = s[2:]\n for si in s:\n bbox_info = si.split()\n assert len(bbox_info) == 10\n bbox = [*map(lambda x: int(x), bbox_info[:8])]\n center = sum(bbox[0::2]) / 4.0, sum(bbox[1::2]) / 4.0\n if self.with_class_less:\n if bbox_info[8] in class_name_less:\n \n objects.append({'bbox': bbox,\n 'label': bbox_info[8],\n 'difficulty': int(bbox_info[9]),\n 'center': center})\n else:\n\n objects.append({'bbox': 
bbox,\n 'label': bbox_info[8],\n 'difficulty': int(bbox_info[9]),\n 'center': center})\n\n return header, objects\n\n def _split_single(self, image_id):\n hdr, objs = self._parse_annotation_single(image_id)\n if len(objs)>0:\n image_dir = osp.join(self.in_images_dir, image_id + '.png')\n img = cv2.imread(image_dir)\n h, w, _ = img.shape\n w_ovr, h_ovr = self.tile_overlap\n w_s, h_s = self.tile_shape\n for h_off in range(0, max(1, h - h_ovr), h_s - h_ovr):\n if h_off > 0:\n h_off = min(h - h_s, h_off) # h_off + hs <= h if h_off > 0\n for w_off in range(0, max(1, w - w_ovr), w_s - w_ovr):\n if w_off > 0:\n w_off = min(w - w_s, w_off) # w_off + ws <= w if w_off > 0\n objs_tile = []\n for obj in objs:\n if w_off <= obj['center'][0] <= w_off + w_s:\n if h_off <= obj['center'][1] <= h_off + h_s:\n objs_tile.append(obj)\n if len(objs_tile) > 0:\n img_tile = img[h_off:h_off + h_s, w_off:w_off + w_s, :]\n # save_image_dir = osp.join(self.out_images_dir, f'{image_id}_{w_off}_{h_off}.png')\n save_image_dir = os.path.join(self.out_images_dir, \n \"%s_%04d_%04d.png\" % (image_id, h_off, w_off))\n # save_label_dir = osp.join(self.out_labels_dir, f'{image_id}_{w_off}_{h_off}.txt')\n save_label_dir = os.path.join(self.out_labels_dir,\n \"%s_%04d_%04d.txt\" % (image_id, h_off, w_off))\n cv2.imwrite(save_image_dir, img_tile)\n label_tile = hdr[:]\n # print(label_tile)\n for obj in objs_tile:\n px, py = obj[\"bbox\"][0::2], obj[\"bbox\"][1::2]\n px = map(lambda x: str(x - w_off), px)\n py = map(lambda x: str(x - h_off), py)\n bbox_tile = sum([*zip(px, py)], ())\n obj_s = f'{\" \".join(bbox_tile)} {obj[\"label\"]} {obj[\"difficulty\"]}\\n'\n label_tile.append(obj_s) \n with open(save_label_dir, 'w') as f:\n f.writelines(label_tile)\n\n def split(self):\n with Pool(self.num_process) as p:\n # p.map(self._split_single, self.image_ids)\n r = list(\n tqdm(p.imap(self._split_single, self.image_ids),\n total=len(self.image_ids)\n )\n ) \n","sub_path":"utils/image_crop.py","file_name":"image_crop.py","file_ext":"py","file_size_in_byte":12257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"331899120","text":"import datetime\nfrom analysis.static.os import os_info_extractor\nfrom analysis.static.dependencies import dep_info_extractor\nfrom api.internal.internal_server import InternalServer\n\n\nclass Analyzer:\n\n # -- Public methods\n\n # Analyzer Constructor\n def __init__(self):\n super(Analyzer, self).__init__()\n self.mongoDbDriver = InternalServer.get_mongodb_driver()\n self.dockerDriver = InternalServer.get_docker_driver()\n\n # Evaluate image from image name or container id\n def evaluate_image(self, image_name, container_id):\n # -- Static analysis\n # Get OS packages\n if image_name: # Scans the docker image\n os_packages = os_info_extractor.get_soft_from_docker_image(self.dockerDriver, image_name)\n else: # Scans the docker container\n os_packages = os_info_extractor.get_soft_from_docker_container_id(self.dockerDriver, container_id)\n image_name = self.dockerDriver.get_docker_image_name_by_container_id(container_id)\n # Get programming language dependencies\n dependencies = dep_info_extractor.get_dependencies_from_docker_image(self.dockerDriver, image_name)\n\n # -- Prepare output\n data = {}\n data['image_name'] = image_name\n data['timestamp'] = datetime.datetime.now().timestamp()\n data['status'] = 'Completed'\n data['static_analysis'] = self.generate_static_analysis(os_packages, dependencies)\n\n # -- Return\n return data\n\n # Generates the 
result of the static analysis\n def generate_static_analysis(self, os_packages, dependencies):\n data = {}\n data['os_packages'] = self.generate_os_report(os_packages)\n data['prog_lang_dependencies'] = self.generate_dependencies_report(dependencies)\n return data\n\n # Generates dependencies report\n def generate_dependencies_report(self, dependencies):\n data = {}\n dep_details = {}\n dep_details['java'] = []\n dep_details['python'] = []\n dep_details['nodejs'] = []\n dep_details['js'] = []\n dep_details['ruby'] = []\n dep_details['php'] = []\n for dependency in dependencies:\n d = {}\n splitted_dep = dependency.split(\"#\")\n d['product'] = splitted_dep[1]\n d['version'] = splitted_dep[2]\n d['vulnerabilities'] = self.get_vulnerabilities(d['product'], d['version'])\n dep_details[splitted_dep[0]].append(d)\n # Prepare output\n data['vuln_dependencies'] = len(dep_details['java']) + len(dep_details['python']) + \\\n len(dep_details['nodejs']) + len(dep_details['js']) + \\\n len(dep_details['ruby']) + len(dep_details['php'])\n data['dependencies_details'] = dep_details\n # Return\n return data\n\n # Generates os report\n def generate_os_report(self, os_packages):\n data = {}\n products_status = []\n vuln_products = 0\n for package in os_packages:\n p = {}\n p['product'] = package['product']\n p['version'] = package['version']\n p['vulnerabilities'] = self.get_vulnerabilities(package['product'], package['version'])\n if len(p['vulnerabilities']) > 0:\n p['is_vulnerable'] = True\n vuln_products += 1\n else:\n p['is_vulnerable'] = False\n products_status.append(p)\n # Prepare output\n data['total_os_packages'] = len(products_status)\n data['vuln_os_packages'] = vuln_products\n data['ok_os_packages'] = data['total_os_packages'] - vuln_products\n data['os_packages_details'] = products_status\n # Return\n return data\n\n # Gets vulnerabilities by product and version\n def get_vulnerabilities(self, product, version):\n return self.mongoDbDriver.get_vulnerabilities(product, version)\n","sub_path":"dagda/analysis/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"533977309","text":"import re\n\n\ndef format_mac(mac):\n if not mac:\n return '0000.0000.0000'\n mac = re.sub('[.:-]', '', mac).lower().split() # remove delimiters and convert to lower case\n mac = ''.join(mac.pop(0).split()) # remove whitespaces\n\n try:\n assert len(mac) == 12 # length should be now exactly 12 (eg. 008041aefd7e)\n except AssertionError:\n print(\"Invalid MAC: '{0}' Ignoring\".format(mac))\n return '0000.0000.0000'\n assert mac.isalnum() # should only contain letters and numbers\n # convert mac in canonical form (eg. 
00:80:41:ae:fd:7e)\n mac = \":\".join([\"%s\" % (mac[i:i+2]) for i in range(0, 12, 2)])\n return mac\n\n","sub_path":"IT Management/library/formaters.py","file_name":"formaters.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"637267506","text":"from conans import ConanFile, CMake, tools\nimport os\n\n\nclass OpencvConan(ConanFile):\n name = \"opencv\"\n version = \"3.4.2\"\n license = \"\"\n homepage = \"https://github.com/opencv/opencv\"\n url = \"https://github.com/labviros/is-packages\"\n description = \"\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_zlib\": [True, False],\n \"with_jpeg\": [True, False],\n \"with_png\": [True, False],\n \"with_tiff\": [True, False],\n \"with_qt\": [True, False],\n \"with_tbb\": [True, False],\n \"with_ffmpeg\": [True, False],\n \"with_lapack\": [True, False]\n }\n default_options = (\"shared=True\", \"fPIC=True\", \"with_zlib=True\", \"with_jpeg=True\",\n \"with_png=True\", \"with_tiff=True\", \"with_qt=False\", \"with_tbb=True\",\n \"with_ffmpeg=True\", \"with_lapack=True\")\n generators = \"cmake\"\n\n def requirements(self):\n if self.options.with_zlib:\n self.requires(\"zlib/1.2.11@conan/stable\")\n if self.options.with_jpeg:\n self.requires(\"libjpeg-turbo/1.5.2@bincrafters/stable\")\n if self.options.with_png:\n self.requires(\"libpng/1.6.34@bincrafters/stable\")\n if self.options.with_tiff:\n self.requires(\"libtiff/4.0.9@bincrafters/stable\")\n if self.options.with_tbb:\n self.requires(\"TBB/4.4.4@conan/stable\")\n\n def system_requirements(self):\n dependencies = []\n if self.options.with_ffmpeg:\n dependencies.extend([\n \"libavdevice-dev\", \"libavfilter-dev\", \"libavcodec-dev\",\n \"libavformat-dev\", \"libavresample-dev\", \"libswscale-dev\"\n ])\n\n if self.options.with_lapack:\n dependencies.extend(\n [\"libopenblas-dev\", \"liblapack-dev\", \"liblapacke-dev\"])\n\n if self.options.with_qt:\n dependencies.extend([\"qtbase5-dev\"])\n\n if dependencies:\n installer = tools.SystemPackageTool()\n installer.update() # Update the package database\n installer.install(\" \".join(dependencies)) # Install the package\n\n def configure(self):\n if self.options.with_zlib:\n self.options[\"zlib\"].shared = False\n if self.options.with_jpeg:\n self.options[\"libjpeg\"].shared = self.options.shared\n if self.options.with_png:\n self.options[\"libpng\"].shared = self.options.shared\n if self.options.with_tiff:\n self.options[\"libtiff\"].shared = self.options.shared\n if self.options.with_tbb:\n self.options[\"TBB\"].shared = self.options.shared\n\n def source(self):\n url, version = self.homepage, self.version\n tools.get(\"{}/archive/{}.tar.gz\".format(url, version))\n extracted_dir = self.name + \"-\" + version\n os.rename(extracted_dir, self.name)\n\n tools.get(\"{}_contrib/archive/{}.tar.gz\".format(url, version))\n extracted_dir = self.name + \"_contrib-\" + version\n os.rename(extracted_dir, \"{0}/{0}_contrib\".format(self.name))\n\n tools.replace_in_file(\n \"opencv/CMakeLists.txt\", \"project(OpenCV CXX C)\", '''project(OpenCV CXX C)\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup()''')\n\n def build(self):\n cmake = CMake(self)\n if not self.options.shared:\n cmake.definitions[\"CMAKE_POSITION_INDEPENDENT_CODE\"] = self.options.fPIC\n cmake.definitions[\"OPENCV_EXTRA_MODULES_PATH\"] = \"opencv/opencv_contrib/modules\"\n 
cmake.definitions[\"BUILD_EXAMPLES\"] = \"OFF\"\n cmake.definitions[\"BUILD_DOCS\"] = \"OFF\"\n cmake.definitions[\"BUILD_TESTS\"] = \"OFF\"\n cmake.definitions[\"BUILD_PERF_TESTS\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_apps\"] = \"OFF\"\n cmake.definitions[\"BUILD_ZLIB\"] = \"OFF\"\n cmake.definitions[\"BUILD_JPEG\"] = \"OFF\"\n cmake.definitions[\"BUILD_PNG\"] = \"OFF\"\n cmake.definitions[\"BUILD_TIFF\"] = \"OFF\"\n cmake.definitions[\"BUILD_JASPER\"] = \"OFF\"\n cmake.definitions[\"BUILD_PROTOBUF\"] = \"OFF\"\n cmake.definitions[\"BUILD_JAVA\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_apps\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_java\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_java_bindings_generator\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_python2\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_python3\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_python_bindings_generator\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_dnn\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_dnn_modern\"] = \"OFF\"\n cmake.definitions[\"BUILD_opencv_tracking\"] = \"OFF\"\n\n cmake.definitions[\"WITH_ZLIB\"] = self.options.with_zlib\n cmake.definitions[\"WITH_JPEG\"] = self.options.with_jpeg\n cmake.definitions[\"WITH_PNG\"] = self.options.with_png\n cmake.definitions[\"WITH_TIFF\"] = self.options.with_tiff\n cmake.definitions[\"WITH_QT\"] = self.options.with_qt\n cmake.definitions[\"WITH_TBB\"] = self.options.with_tbb\n cmake.definitions[\"WITH_FFMPEG\"] = self.options.with_ffmpeg\n cmake.definitions[\"WITH_LAPACK\"] = self.options.with_lapack\n\n cmake.definitions[\"WITH_IPP\"] = \"ON\"\n cmake.definitions[\"WITH_OPENMP\"] = \"ON\"\n cmake.definitions[\"WITH_WEBP\"] = \"OFF\"\n cmake.definitions[\"WITH_JASPER\"] = \"OFF\"\n\n cmake.configure(source_folder=\"opencv\")\n cmake.build()\n cmake.install()\n\n def package(self):\n self.copy(\n pattern=\"*.a\",\n dst=\"lib\",\n src=\"3rdparty/ippicv/ippicv_lnx/lib/intel64/\",\n keep_path=False)\n self.copy(pattern=\"*.a\", dst=\"lib\", src=\"lib\", keep_path=False)\n self.copy(pattern=\"*.so*\", dst=\"lib\", src=\"lib\", keep_path=False)\n\n def package_info(self):\n libs = [\n \"opencv_stitching\",\n \"opencv_superres\",\n \"opencv_videostab\",\n \"opencv_aruco\",\n \"opencv_bgsegm\",\n \"opencv_bioinspired\",\n \"opencv_ccalib\",\n \"opencv_datasets\",\n \"opencv_dpm\",\n \"opencv_face\",\n \"opencv_photo\",\n \"opencv_fuzzy\",\n \"opencv_hfs\",\n \"opencv_img_hash\",\n \"opencv_line_descriptor\",\n \"opencv_optflow\",\n \"opencv_plot\",\n \"opencv_reg\",\n \"opencv_rgbd\",\n \"opencv_saliency\",\n \"opencv_stereo\",\n \"opencv_structured_light\",\n \"opencv_phase_unwrapping\",\n \"opencv_surface_matching\",\n \"opencv_xfeatures2d\",\n \"opencv_shape\",\n \"opencv_video\",\n \"opencv_ml\",\n \"opencv_ximgproc\",\n \"opencv_calib3d\",\n \"opencv_features2d\",\n \"opencv_highgui\",\n \"opencv_videoio\",\n \"opencv_flann\",\n \"opencv_xobjdetect\",\n \"opencv_imgcodecs\",\n \"opencv_objdetect\",\n \"opencv_xphoto\",\n \"opencv_imgproc\",\n \"opencv_core\",\n ]\n\n if self.options.with_ffmpeg:\n libs.extend([\"avformat\", \"avcodec\", \"avdevice\",\n \"avresample\", \"avutil\", \"swscale\"])\n\n if self.options.with_qt:\n libs.extend([\"opencv_cvv\"])\n\n if self.options.with_lapack:\n libs.extend([\"lapacke\", \"lapack\", \"blas\"])\n\n libs.extend([\"pthread\", \"dl\", \"IlmImf\",\n \"ittnotify\", \"ippiw\", \"ippicv\"])\n\n self.cpp_info.libs = 
libs\n","sub_path":"opencv/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":7592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"283055846","text":"\"\"\"\n\nProject: FTIRDB\nFile: routes.py\n\nVersion: v1.0\nDate: 10.09.2018\nFunction: provide the web address route structures\n\nThis program is released under the GNU Public Licence (GPL V3)\n\n--------------------------------------------------------------------------\nDescription:\n============\nThese routes are used to direct user to the correct views\n\n\n\"\"\"\n# import all required libraries\nfrom pyramid.httpexceptions import (\n HTTPNotFound,\n HTTPFound,\n)\nfrom pyramid.security import (\n Allow,\n Everyone,\n)\n\n#******************************************\n\n# import the models \nfrom .models import FTIRModel, User, atr, chemicals, data_aquisition, depositor, dried_film, experiment, experimental_conditions, fourier_transform_processing, gas, liquid, molecular_composition, molecule, not_atr, post_processing_and_deposited_spectra, project, protein, publication, sample, solid, spectra, spectrometer, state_of_sample\n\n\n\ndef includeme(config):\n \"\"\"Direct web address to correct page and python views \"\"\"\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('view_wiki', '/')\n config.add_route('searchdb','/searchdb')\n config.add_route('form','/form')\n config.add_route('results','/results/{results}')\n config.add_route('graph','/graph')\n config.add_route('about', '/about')\n config.add_route('jcampupload', '/jcampupload')\n config.add_route('upload', '/upload')\n config.add_route('login', '/login')\n config.add_route('logout', '/logout')\n config.add_route('add_account','/add_account')\n config.add_route('add_page', '/add_page')\n config.add_route('userArea','/{user}/userArea', factory=user_factory)\n config.add_route('view_page', '/{pagename}', factory=page_factory)\n config.add_route('edit_page', '/{pagename}/edit_page',\n factory=page_factory)\n\ndef new_page_factory(request):\n pagename = request.matchdict['pagename']\n if request.dbsession.query(FTIRModel).filter_by(name=pagename).count() > 0:\n next_url = request.route_url('edit_page', pagename=pagename)\n raise HTTPFound(location=next_url)\n return NewPage(pagename)\n\ndef user_factory(request):\n user = request.matchdict['user']\n page = request.dbsession.query(User).filter_by(name=user).first()\n if page is None:\n raise HTTPNotFound\n return PageResource(page)\n\n\nclass NewPage(object):\n def __init__(self, pagename):\n self.pagename = pagename\n\n def __acl__(self):\n return [\n (Allow, 'role:editor', 'create'),\n (Allow, 'role:basic', 'create'),\n ]\n\ndef page_factory(request):\n pagename = request.matchdict['pagename']\n page = request.dbsession.query(FTIRModel).filter_by(name=pagename).first()\n if page is None:\n raise HTTPNotFound\n return PageResource(page)\n\nclass PageResource(object):\n def __init__(self, page):\n self.page = page\n\n def __acl__(self):\n return [\n (Allow, Everyone, 'view'),\n (Allow, Everyone, 'edit'),\n (Allow, Everyone, 'edit'),\n ]\n","sub_path":"ftirdb/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"344846564","text":"\"\"\"\n.. 
_tutorials_native-api-basics:\n\nNative API: Basics\n==================\n\nFirst, let's consider what it looks like to train a very simple model on MNIST\nusing ``tf.keras``, taken directly from `TensorFlow documentation\n`_.\n\"\"\"\n\nimport tensorflow as tf\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = tf.keras.models.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n)\nmodel.compile(\n tf.keras.optimizers.Adam(name='Adam'),\n loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\nmodel.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=1)\n\n###############################################################################\n#\n# Here is what it looks like to train the exact same model using the Native API\n# to launch an experiment on a Determined cluster.\n\nimport tensorflow as tf\n\nimport determined as det\nfrom determined import experimental\nfrom determined.experimental.keras import init\n\nconfig = {\n \"searcher\": {\"name\": \"single\", \"metric\": \"val_acc\", \"max_length\": {\"batches\": 500}},\n \"hyperparameters\": {\"global_batch_size\": 32},\n}\n\n# When running this code from a notebook, add a `command` argument to init()\n# specifying the notebook file name.\ncontext = init(config, context_dir=\".\")\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = tf.keras.models.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n)\nmodel = context.wrap_model(model)\nmodel.compile(\n tf.keras.optimizers.Adam(name='Adam'),\n loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\nmodel.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5)\n\n###############################################################################\n#\n# Paste the code above into a Python file named ``tf_keras_native.py`` and run\n# it as a Python script.\n#\n# .. note::\n#\n# Before submitting any experiments using the Native API, make sure the\n# :ref:`DET_MASTER environment variable is configured to connect to the\n# appropriate IP address `.\n#\n# .. code:: bash\n#\n# $ python tf_keras_native.py\n#\n# You can also use any environment that supports Python to launch an experiment\n# with this code, such as a Jupyter notebook or an IDE.\n#\n# Let's walk through some of the concepts introduced by the Native API.\n#\n# Configuration\n# -------------\n\nconfig = {\n \"searcher\": {\"name\": \"single\", \"metric\": \"val_acc\", \"max_length\": {\"batches\": 500}},\n \"hyperparameters\": {\"global_batch_size\": 16},\n}\n\n###############################################################################\n#\n# Configuring any experiment for use with Determined requires an\n# :ref:`experiment-configuration`. In the Native API, this is represented as a\n# Python dictionary. There are two *required* fields for every configuration\n# submitted via the Native API:\n#\n# ``searcher``:\n# This field describes how many different :ref:`Trials `\n# (models) should be trained. 
In this case, we've specified to\n# train a ``\"single\"`` model for 500 batches.\n# ``hyperparameters``:\n# This field describes the hyperparameters used. ``global_batch_size`` is\n# a required hyperparameter for every experiment -- we'll revisit this\n# requirement in :ref:`tutorials_native-api-dtrain`.\n#\n# Context\n# -------\n#\n# .. code:: python\n#\n# context = init(config, local=False, test=False, context_dir=\".\")\n#\n# :ref:`keras-init` is the function that initializes the Determined training\n# context. We can think of it as the moment in the training script where\n# Determined will \"assume control\" of the execution of your code. It has\n# three arguments in addition to the configuration:\n#\n# ``local`` (``bool``):\n# ``local=False`` will submit the experiment to a Determined cluster.\n# ``local=True`` will execute the training loop in your local Python\n# environment (although currently, local training is not implemented, so\n# you must also set ``test=True``). Defaults to False.\n#\n# ``test`` (``bool``):\n# ``test=True`` will execute a minimal training loop rather than a full\n# experiment. This can be useful for porting or debugging a model because\n# many common errors will surface quickly. Defaults to False.\n#\n# ``context_dir`` (``str``):\n# Specifies the location of the code you want submitted to the cluster.\n# This is required by Determined to execute your training script in a\n# remote environment (``local=False``). In the common case, \".\" submits\n# your entire working directory to the Determined cluster.\n#\n# Wrap Model (``tf.keras`` only)\n# ------------------------------\n#\n# .. code:: python\n#\n# model = context.wrap_model(model)\n#\n# In the case of ``tf.keras``, we will need to use the ``wrap_model`` API to\n# make the Determined context aware of the model we want to train with. After\n# calling ``wrap_model``, we proceed with the ``compile()`` and ``fit()``\n# interfaces defined by TensorFlow to begin training our model remotely.\n#\n# Next Steps\n# ----------\n#\n# * :ref:`tutorials_native-api-hparam-search`\n# * :ref:`tutorials_native-api-dtrain`\n","sub_path":"examples/tutorials/native-tf-keras/tf_keras_native.py","file_name":"tf_keras_native.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"436001690","text":"# -*- encoding: utf-8 -*-\r\n##############################################################################\r\n#\r\n# OpenERP, Open Source Management Solution \r\n# Copyright (C) 2004-2009 Tiny SPRL (). All Rights Reserved\r\n# $Id$\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. 
If not, see .\r\n#\r\n##############################################################################\r\n\r\n\r\nfrom openerp.osv import fields, osv, orm\r\nimport openerp.addons.decimal_precision as dp\r\nfrom openerp import netsvc\r\nfrom openerp import pooler\r\nfrom openerp.tools.translate import _\r\nfrom datetime import datetime\r\n\r\nclass formulir_1111_ab(osv.osv):\r\n _name = 'pajak.formulir_1111_ab'\r\n _description = 'Formulir 1111 AB'\r\n _inherit = ['mail.thread']\r\n \r\n def default_state(self, cr, uid, context={}):\r\n return 'draft'\r\n \r\n def default_name(self, cr, uid, context={}):\r\n return '/'\r\n \r\n def default_created_time(self, cr, uid, context={}):\r\n #TODO: Ticket #79\r\n return datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n \r\n def default_created_user_id(self, cr, uid, context={}):\r\n return uid\r\n\r\n def function_amount_all(self, cr, uid, ids, name, args, context=None):\r\n #TODO: Ticket #81\r\n res = {}\r\n item1A = 0.00\r\n item1B1_dpp = 0.00\r\n item1B1_ppn = 0.00\r\n item1B1_ppnbm = 0.00\r\n item2A_dpp = 0.00\r\n item2A_ppn = 0.00\r\n item2A_ppnbm = 0.00\r\n item2B_dpp = 0.00\r\n item2B_ppn = 0.00\r\n item2B_ppnbm = 0.00\r\n item2C_dpp = 0.00\r\n item2C_ppn = 0.00\r\n item2C_ppnbm = 0.00\r\n\r\n for formulir in self.browse(cr, uid, ids):\r\n res[formulir.id] = {\r\n 'item1A' : 0.0, # Taken from Form 1111 A.1\r\n 'item1B1_dpp' : 0.0, # Taken from Form 1111 A.2\r\n 'item1B1_ppn' : 0.0, # Taken from Form 1111 A.2\r\n 'item1B1_ppnbm' : 0.0, # Taken from Form 1111 A.2\r\n 'item2A_dpp' : 0.0, # Taken from Form 1111 B.1\r\n 'item2A_ppn' : 0.0, # Taken from Form 1111 B.1\r\n 'item2A_ppnbm' : 0.0, # Taken from Form 1111 B.1\r\n 'item2B_dpp' : 0.0, # Taken from Form 1111 B.2\r\n 'item2B_ppn' : 0.0, # Taken from Form 1111 B.2\r\n 'item2B_ppnbm' : 0.0, # Taken from Form 1111 B.2\r\n 'item2C_dpp' : 0.0, # Taken from Form 1111 B.3\r\n 'item2C_ppn' : 0.0, # Taken from Form 1111 B.3\r\n 'item2C_ppnbm' : 0.0, # Taken from Form 1111 B.3\r\n }\r\n return res\r\n\r\n\r\n \r\n _columns = {\r\n 'name' : fields.char(string='# SPT', size=30, required=True, readonly=True),\r\n 'company_id' : fields.many2one(string='Perusahaan', obj='res.company', required=True),\r\n 'nama_pkp' : fields.char(string='Nama PKP', size=100, required=True),\r\n 'npwp' : fields.char(string='NPWP', size=50, required=True),\r\n 'masa_pajak_id' : fields.many2one(string='Masa Pajak', obj='pajak.masa_pajak', required=True),\r\n 'pembetulan_ke' : fields.integer(string='Pembetulan Ke', required=True),\r\n 'item1A' : fields.function(fnct=function_amount_all, type='float', string='A. Ekspor BKP Berwujud/BKP Tidak Berwujud/JKP', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item1B1_dpp' : fields.function(fnct=function_amount_all, type='float', string='1. Penyerahan Dalam Negeri dengan Faktur Pajak yang tidak ditanggung', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item1B1_ppn' : fields.function(fnct=function_amount_all, type='float', string='1. Penyerahan Dalam Negeri dengan Faktur Pajak yang tidak ditanggung', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item1B1_ppnbm' : fields.function(fnct=function_amount_all, type='float', string='1. 
Penyerahan Dalam Negeri dengan Faktur Pajak yang tidak ditanggung', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item1B2_dpp' : fields.float(string='2. Penyerahan Dalam Negeri dengan Faktur Pajak yang ditanggung', digits_compute=dp.get_precision('Account')),\r\n 'item1B2_ppn' : fields.float(string='2. Penyerahan Dalam Negeri dengan Faktur Pajak yang ditanggung', digits_compute=dp.get_precision('Account')),\r\n 'item1B2_ppnbm' : fields.float(string='2. Penyerahan Dalam Negeri dengan Faktur Pajak yang ditanggung', digits_compute=dp.get_precision('Account')),\r\n 'item1C1_dpp' : fields.float(string='1. Penyerahan yang PPN atau PPN dan PPnBM-nya harus dipunggut sendiri', digits_compute=dp.get_precision('Account')),\r\n 'item1C1_ppn' : fields.float(string='1. Penyerahan yang PPN atau PPN dan PPnBM-nya harus dipunggut sendiri', digits_compute=dp.get_precision('Account')),\r\n 'item1C1_ppnbm' : fields.float(string='1. Penyerahan yang PPN atau PPN dan PPnBM-nya harus dipunggut sendiri', digits_compute=dp.get_precision('Account')), \r\n 'item1C2_dpp' : fields.float(string='2.Penyerahan yang PPN atau PPN dan PPnBM-nya harus dipunggut oleh pemungut PPN', digits_compute=dp.get_precision('Account')),\r\n 'item1C2_ppn' : fields.float(string='2.Penyerahan yang PPN atau PPN dan PPnBM-nya harus dipunggut oleh pemungut PPN', digits_compute=dp.get_precision('Account')),\r\n 'item1C2_ppnbm' : fields.float(string='2.Penyerahan yang PPN atau PPN dan PPnBM-nya harus dipunggut oleh pemungut PPN', digits_compute=dp.get_precision('Account')),\r\n 'item1C3_dpp' : fields.float(string='3.Penyerahan yang PPN atau PPN dan PPnBM-nya tidak dipunggut', digits_compute=dp.get_precision('Account')),\r\n 'item1C3_ppn' : fields.float(string='3.Penyerahan yang PPN atau PPN dan PPnBM-nya tidak dipunggut', digits_compute=dp.get_precision('Account')),\r\n 'item1C3_ppnbm' : fields.float(string='3.Penyerahan yang PPN atau PPN dan PPnBM-nya tidak dipunggut', digits_compute=dp.get_precision('Account')), \r\n 'item1C4_dpp' : fields.float(string='4. Penyerahan yang dibebaskan dari pengenaan PPN atau PPN dan PPnBM', digits_compute=dp.get_precision('Account')),\r\n 'item1C4_ppn' : fields.float(string='4. Penyerahan yang dibebaskan dari pengenaan PPN atau PPN dan PPnBM', digits_compute=dp.get_precision('Account')),\r\n 'item1C4_ppnbm' : fields.float(string='4. 
Penyerahan yang dibebaskan dari pengenaan PPN atau PPN dan PPnBM', digits_compute=dp.get_precision('Account')), \r\n 'item2A_dpp' : fields.function(fnct=function_amount_all, type='float', string='Item II.A', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item2A_ppn' : fields.function(fnct=function_amount_all, type='float', string='Item II.A', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item2A_ppnbm' : fields.function(fnct=function_amount_all, type='float', string='Item II.A', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item2B_dpp' : fields.function(fnct=function_amount_all, type='float', string='Item II.B', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item2B_ppn' : fields.function(fnct=function_amount_all, type='float', string='Item II.B', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item2B_ppnbm' : fields.function(fnct=function_amount_all, type='float', string='Item II.B', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item2C_dpp' : fields.function(fnct=function_amount_all, type='float', string='Item II.C', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item2C_ppn' : fields.function(fnct=function_amount_all, type='float', string='Item II.C', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item2C_ppnbm' : fields.function(fnct=function_amount_all, type='float', string='Item II.C', digits_compute=dp.get_precision('Account'), method=True, store=True, multi='all'), #TODO: Ticket #80\r\n 'item2D_dpp' : fields.float(string='Item II.D', digits_compute=dp.get_precision('Account')),\r\n 'item2D_ppn' : fields.float(string='Item II.D', digits_compute=dp.get_precision('Account')),\r\n 'item2D_ppnbm' : fields.float(string='Item II.D', digits_compute=dp.get_precision('Account')),\r\n 'item3A' : fields.float(string='Item III.A', digits_compute=dp.get_precision('Account')),\r\n 'item3B1' : fields.float(string='Item III.B.1', digits_compute=dp.get_precision('Account')),\r\n 'item3B2' : fields.float(string='Item III.B.2', digits_compute=dp.get_precision('Account')),\r\n 'item3B3' : fields.float(string='Item III.B.3', digits_compute=dp.get_precision('Account')),\r\n 'item3B4' : fields.float(string='Item III.B.4', digits_compute=dp.get_precision('Account')),\r\n 'item3C' : fields.float(string='Item III.C', digits_compute=dp.get_precision('Account')), \r\n 'formulir_a1_id' : fields.many2one(string='Formulir 1111 A.1', obj='pajak.formulir_1111_a1'),\r\n 'formulir_a2_id' : fields.many2one(string='Formulir 1111 A.2', obj='pajak.formulir_1111_a2'),\r\n 'formulir_b1_id' : fields.many2one(string='Formulir 1111 B.1', obj='pajak.formulir_1111_b1'),\r\n 'formulir_b2_id' : fields.many2one(string='Formulir 1111 B.2', obj='pajak.formulir_1111_b2'),\r\n 'formulir_b3_id' : fields.many2one(string='Formulir 1111 B.3', obj='pajak.formulir_1111_b3'),\r\n 'note' : fields.text(string='Note'),\r\n 'state' : fields.selection([('draft','Draft'),('confirm','Waiting For Approval'),('approve','Ready To Process'),('done','Done'),('cancel','Cancel')], 'Status', readonly=True),\r\n 'created_time' : fields.datetime(string='Created Time', readonly=True),\r\n 
'created_user_id' : fields.many2one(string='Created By', obj='res.users', readonly=True),\r\n 'confirmed_time' : fields.datetime(string='Confirmed Time', readonly=True),\r\n 'confirmed_user_id' : fields.many2one(string='Confirmed By', obj='res.users', readonly=True), \r\n 'approved_time' : fields.datetime(string='Approved Time', readonly=True),\r\n 'approved_user_id' : fields.many2one(string='Approved By', obj='res.users', readonly=True), \r\n 'processed_time' : fields.datetime(string='Processed Time', readonly=True),\r\n 'processed_user_id' : fields.many2one(string='Processed By', obj='res.users', readonly=True), \r\n 'cancelled_time' : fields.datetime(string='Cancelled Time', readonly=True),\r\n 'cancelled_user_id' : fields.many2one(string='Cancelled By', obj='res.users', readonly=True), \r\n 'cancelled_reason' : fields.text(string='Cancelled Reason', readonly=True),\r\n } \r\n \r\n _defaults = {\r\n 'name' : default_name,\r\n 'state' : default_state,\r\n 'created_time' : default_created_time,\r\n 'created_user_id' : default_created_user_id,\r\n }\r\n\r\n def workflow_action_confirm(self, cr, uid, ids, context={}):\r\n for id in ids:\r\n if not self.log_audit_trail(cr, uid, id, 'confirmed'):\r\n return False\r\n return True\r\n\r\n def workflow_action_approve(self, cr, uid, ids, context={}):\r\n for id in ids:\r\n if not self.log_audit_trail(cr, uid, id, 'approved'):\r\n return False\r\n return True \r\n \r\n def workflow_action_done(self, cr, uid, ids, context={}):\r\n for id in ids:\r\n if not self.log_audit_trail(cr, uid, id, 'processed'):\r\n return False\r\n return True \r\n \r\n def workflow_action_cancel(self, cr, uid, ids, context={}):\r\n for id in ids:\r\n if not self.log_audit_trail(cr, uid, id, 'cancelled'):\r\n return False\r\n return True \r\n \r\n def button_action_set_to_draft(self, cr, uid, ids, context={}):\r\n for id in ids:\r\n if not self.delete_workflow_instance(cr, uid, id):\r\n return False\r\n\r\n if not self.create_workflow_instance(cr, uid, id):\r\n return False\r\n \r\n return True\r\n\r\n \r\n def button_action_cancel(self, cr, uid, ids, context={}):\r\n wkf_service = netsvc.LocalService('workflow')\r\n for id in ids:\r\n if not self.delete_workflow_instance(cr, uid, id):\r\n return False\r\n\r\n if not self.create_workflow_instance(cr, uid, id):\r\n return False\r\n\r\n wkf_service.trg_validate(uid, 'pajak.formulir_1111_ab', id, 'button_cancel', cr)\r\n\r\n return True\r\n\r\n\r\n def write_cancel_description(self, cr, uid, id, reason):\r\n self.write(cr, uid, [id], {'cancelled_reason' : reason})\r\n return True\r\n\r\n def log_audit_trail(self, cr, uid, id, event):\r\n #TODO: Ticket #82\r\n if event not in ['created','confirmed','approved','processed','cancelled']:\r\n raise osv.except_osv(_('Peringatan!'),_('Error pada method log_audit'))\r\n\t\t\t\r\n state_dict = \t{\r\n 'created' : 'draft',\r\n 'confirmed' : 'confirm',\r\n 'approved' : 'approve',\r\n 'processed' : 'done',\r\n 'cancelled' : 'cancel'\r\n }\r\n \r\n val =\t{\r\n '%s_user_id' % (event) : uid ,\r\n '%s_time' % (event) : datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\r\n 'state' : state_dict.get(event, False),\r\n }\r\n \r\n self.write(cr, uid, [id], val)\r\n return True\r\n\r\n def clear_log_audit(self, cr, uid, id):\r\n #TODO: Ticket #83\r\n\r\n val =\t{\r\n 'created_user_id' : False,\r\n 'created_time' : False,\t\t\r\n 'confirmed_user_id' : False,\r\n 'confirmed_time' : False,\r\n 'approved_user_id' : False,\r\n 'approved_time' : False,\r\n 
'processed_user_id' : False,\r\n 'processed_time' : False,\r\n 'cancelled_user_id' : False,\r\n 'cancelled_time' : False,\r\n }\r\n\t\t\t\r\n self.write(cr, uid, [id], val)\r\n\r\n return True\r\n \r\n\r\n def delete_workflow_instance(self, cr, uid, id):\r\n #TODO: Ticket #84\r\n\r\n wkf_service = netsvc.LocalService('workflow')\r\n wkf_service.trg_delete(uid, 'pajak.formulir_1111_ab', id, cr)\r\n\r\n return True\r\n\r\n def create_workflow_instance(self, cr, uid, id):\r\n #TODO: Ticket #85\r\n\r\n wkf_service = netsvc.LocalService('workflow')\r\n wkf_service.trg_create(uid, 'pajak.formulir_1111_ab', id, cr)\r\n\r\n return True\r\n\r\n def onchange_company_id(self, cr, uid, ids, company_id):\r\n #TODO: Ticket #86\r\n obj_res_company = self.pool.get('res.company')\r\n\r\n value = {}\r\n domain = {}\r\n warning = {}\r\n \r\n if company_id:\r\n npwp = obj_res_company.browse(cr, uid, company_id).partner_id.npwp\r\n value.update({'npwp' : npwp})\r\n\r\n return {'value' : value, 'domain' : domain, 'warning' : warning}\r\n\r\n def create_sequence(self, cr, uid, id):\r\n #TODO: Ticket #87\r\n obj_sequence = self.pool.get('ir.sequence')\r\n obj_res_company = self.pool.get('res.company')\r\n\r\n formulir_1111_ab = self.browse(cr, uid, [id])[0]\r\n\r\n if formulir_1111_ab.name == '/':\r\n if formulir_1111_ab.company_id.sequence_formulir_1111_ab.id:\r\n sequence = obj_sequence.next_by_id(cr, uid, formulir_1111_ab.company_id.sequence_formulir_1111_ab.id)\r\n self.write(cr, uid, [id], {'name' : sequence})\r\n else:\r\n raise osv.except_osv(_('Peringatan'),_('Sequence Formulir 1111 AB Belum Di-Set'))\r\n return False\r\n return True\r\n\r\nformulir_1111_ab()\r\n\r\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\r\n","sub_path":"object_module/formulir_1111_ab.py","file_name":"formulir_1111_ab.py","file_ext":"py","file_size_in_byte":18577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"611977557","text":"import unittest\nimport longest_lines\n\nclass LongLineTestCase(unittest.TestCase):\n\n def test_quick_sort(self):\n input1 = [(4,5),(4,2),(7,1),(8,0),(4,3),(7,7)]\n output1 = [(8,0),(7,1),(4,2),(4,3),(4,5),(7,7)]\n longest_lines.quick_sort(input1, 0, len(input1) - 1)\n self.assertEqual(input1, output1)\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(LongLineTestCase)\nunittest.TextTestRunner(verbosity=2).run(suite) \n","sub_path":"longest_lines/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"234638331","text":"# -*- coding: utf-8 -*-\n\"\"\"User views.\"\"\"\nfrom flask import Blueprint, render_template, jsonify, request\nfrom flask_login import login_required\n\ndata_manager = Blueprint('data_manager', __name__)\n\n@data_manager.route(\"/credential\")\n@login_required\ndef credential():\n data_dict = {\n \"@context\": \"https://weidentity.webank.com/vc/v1\",\n \"id\": \"dsfewr23sdcsdfeqeddadfd\",\n \"type\": [\"Credential\", \"cpt100\"],\n \"issuer\": \"did:weid:1:0x2323e3e3dweweewew2www124151251\",\n \"issued\": \"2010-01-01T21:19:10Z\",\n \"claim\": {\n \"primeNumberIdx\":\"1234\"\n },\n \"revocation\": {\n \"id\": \"did:weid:1:2323e3e3dweweewew2\",\n \"type\": \"SimpleRevocationList2017\"\n },\n \"signature\": [{\n \"type\": \"LinkedDataSignature2015\",\n \"created\": \"2016-06-18T21:19:10Z\",\n \"creator\": \"did:weid:1:2323e3e3dweweewew2\",\n \"domain\": \"www.diriving_card.com\",\n 
\"nonce\": \"598c63d6\",\n \"signatureValue\": \"BavEll0/I1zpYw8XNi1bgVg/sCneO4Jugez8RwDg/+MCRVpjOboDoe4SxxKjkCOvKiCHGDvc4krqi6Z1n0UfqzxGfmatCuFibcC1wpsPRdW+gGsutPTLzvueMWmFhwYmfIFpbBu95t501+rSLHIEuujM/+PXr9Cky6Ed+W3JT24=\"\n }]\n }\n\n return jsonify(data_dict)\n\n@data_manager.route(\"/list_data\")\ndef list_data():\n return_msg = [{\n \"id\": \"0x931fe3032b84b426cd57ec47aec2a126\",\n \"addr\": \"b17ab4dcc3ea1eafd2c97b610a893326\",\n \"record\": \"贡献github代码\",\n \"owner\": \"队长\",\n \"datetime\": \"2020-06-05\",\n \"link_credential\": \"创新大赛证书\"\n },{\n \"id\": \"0x78b268ee3859885e104b423d7a54768\",\n \"addr\": \"f82ad4015e0770aed1ac2c636a563d26\",\n \"record\": \"撰写比赛ppt\",\n \"owner\": \"队长\",\n \"datetime\": \"2020-06-04\",\n \"link_credential\": \"创新大赛证书\"\n },\n ]\n return jsonify(return_msg)\n\n\n\n\n","sub_path":"SUIBE_DID_Data_Manager/blueprints/data_manager/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"5178449","text":"import json\n\nfrom rest_framework import serializers\n\nfrom accounts.api.serializers import UserSerializer\nfrom profiles.models import BabysitterProfile, ParentProfile, RecommendationsOfSitter\nfrom accounts.models import CustomUser\n\n\nclass RecommendationsOfSitterSerializer(serializers.ModelSerializer):\n author_user = serializers.SerializerMethodField(read_only=True)\n uri = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = RecommendationsOfSitter\n fields = [\n 'recommendation',\n 'author_user',\n 'publish_data',\n 'uri',\n ]\n\n def get_author_user(self, obj):\n user = self.context.get(\"user\")\n return obj.get_author_user(user)\n\n def get_uri(self, obj):\n return \"users/user_detail/{pk}\".format(pk=obj.author.pk)\n\n\nclass BabysitterProfileSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n recommendations = RecommendationsOfSitterSerializer(read_only=True, many=True)\n\n class Meta:\n model = BabysitterProfile\n fields =[\n 'email',\n 'city',\n 'age',\n 'experienceYears',\n 'about',\n 'user',\n 'recommendations',\n\n ]\n\n def get_uri(self, obj):\n return \"users/user_detail/{pk}\".format(pk=obj.author.pk)\n\n\nclass ParentProfileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = ParentProfile\n fields =[\n 'user',\n 'city',\n 'kidsAge',\n 'about',\n ]\n\n\n\n","sub_path":"profiles/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"400839407","text":"\"\"\"\nZurich Instruments LabOne Python API Example\n\nDemonstrate how to connect to a Zurich Instruments HDAWG and\nuse the precompensation module to fit filter parameters for a\nmeasured signal\n\"\"\"\n\n# Copyright 2019 Zurich Instruments AG\n\nfrom __future__ import print_function\nimport time\nimport numpy as np\nimport zhinst.ziPython\n\n\ndef get_precompensated_signal(module_handle, input_signal, amplitude, timeconstant):\n \"\"\"\n Uploads the input_signal to the precompensationAdvisor module and returns the\n simulated forward transformed signal with an exponential filter(amplitude,timeconstant).\n \"\"\"\n module_handle.set('exponentials/0/amplitude', amplitude)\n module_handle.set('exponentials/0/timeconstant', timeconstant)\n module_handle.set(\"wave/input/inputvector\", input_signal)\n return 
np.array(module_handle.get(\"wave/output/forwardwave\", True)['/wave/output/forwardwave'][0]['x'])\n\n\ndef run_example(device_id, do_plot=True):\n \"\"\"\n Run the example: Connect to a Zurich Instruments HDAWG. The example uploads a signal to\n the precompensationAdvisor module and reads back the filtered signal. This functionality\n is used to feed a fitting algorithm for fitting filter parameters.\n\n Requirements:\n HDAWG\n\n\n Arguments:\n\n device_id (str): The ID of the device to run the example with. For\n example, `dev8050`.\n\n do_plot (bool, optional): Specify whether to plot the initial, target and fitted signals.\n\n\n See the \"LabOne Programming Manual\" for further help, available:\n - On Windows via the Start-Menu:\n Programs -> Zurich Instruments -> Documentation\n - On Linux in the LabOne .tar.gz archive in the \"Documentation\"\n sub-folder.\n \"\"\"\n # Settings\n apilevel_example = 6 # The API level supported by this example.\n err_msg = \"This example can only be ran on an HDAWG.\"\n # Call a zhinst utility function that returns:\n # - an API session `daq` in order to communicate with devices via the data server.\n # - the device ID string that specifies the device branch in the server's node hierarchy.\n # - the device's discovery properties.\n (daq, device, _) = zhinst.utils.create_api_session(device_id, apilevel_example, required_devtype='HDAWG',\n required_err_msg=err_msg)\n zhinst.utils.api_server_version_check(daq)\n\n # Create a base configuration: Disable all available outputs, awgs, demods, scopes,...\n zhinst.utils.disable_everything(daq, device)\n\n pre = daq.precompensationAdvisor()\n\n sampling_rate = 2.4e9\n\n x, target_signal = generate_target_signal(sampling_rate=sampling_rate)\n actual_signal = generate_actual_signal(target_signal, sampling_rate=sampling_rate)\n\n # prepare the precompensationAdvisor module\n pre.set(\"exponentials/0/enable\", 1)\n pre.set(\"wave/input/source\", 3)\n pre.set(\"device\", device_id)\n daq.setDouble(\"/\" + device_id + \"/system/clocks/sampleclock/freq\", sampling_rate)\n # a short pause is needed for the precompensationAdvisor module to read\n # the updated the sampling rate from the device node\n time.sleep(0.05)\n sampling_rate = pre.getDouble(\"samplingfreq\")\n\n # Fitting the parameters\n from lmfit import Model\n gmodel = Model(get_precompensated_signal, independent_vars=['module_handle', 'input_signal'])\n result = gmodel.fit(target_signal,\n input_signal=actual_signal,\n module_handle=pre,\n amplitude=0.,\n timeconstant=1e-4,\n fit_kws={'epsfcn': 1e-3}) # 'epsfcn' is needed as filter parameters are discretized\n # in precompensationAdvisor module, otherwise fitting will\n # not converge\n\n print(result.fit_report())\n if do_plot:\n import matplotlib.pyplot as plt\n plt.plot(x, result.init_fit, 'k', label='initial signal')\n plt.plot(x, result.best_fit, 'r', label='fitted signal')\n plt.plot(x, target_signal, 'b', label='target signal')\n plt.legend()\n plt.ticklabel_format(axis='both', style='sci', scilimits=(-2, 2))\n plt.xlabel(\"time [s]\")\n plt.ylabel(\"Amplitude\")\n plt.show()\n\n\ndef generate_target_signal(min_x=-96, max_x=5904, sampling_rate=2.4e9):\n \"\"\"Returns a step function with given length and sampling interval.\"\"\"\n x_values = np.array(range(min_x, max_x))\n x_values = [element/sampling_rate for element in x_values]\n signal2 = np.array(np.concatenate((np.zeros(-min_x), np.ones(max_x))))\n return x_values, signal2\n\n\ndef generate_actual_signal(initial_signal, amp=0.4, tau=100e-9, 
sampling_rate=2.4e9):\n \"\"\"\n generate \"actual signal\" through filtering the initial signal with\n an exponential filter and add noise\n \"\"\"\n from scipy import signal\n # calculate a and b from amplitude and tau\n alpha = 1 - np.exp(-1/(sampling_rate*tau*(1+amp)))\n if amp >= 0.0:\n k = amp/(1+amp-alpha)\n a = [(1-k + k*alpha), -(1-k)*(1-alpha)]\n else:\n k = -amp/(1+amp)/(1-alpha)\n a = [(1 + k - k*alpha), -(1+k)*(1-alpha)]\n b = [1, -(1-alpha)]\n\n distorted_signal = np.array(signal.lfilter(b, a, initial_signal) +\n 0.01 * np.random.normal(size=initial_signal.size))\n return distorted_signal\n","sub_path":"ZIUHFLI/zhinst/examples/hdawg/example_precompensation_curve_fit.py","file_name":"example_precompensation_curve_fit.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"508706529","text":"# -*- coding: utf-8 -*-\n\"\"\"Analysis plugin to look up files in VirusTotal and tag events.\"\"\"\n\nfrom plaso.analysis import hash_tagging\nfrom plaso.analysis import logger\nfrom plaso.analysis import manager\nfrom plaso.lib import errors\n\n\nclass VirusTotalAnalyzer(hash_tagging.HTTPHashAnalyzer):\n \"\"\"Class that analyzes file hashes by consulting VirusTotal.\"\"\"\n\n _VIRUSTOTAL_API_REPORT_URL = (\n 'https://www.virustotal.com/vtapi/v2/file/report')\n\n _EICAR_SHA256 = (\n '275a021bbfb6489e54d471899f7db9d1663fc695ec2fe2a2c4538aabf651fd0f')\n\n SUPPORTED_HASHES = ['md5', 'sha1', 'sha256']\n\n def __init__(self, hash_queue, hash_analysis_queue, **kwargs):\n \"\"\"Initializes a VirusTotal analyzer.\n\n Args:\n hash_queue (Queue.queue): queue that contains hashes to be analyzed.\n hash_analysis_queue (Queue.queue): queue the analyzer will append\n HashAnalysis objects to.\n \"\"\"\n super(VirusTotalAnalyzer, self).__init__(\n hash_queue, hash_analysis_queue, **kwargs)\n self._api_key = None\n self._checked_for_old_python_version = False\n\n def _QueryHashes(self, digests):\n \"\"\"Queries VirusTotal for a specfic hashes.\n\n Args:\n digests (list[str]): hashes to look up.\n\n Returns:\n dict[str, object]: JSON response or None on error.\n \"\"\"\n url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)}\n\n try:\n json_response = self.MakeRequestAndDecodeJSON(\n self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters)\n except errors.ConnectionError as exception:\n json_response = None\n logger.error('Unable to query VirusTotal with error: {0!s}.'.format(\n exception))\n\n return json_response\n\n def Analyze(self, hashes):\n \"\"\"Looks up hashes in VirusTotal using the VirusTotal HTTP API.\n\n The API is documented here:\n https://developers.virustotal.com/reference\n\n Args:\n hashes (list[str]): hashes to look up.\n\n Returns:\n list[HashAnalysis]: analysis results.\n\n Raises:\n RuntimeError: If the VirusTotal API key has not been set.\n \"\"\"\n if not self._api_key:\n raise RuntimeError('No API key specified for VirusTotal lookup.')\n\n hash_analyses = []\n\n json_response = self._QueryHashes(hashes) or []\n\n # VirusTotal returns a dictionary when a single hash is queried\n # and a list when multiple hashes are queried.\n if isinstance(json_response, dict):\n json_response = [json_response]\n\n for result in json_response:\n resource = result['resource']\n hash_analysis = hash_tagging.HashAnalysis(resource, result)\n hash_analyses.append(hash_analysis)\n\n return hash_analyses\n\n def SetAPIKey(self, api_key):\n \"\"\"Sets the VirusTotal API key to use in 
queries.\n\n Args:\n api_key (str): VirusTotal API key\n \"\"\"\n self._api_key = api_key\n\n def TestConnection(self):\n \"\"\"Tests the connection to VirusTotal\n\n Returns:\n bool: True if VirusTotal is reachable.\n \"\"\"\n json_response = self._QueryHashes([self._EICAR_SHA256])\n return json_response is not None\n\n\nclass VirusTotalAnalysisPlugin(hash_tagging.HashTaggingAnalysisPlugin):\n \"\"\"An analysis plugin for looking up hashes in VirusTotal.\"\"\"\n\n # TODO: Check if there are other file types worth checking VirusTotal for.\n DATA_TYPES = ['pe:compilation:compilation_time']\n\n NAME = 'virustotal'\n\n _VIRUSTOTAL_NOT_PRESENT_RESPONSE_CODE = 0\n _VIRUSTOTAL_PRESENT_RESPONSE_CODE = 1\n _VIRUSTOTAL_ANALYSIS_PENDING_RESPONSE_CODE = -2\n\n def __init__(self):\n \"\"\"Initializes a VirusTotal analysis plugin.\"\"\"\n super(VirusTotalAnalysisPlugin, self).__init__(VirusTotalAnalyzer)\n self._api_key = None\n\n def EnableFreeAPIKeyRateLimit(self):\n \"\"\"Configures Rate limiting for queries to VirusTotal.\n\n The default rate limit for free VirusTotal API keys is 4 requests per\n minute.\n \"\"\"\n self._analyzer.hashes_per_batch = 4\n self._analyzer.wait_after_analysis = 60\n self._analysis_queue_timeout = self._analyzer.wait_after_analysis + 1\n\n def GenerateLabels(self, hash_information):\n \"\"\"Generates a list of strings that will be used in the event tag.\n\n Args:\n hash_information (dict[str, object]): the JSON decoded contents of the\n result of a VirusTotal lookup, as produced by the VirusTotalAnalyzer.\n\n Returns:\n list[str]: strings describing the results from VirusTotal.\n \"\"\"\n response_code = hash_information['response_code']\n if response_code == self._VIRUSTOTAL_NOT_PRESENT_RESPONSE_CODE:\n return ['virustotal_not_present']\n\n if response_code == self._VIRUSTOTAL_PRESENT_RESPONSE_CODE:\n positives = hash_information['positives']\n if positives > 0:\n return ['virustotal_detections_{0:d}'.format(positives)]\n\n return ['virustotal_no_detections']\n\n if response_code == self._VIRUSTOTAL_ANALYSIS_PENDING_RESPONSE_CODE:\n return ['virustotal_analysis_pending']\n\n logger.error(\n 'VirusTotal returned unknown response code {0!s}'.format(\n response_code))\n return ['virustotal_unknown_response_code_{0:d}'.format(response_code)]\n\n def SetAPIKey(self, api_key):\n \"\"\"Sets the VirusTotal API key to use in queries.\n\n Args:\n api_key (str): VirusTotal API key\n \"\"\"\n self._analyzer.SetAPIKey(api_key)\n\n def TestConnection(self):\n \"\"\"Tests the connection to VirusTotal\n\n Returns:\n bool: True if VirusTotal is reachable.\n \"\"\"\n return self._analyzer.TestConnection()\n\n\nmanager.AnalysisPluginManager.RegisterPlugin(VirusTotalAnalysisPlugin)\n","sub_path":"plaso/analysis/virustotal.py","file_name":"virustotal.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"486390927","text":"import os\nimport json\n\nfrom document import Document\nfrom doc_id import DocID\nfrom doc_url import DocURL\n# for using bookkeeping.json\n\nBAD_EXTENSIONS = [\".jpg\", \".zip\", \".png\", \".css\"]\nBAD_FILES = [\"39/373\", \"35/269\", \"8/121\"]\n\n\ndef read_json(file_name):\n with open(file_name) as json_data:\n json_dict = json.load(json_data)\n return json_dict\n\n\ndef make_document(json_dict, count=None):\n doc_list = []\n for doc_id, doc_url in json_dict.items()[:count]:\n if any(doc_url.endswith(ext) for ext in BAD_EXTENSIONS):\n continue\n # if \".jpg\" in doc_url or 
\".zip\" in doc_url or \".png\" in doc_url or \".css\" in doc_url:\n # continue\n if \".txt\" in doc_url and \"Wumpus\" in doc_url:\n continue\n if doc_id in BAD_FILES:\n continue\n doc_list.append(Document(DocID(doc_id), DocURL(doc_url)))\n return doc_list\n\n# for iterating over WEBPAGES_RAW\n\n\ndef get_docs_in_dir(doc_dir):\n return [os.path.join(doc_dir, doc) for doc in os.listdir(doc_dir)]\n\n\ndef get_all_docs(directory):\n all_docs = []\n for sub_dir in os.listdir(directory):\n sub_dir = os.path.join(directory, sub_dir)\n if os.path.isdir(sub_dir):\n all_docs.append(get_docs_in_dir(sub_dir))\n return all_docs\n\n\nif __name__ == \"__main__\":\n\n json_dict = read_json(\"***REDACTED***\")\n for doc in make_document(json_dict, 10):\n print(doc)\n\n","sub_path":"Document/get_document.py","file_name":"get_document.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"493848449","text":"# coding: UTF-8\nimport discord\nimport settings\nimport commands\n\nclient = discord.Client()\n\nAPP_ENV = settings.APP_ENV\nAPP_ENV_PROD = 'prod'\nAPP_ENV_DEV = 'dev'\nBOT_TOKEN = settings.BOT_TOKEN\n# 開発検証用チャンネルのID\nTEST_CHANNEL_ID = 696641960047542333\n\n\ndef isLocal():\n return APP_ENV == APP_ENV_DEV\n\n\ndef isTestChannel(channel):\n return channel.id == TEST_CHANNEL_ID\n\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n\n@client.event\nasync def on_message(message):\n if message.author.bot:\n return\n if isLocal() and not isTestChannel(message.channel):\n return\n if not isLocal() and isTestChannel(message.channel):\n return\n if message.content == '/neko':\n await commands.neko(message.channel)\n if message.content == '/members':\n await commands.members(message.guild.members, message.author)\n if message.content == '/roles':\n await commands.roles(message.guild.roles, message.author)\n\nclient.run(BOT_TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"72802477","text":"from testtools import TestCase\nfrom testtools.matchers import raises\n\nfrom .retry import retry\nfrom . 
import Effect, ErrorIntent, FuncIntent, ConstantIntent\nfrom .testing import StubIntent, resolve_stubs\n\n\nConstant = lambda x: StubIntent(ConstantIntent(x))\n\n\nclass RetryTests(TestCase):\n\n def test_should_not_retry(self):\n \"\"\"retry raises the last error if should_retry returns False.\"\"\"\n result = retry(Effect(StubIntent(ErrorIntent(RuntimeError(\"oh no!\")))),\n lambda e: Effect(StubIntent(ConstantIntent(False))))\n self.assertThat(lambda: resolve_stubs(result),\n raises(RuntimeError(\"oh no!\")))\n\n def _repeated_effect_func(self, *funcs):\n \"\"\"\n Return an (impure) function which does different things based on the\n number of times it's been called.\n \"\"\"\n counter = [0]\n\n def func():\n count = counter[0]\n counter[0] += 1\n return funcs[count]()\n\n return func\n\n def test_retry(self):\n \"\"\"\n When should_retry returns an Effect of True, the func will be called\n again.\n \"\"\"\n func = self._repeated_effect_func(\n lambda: raise_(RuntimeError(\"foo\")),\n lambda: \"final\")\n result = retry(Effect(StubIntent(FuncIntent(func))),\n lambda e: Effect(StubIntent(ConstantIntent(True))))\n self.assertEqual(resolve_stubs(result), \"final\")\n\n def test_continue_retrying(self):\n \"\"\"\n should_retry is passed the exception information, and will be\n called until it returns False.\n \"\"\"\n\n func = self._repeated_effect_func(\n lambda: raise_(RuntimeError(\"1\")),\n lambda: raise_(RuntimeError(\"2\")),\n lambda: raise_(RuntimeError(\"3\")))\n\n def should_retry(e):\n return Effect(StubIntent(ConstantIntent(str(e[1]) != \"3\")))\n\n result = retry(Effect(StubIntent(FuncIntent(func))), should_retry)\n self.assertThat(lambda: resolve_stubs(result),\n raises(RuntimeError(\"3\")))\n\n\ndef raise_(exc):\n raise exc\n","sub_path":"effect/test_retry.py","file_name":"test_retry.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"260004715","text":"# O(n * log(26)) ~ O(n)\nimport collections, heapq\ndef task_scheduler(tasks, n):\n count = collections.Counter(tasks)\n heap = [-val for val in count.values()]\n heapq.heapify(heap)\n ans = 0\n while heap:\n interval = n + 1\n temp = []\n while interval > 0 and heap:\n count = heapq.heappop(heap)\n count += 1\n temp.append(count)\n interval -= 1\n ans += 1\n\n for item in temp:\n if item < 0:\n heapq.heappush(heap, item)\n\n if not heap:\n break\n\n # if interval > 0, the machine will become idle\n ans += interval\n\n return ans\n\nprint(task_scheduler([\"A\",\"A\",\"A\",\"B\",\"B\",\"B\"], 2))\n","sub_path":"621_task_scheduler.py","file_name":"621_task_scheduler.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"584603695","text":"import socket\n\nHOST = '192.168.1.245'\nPORT = 1234\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n s.sendall('Connection establish'.encode())\n while True:\n data = s.recv(1024).decode()\n print('Received', data)\n if not data:\n break\n s.sendall(input('Enter: ').encode())\n \n\n","sub_path":"Level 3/05. Sockets/05. 
Multiplayer Online Game/Client 2.py","file_name":"Client 2.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"349870144","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport redactor.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Question', '0010_auto_20160331_1841'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='compositionanswer',\n name='text',\n field=redactor.fields.RedactorField(verbose_name=b'\\xd0\\xa2\\xd0\\xb5\\xd0\\xba\\xd1\\x81\\xd1\\x82'),\n ),\n migrations.AlterField(\n model_name='manychoiceanswer',\n name='text',\n field=redactor.fields.RedactorField(verbose_name=b'\\xd0\\xa2\\xd0\\xb5\\xd0\\xba\\xd1\\x81\\xd1\\x82'),\n ),\n migrations.AlterField(\n model_name='selfanswer',\n name='text',\n field=redactor.fields.RedactorField(verbose_name=b'\\xd0\\xa2\\xd0\\xb5\\xd0\\xba\\xd1\\x81\\xd1\\x82'),\n ),\n ]\n","sub_path":"Question/migrations/0011_auto_20160331_1842.py","file_name":"0011_auto_20160331_1842.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"415342936","text":"\"\"\"Dla danego stringa x stwórz słownik przechowujący\ninformację, ile razy dana litera wystąpiła w stringu.\"\"\"\n\nx = \"myszydokazujągdykotanieczują\"\nletters = {}\n\nfor letter in x:\n if letters.get(letter):\n letters[letter] += 1\n else:\n letters[letter] = 1\n\nprint(letters)","sub_path":"09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"4044590","text":"class ElementDoubleLinkList:\n def __init__(self, value: int):\n self.value: int = value\n self.previous: ElementDoubleLinkList = None\n self.next: ElementDoubleLinkList = None\n\n\nclass DoubleLinkList:\n def __init__(self):\n self.head: ElementDoubleLinkList = None\n\n def insert_front(self, element: ElementDoubleLinkList):\n if self.head is None:\n self.head = element\n else:\n old_head: ElementDoubleLinkList = self.head\n self.head = element\n self.head.next = old_head\n self.head.previous = None\n old_head.previous = self.head\n\n def insert_end(self, element: ElementDoubleLinkList):\n if self.head is None:\n self.head = element\n else:\n prev = None\n curr = self.head\n while curr:\n prev = curr\n curr = curr.next\n\n curr = element\n curr.previous = prev\n prev.next = curr\n curr.next = None\n","sub_path":"python/src/datastructures/double_link_list.py","file_name":"double_link_list.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"645996892","text":"import glob\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfor fname in glob.glob('data/city*.csv'):\n year = fname[23:27]\n print('Loading data for year ', year)\n data = np.loadtxt(fname, delimiter=',')\n\n fig = plt.figure(figsize=(10.0, 3.0))\n\n axes1 = fig.add_subplot(1, 3, 1)\n axes2 = fig.add_subplot(1, 3, 2)\n axes3 = fig.add_subplot(1, 3, 3)\n\n axes1.set_ylabel('average')\n axes1.plot(np.mean(data, axis=0), 'rs')\n\n axes2.set_ylabel('max')\n axes2.plot(np.max(data, axis=0), 'b--')\n\n axes3.set_ylabel('min')\n axes3.plot(np.min(data, axis=0), 'go')\n\n 
fig.tight_layout()\n\nplt.show()","sub_path":"solutions/02-1a.py","file_name":"02-1a.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"549797978","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom foodsite.library import views\n# from api import ChefResource\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\n# chef_resource = ChefResource()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'mysite.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n (r'^index/', TemplateView.as_view(template_name=\"index.html\")),\n (r'^$', TemplateView.as_view(template_name=\"index.html\")),\n # (r'^chefs/gordon_ramsay', TemplateView.as_view(template_name=\"chefs/gordon_ramsay.html\")),\n (r'^about/', TemplateView.as_view(template_name=\"about.html\")),\n (r'^chefs/$', views.chefmain),\n (r'^recipes/$', views.recipemain),\n (r'^regions/$', views.regionmain),\n (r'^sochi/$', views.sochimain),\n (r'^recipes/(?P\\w+)/$', views.recipe),\n (r'^chefs/(?P\\w+)/$', views.chef),\n (r'^regions/(?P\\w+)/$', views.region),\n (r'^api/chef/(?P\\w+)?', views.get_chef),\n (r'^api/region/(?P\\w+)?', views.get_region),\n (r'^api/recipe/(?P\\w+)?', views.get_recipe),\n (r'^search/search_result/?', views.search),\n)","sub_path":"foodsite/foodsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"15901814","text":"# The Expat License\n#\n# Copyright (c) 2017, Shlomi Fish\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys\nimport re\nfrom subprocess import check_output\n\nif sys.version_info > (3,):\n long = int\n xrange = range\n\n\ndef count_primes_up_to(n):\n out = check_output([\"primesieve\", str(n), \"-c1\"])\n m = re.search(r'(?:Prime numbers|Primes)\\s*:\\s*([0-9]+)', out)\n return long(m.group(1))\n\n\ndef brute_force_calc_s(n):\n out = check_output([\"primesieve\", str(n), \"-p1\"])\n primes = [long(x) for x in out.split(\"\\n\") if len(x)]\n h1 = {}\n for x in primes:\n h1[x] = True\n s = len(h1.keys())\n h = h1\n for k in xrange(2, n):\n next_h = {}\n for num in h.keys():\n for p in primes:\n next_num = num + p\n if next_num <= n:\n next_h[next_num] = True\n s += len(next_h.keys())\n h = next_h\n return s\n\n\ndef calc_s(n):\n if n == 2:\n return len([2])\n if n == 3:\n return len([2, 3])\n if n == 5:\n return len([2, 3, 5, 2+2, 2+3])\n if n == 8:\n return len([2, 3, 5, 7, 2+2, 2+3, 2+5, 3+3, 3+5, 2+2+2, 2+2+3,\n 2+3+3, 2+2+2+2])\n # Calc s[k=1].\n s_1 = count_primes_up_to(n)\n # Calc s[k=2] for i odd.\n s_2 = count_primes_up_to((n if ((n & 0x1) == 1) else n-1)-2)-1\n # Calc the higher s-s for even numbers.\n top_even = (n & (~0x1))\n bottom_even = 4\n even_count_for_k_2 = ((top_even - bottom_even) >> 1) + 1\n even_s = ((even_count_for_k_2 * (1+even_count_for_k_2)) >> 1)\n\n # Calc the higher s-s for odd numbers.\n top_odd = (n if ((n & 0x1) == 1) else n-1)\n bottom_odd = 2+2+3\n odd_count_for_k_3 = ((top_odd - bottom_odd) >> 1) + 1\n odd_s = ((odd_count_for_k_3 * (1+odd_count_for_k_3)) >> 1)\n\n return s_1 + s_2 + even_s + odd_s\n\n\nfibs = [long(0), long(1)]\n\nwhile len(fibs) < 45:\n fibs.append(fibs[-1] + fibs[-2])\n\nprint(fibs)\n\n\ndef print_s(n):\n print((\"S[%d] = %d\" % (n, calc_s(n))))\n return\n\n\nprint_s(10)\nprint_s(100)\nprint_s(1000)\n\n\ndef check_print_s(n):\n calced = calc_s(n)\n real = brute_force_calc_s(n)\n print((\"S[%d] = Real = %d ; Calc = %d\" % (n, real, calced)))\n if (real != calced):\n raise BaseException\n return\n\n\ncheck_print_s(10)\ncheck_print_s(100)\ncheck_print_s(11)\ncheck_print_s(21)\ncheck_print_s(101)\nprint(\"Result = %d\" % (sum([calc_s(fibs[k]) for k in xrange(3, 45)])))\n","sub_path":"project-euler/543/euler_543_v1.py","file_name":"euler_543_v1.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"189697257","text":"# -*- coding: utf-8 -*-\n# Copyright 2018 Elitumdevelop S.A, Ing. 
Mario Rangel\n# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).\n\nfrom odoo import api, fields, models\nfrom datetime import datetime, time\nfrom odoo.exceptions import ValidationError\n\n\nclass SectoralCode(models.Model):\n _name = 'eliterp.sectoral.code'\n\n _description = 'Código sectorial IESS'\n\n name = fields.Char('Código de cargo', size=13, required=True)\n\n _sql_constraints = [\n ('name_unique', 'unique(name)', 'EL Código de cargo ya existe en registros.'),\n ]\n\n\nclass TypeHistory(models.Model):\n _name = 'eliterp.type.history'\n\n _description = 'Tipo de historial'\n\n name = fields.Char('Nombre')\n\n\nclass TypeEquiment(models.Model):\n _name = 'eliterp.type.equipment'\n\n _description = 'Tipo de Equipos'\n\n name = fields.Char('Nombre')\n\n\nclass LinesHistoryEmployee(models.Model):\n _name = 'eliterp.lines.history.employee'\n\n _description = 'Líneas de documentos de empleado'\n\n type = fields.Many2one('eliterp.type.history', 'Tipo', required=True)\n date = fields.Date('Fecha de registro', default=fields.Date.context_today, required=True)\n comment = fields.Text('Comentarios')\n date_validity = fields.Date('Fecha de vigencia')\n employee_id = fields.Many2one('hr.employee', string='Empleado')\n adjunt = fields.Binary('Documento')\n adjunt_name = fields.Char('Nombre')\n\n\nclass LinesEquipmentEmployee(models.Model):\n _name = 'eliterp.lines.equipment.employee'\n\n _description = 'Líneas de equipos de empleado'\n\n type = fields.Many2one('eliterp.type.equipment', 'Tipo', required=True)\n date = fields.Date('Fecha de registro', default=fields.Date.context_today, required=True)\n article = fields.Char('Descripción')\n accessories = fields.Char('Accesorios')\n movement = fields.Selection([('delivery', 'Entrega'),\n ('returned', 'Devuelto')], 'Movimiento', default='delivery')\n state_product = fields.Selection([('new_product', 'Nuevo'),\n ('returned', 'Usado')], 'Estado', default='new_product')\n\n select = fields.Boolean('Imprimir?')\n adjunt = fields.Binary('Documento', attachment=True)\n adjunt_name = fields.Char('Nombre')\n employee_id = fields.Many2one('hr.employee', string='Empleado')\n\n\nclass LinesEmployeeDocuments(models.Model):\n _name = 'eliterp.lines.employee.documents'\n\n _description = 'Líneas de documentos de empleado'\n\n document_name = fields.Char('Nombre de documento')\n adjunt = fields.Binary('Documento')\n adjunt_name = fields.Char('Nombre')\n documents_id = fields.Many2one('eliterp.employee.documents', string='Documentos')\n\n\nclass EmployeeDocuments(models.Model):\n _name = 'eliterp.employee.documents'\n\n _description = 'Documentos de empleado'\n\n @api.model\n def _get_lines_documents(self):\n \"\"\"\n Obtenemos las líneas de documentos\n :param type:\n :return: object\n \"\"\"\n list_documents = []\n list_names = [\n 'Acuerdo de Confidencialidad',\n 'Aviso de Entrada IESS',\n 'Contrato de Trabajo',\n 'Hoja de Vida',\n 'Copia de certificados de cursos, seminarios, talleres',\n 'Copia de título o acta de grado',\n 'Copia de título o prefesional registrado en Senescyt',\n 'Copia a color Cédula de identidad',\n 'Copia a color Certificado de Votación',\n 'Fotografía tamaño carnet a color',\n 'Copia acta de matrimonio ó declaración juramentada unión libre',\n 'Copia de cédula de cargas familiares',\n 'Certificado de salud del MSP',\n 'Certificado de trabajo con números de contacto',\n 'Copia de planilla de servicios básicos',\n 'Referencias personales con números de contacto',\n 'Aviso de Salida IESS',\n 'Acta de Finiquito',\n ]\n for 
line in list_names:\n list_documents.append([0, 0, {'document_name': line, }])\n return list_documents\n\n @api.model\n def create(self, values):\n if 'default_employee_id' in self._context: # Cambiar el nombre del documento, para presentación\n employee_id = self.env['hr.employee'].search([('id', '=', self._context['default_employee_id'])], limit=1)\n values.update({'name': 'Documentos de ' + employee_id[0].name})\n return super(EmployeeDocuments, self).create(values)\n\n name = fields.Char('Nombre')\n employee_id = fields.Many2one('hr.employee', 'Empleado')\n lines_documents = fields.One2many('eliterp.lines.employee.documents', 'documents_id',\n 'Líneas de documento', default=_get_lines_documents)\n\n\nclass EmployeesChildren(models.Model):\n _name = 'eliterp.employees.children'\n\n _description = 'Hijos de empleados'\n\n @api.depends('birthday')\n def _get_age_children(self):\n \"\"\"\n Obtenemos la edad de cada hijo\n \"\"\"\n for children in self:\n age = 0\n if children.birthday:\n age = (datetime.now().date() - datetime.strptime(children.birthday, '%Y-%m-%d').date()).days / 365\n children.update({'age': age})\n\n names = fields.Char('Nombres', required=True)\n documentation_number = fields.Char('Nº de identificación', size=10)\n birthday = fields.Date('Fecha de nacimiento', required=True)\n age = fields.Integer('Edad', compute='_get_age_children')\n employee_id = fields.Many2one('hr.employee', string='Empleado')\n\n\nclass Employee(models.Model):\n _inherit = 'hr.employee'\n\n @api.model_cr_context\n def _init_column(self, column_name):\n \"\"\"\n Actualizamos columnas vacías\n :param column_name:\n \"\"\"\n field = self._fields[column_name]\n if field.default:\n value = field.default(self)\n else:\n value = None\n necessary = (value is not None) if field.type != 'boolean' else value\n if necessary:\n query = 'UPDATE \"%s\" SET \"%s\"=%s WHERE \"%s\" IS NULL' % (\n self._table, column_name, field.column_format, column_name)\n self._cr.execute(query, (value,))\n\n @api.onchange('names', 'surnames')\n def _onchange_names(self):\n \"\"\"\n Actualizamos nombre de empleado\n :return: dict\n \"\"\"\n value = {}\n if self.names and self.surnames:\n value['name'] = self.surnames + ' ' + self.names\n return {'value': value}\n\n @api.depends('birthday')\n @api.one\n def _get_age(self):\n \"\"\"\n Obtenemos la edad del empleado\n \"\"\"\n for employee in self:\n age = 0\n if employee.birthday:\n age = (datetime.now().date() - datetime.strptime(employee.birthday, '%Y-%m-%d').date()).days / 365\n employee.age = age\n\n @api.onchange('user_id')\n def _onchange_user(self):\n \"\"\"\n MM\n \"\"\"\n pass\n\n @api.multi\n def open_documents(self):\n \"\"\"\n Abrimos los documentos realacionados al empleado\n :return: dict\n \"\"\"\n documents_id = self.env['eliterp.employee.documents'].search([('employee_id', '=', self[0].id)])\n res = {\n 'type': 'ir.actions.act_window',\n 'res_model': 'eliterp.employee.documents',\n 'view_mode': 'form',\n 'view_type': 'form',\n }\n if documents_id:\n res['res_id'] = documents_id[0].id\n res['context'] = \"{}\"\n else:\n res['context'] = \"{'default_employee_id': \" + str(self[0].id) + \"}\"\n return res\n\n @api.multi\n def re_entry(self):\n \"\"\"\n Reingreso de empleado\n \"\"\"\n self.write({\n 'active': True,\n 'departure_date': False\n })\n\n @api.depends('memo_ids')\n @api.one\n def _compute_memo_quantity(self):\n \"\"\"\n Calculamos la cantidad de memos por empleado (Soló validados)\n \"\"\"\n self.memo_quantity = len(self.memo_ids.filtered(lambda x: x.state 
== 'validate'))\n\n @api.model\n def _get_date_formattoday(self):\n today = fields.Date.today()\n return self.env['eliterp.global.functions'].get_date_format_invoice(today)\n\n @api.model\n def _get_date_format(self):\n return self.env['eliterp.global.functions'].get_date_format_invoice(self.equipment_history.date)\n\n @api.multi\n def imprimir_acta_delivery_equiment(self):\n \"\"\"\n Imprimimo Acta de entrega equipos\n \"\"\"\n self.ensure_one()\n if all(x.movement == 'delivery' for x in self.equipment_history.filtered(lambda x: x.select)):\n return self.env.ref('eliterp_hr.eliterp_action_report_employee_acta_delivery_equiment').report_action(self)\n elif all(x.movement == 'returned' for x in self.equipment_history.filtered(lambda x: x.select)):\n return self.env.ref('eliterp_hr.eliterp_action_report_employee_acta_delivery_equiment1').report_action(self)\n else:\n return\n\n @api.multi\n def imprimir_acta_delivery(self):\n \"\"\"\n Imprimimo Acta de entrega uniforme\n \"\"\"\n\n self.ensure_one()\n return self.env.ref('eliterp_hr.eliterp_action_report_employee_acta_delivery').report_action(self)\n\n @api.multi\n def write(self, vals):\n \"\"\"\n Modificamos la fecha de ingreso del contrato al cambiar la del empleado\n :param vals:\n :return: object\n \"\"\"\n res = super(Employee, self).write(vals)\n if self.contract_id and self.active and 'departure_date' in vals:\n if vals['departure_date']:\n self.contract_id.update({\n 'departure_date': vals['departure_date'],\n 'state_customize': 'finalized'\n })\n self.active = False\n return res\n\n @api.depends('apply_overtime', 'wage')\n @api.one\n def _get_amount_hours(self):\n \"\"\"\n Obtenemos valor de HE por empleado\n \"\"\"\n if self.apply_overtime:\n self.extra_hours = round((self.wage / 240) * 2, 2)\n self.additional_hours = round((self.wage / 240) * 1.5, 2)\n\n names = fields.Char('Nombres', required=True)\n surnames = fields.Char('Apellidos', required=True)\n education_level = fields.Selection([\n ('basic', 'Educación básica'),\n ('graduate', 'Bachiller'),\n ('professional', 'Tercer nivel'),\n ('master', 'Postgrado')\n ], 'Nivel de educación', default='basic')\n blood_type = fields.Selection([\n ('a_most', 'A+'),\n ('a_minus', 'A-'),\n ('b_most', 'B+'),\n ('b_minus', 'B-'),\n ('ab_most', 'AB+'),\n ('ab_minus', 'AB-'),\n ('o_most', 'O+'),\n ('o_minus', 'O-')\n ], 'Tipo de sangre', default='o_most')\n sectoral_code = fields.Many2one('eliterp.sectoral.code', 'Código sectorial')\n wage = fields.Float('Sueldo', required=True)\n age = fields.Integer('Edad', compute='_get_age')\n benefits = fields.Selection([('yes', 'Si'), ('no', 'No')], string='Acumula beneficios?', default='no',\n required=True)\n bank_id = fields.Many2one('res.bank', 'Nombre de banco', domain=[('type_use', '=', 'employees')])\n bank_account = fields.Char('Cuenta bancaria')\n lines_children = fields.One2many('eliterp.employees.children', 'employee_id', 'Hijos')\n contact_1 = fields.Char('Contacto')\n relationship_1 = fields.Char('Parentesco')\n phone_1 = fields.Char('Teléfono')\n contact_2 = fields.Char('Contacto')\n relationship_2 = fields.Char('Parentesco')\n phone_2 = fields.Char('Teléfono')\n admission_date = fields.Date('Fecha de ingreso', required=True, default=fields.Date.context_today)\n struct_id = fields.Many2one('hr.payroll.structure', string='Estructura salarial')\n extension = fields.Char('Extensión', size=3)\n personal_phone = fields.Char('Teléfono personal')\n home_address = fields.Char('Dirección de domicilio')\n\n lines_history = 
fields.One2many('eliterp.lines.history.employee', 'employee_id', string='Historial de empleado')\n equipment_history = fields.One2many('eliterp.lines.equipment.employee', 'employee_id', string='Equipos de empleado')\n\n apply_overtime = fields.Boolean('Aplica?', default=False)\n extra_hours = fields.Float('Monto HE 100%', compute='_get_amount_hours', store=True)\n additional_hours = fields.Float('Monto HE 50%', compute='_get_amount_hours', store=True)\n mobilization = fields.Float('Movilización',\n help='Será dado al empleado la mitad en ADQ y la otra mitad en Rol consolidado.')\n spouses = fields.Boolean('Ext. conyugues', default=False)\n departure_date = fields.Date('Fecha de salida',\n help=\"Si se registra este campo se anulará el contrato del empleado relacionado.\")\n previous_contract_days = fields.Integer('Días ant. dis.', help='Días para antiguedad discontinua para un empleado.')\n memo_quantity = fields.Integer('Memorandums', compute='_compute_memo_quantity')\n memo_ids = fields.One2many('eliterp.memo', 'employee', string='Memos')\n commentary = fields.Text('Comentario')\n wage_bono = fields.Float('Bono variable ref')","sub_path":"eliterp_hr/models/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":13354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"638588737","text":"\"\"\"\nImplemented Vanilla GAN from this Course:\n\nhttp://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-handout.pdf\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport config\n\ndtype = config.dtype\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n nin, nout = 3, 32\n self.conv1_depthwise = nn.Conv2d(nin, nout, 4, stride=2, padding=1, groups=1).type(dtype)\n #self.conv1_pointwise = nn.Conv2d(nin, nout, 1).type(dtype)\n nn.init.xavier_normal(self.conv1_depthwise.weight)\n #nn.init.xavier_normal(self.conv1_pointwise.weight)\n self.bn1 = nn.BatchNorm2d(32).type(dtype)\n\n nin, nout = 32, 64\n self.conv2_depthwise = nn.Conv2d(nin, nout, 4, stride=2, padding=1, groups=1).type(dtype)\n #self.conv2_pointwise = nn.Conv2d(nin, nout, 1).type(dtype)\n nn.init.xavier_normal(self.conv2_depthwise.weight)\n #nn.init.xavier_normal(self.conv2_pointwise.weight)\n self.bn2 = nn.BatchNorm2d(64).type(dtype)\n\n nin, nout = 64, 128\n self.conv3_depthwise = nn.Conv2d(nin, nout, 4, stride=2, padding=1, groups=1).type(dtype)\n #self.conv3_pointwise = nn.Conv2d(nin, nout, 1).type(dtype)\n nn.init.xavier_normal(self.conv3_depthwise.weight)\n #nn.init.xavier_normal(self.conv3_pointwise.weight)\n self.bn3 = nn.BatchNorm2d(128).type(dtype)\n\n nin, nout = 128, 1\n self.conv4_depthwise = nn.Conv2d(nin, nout, 4, stride=1, padding=1, groups=1).type(dtype)\n #self.conv4_pointwise = nn.Conv2d(nin, nout, 1).type(dtype)\n nn.init.xavier_normal(self.conv4_depthwise.weight)\n #nn.init.xavier_normal(self.conv4_pointwise.weight)\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n x = x.type(dtype)\n # Conv 1\n out = self.conv1_depthwise(x)\n #out = self.conv1_pointwise(out)\n out = self.bn1(out)\n out = F.relu(out)\n\n # Conv 2\n out = self.conv2_depthwise(out)\n #out = self.conv2_pointwise(out)\n out = self.bn2(out)\n out = F.relu(out)\n\n # Conv 3\n out = self.conv3_depthwise(out)\n #out = self.conv3_pointwise(out)\n out = self.bn3(out)\n out = F.relu(out)\n\n # Conv 4\n out = self.conv4_depthwise(out)\n #out = self.conv4_pointwise(out)\n if not 
config.use_wgan_loss:\n out = self.sigmoid(out)\n\n return out\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n self.deconv1 = nn.ConvTranspose2d(100, 128, 4, stride=4, padding=0).type(dtype)\n nn.init.xavier_normal(self.deconv1.weight)\n self.bn1 = nn.BatchNorm2d(128).type(dtype)\n\n self.deconv2 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1).type(dtype)\n nn.init.xavier_normal(self.deconv2.weight)\n self.bn2 = nn.BatchNorm2d(64).type(dtype)\n\n self.deconv3 = nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1).type(dtype)\n nn.init.xavier_normal(self.deconv3.weight)\n self.bn3 = nn.BatchNorm2d(32).type(dtype)\n\n self.deconv4 = nn.ConvTranspose2d(32, 3, 4, stride=2, padding=1).type(dtype)\n nn.init.xavier_normal(self.deconv4.weight)\n\n def forward(self, x):\n out = self.deconv1(x.type(dtype))\n # TODO: Investigate putting Batch Norm before versus after the RELU layer\n # Resources:\n # https://www.reddit.com/r/MachineLearning/comments/67gonq/d_batch_normalization_before_or_after_relu/\n # https://www.youtube.com/watch?v=Xogn6veSyxA&feature=youtu.be&t=325\n out = self.bn1(out)\n out = F.relu(out)\n\n out = self.deconv2(out)\n out = self.bn2(out)\n out = F.relu(out)\n\n out = self.deconv3(out)\n out = self.bn3(out)\n out = F.relu(out)\n\n out = self.deconv4(out)\n out = torch.tanh(out)\n\n return out\n\nclass GeneratorSkipConnections(nn.Module):\n def make_resblock(self, map_size):\n conv1_depthwise = nn.ConvTranspose2d(map_size, map_size, 3, stride=1, padding=1, groups=map_size).type(dtype)\n conv1_pointwise = nn.ConvTranspose2d(map_size, map_size, 1).type(dtype)\n nn.init.xavier_normal(conv1_depthwise.weight)\n nn.init.xavier_normal(conv1_pointwise.weight)\n bn = nn.BatchNorm2d(map_size).type(dtype)\n conv2_depthwise = nn.ConvTranspose2d(map_size, map_size, 3, stride=1, padding=1, groups=map_size).type(dtype)\n conv2_pointwise = nn.ConvTranspose2d(map_size, map_size, 1).type(dtype)\n nn.init.xavier_normal(conv2_depthwise.weight)\n nn.init.xavier_normal(conv2_pointwise.weight)\n\n resblock = nn.ModuleList()\n resblock.append(conv1_depthwise)\n resblock.append(conv1_pointwise)\n resblock.append(bn)\n resblock.append(conv2_depthwise)\n resblock.append(conv2_pointwise)\n\n return resblock\n\n def apply_resblock(self, out, resblock):\n out = resblock[0](out)\n out = resblock[1](out)\n out = resblock[2](out)\n out = F.relu(out)\n out = resblock[3](out)\n out = resblock[4](out)\n\n return out\n\n def __init__(self):\n super(GeneratorSkipConnections, self).__init__()\n\n # TODO: Change convolutions to DepthWise Seperable convolutions\n # TODO: Need to fix Mode Collapse that is occuring in the GAN\n # More info: https://www.quora.com/What-does-it-mean-if-all-produced-images-of-a-GAN-look-the-same\n\n # Upsampling layer\n nin, nout = 100, 128\n self.deconv1_depthwise = nn.ConvTranspose2d(nin, nin, 4, stride=4, padding=0, groups=nin).type(dtype)\n self.deconv1_pointwise = nn.ConvTranspose2d(nin, nout, 1).type(dtype)\n nn.init.xavier_normal(self.deconv1_depthwise.weight)\n nn.init.xavier_normal(self.deconv1_pointwise.weight)\n self.bn1 = nn.BatchNorm2d(128).type(dtype)\n\n # Resnet block\n self.resblock1A = self.make_resblock(128)\n\n # Upsampling layer\n nin, nout = 128, 64\n self.deconv2_depthwise = nn.ConvTranspose2d(nin, nin, 4, stride=2, padding=1, groups=nin).type(dtype)\n self.deconv2_pointwise = nn.ConvTranspose2d(nin, nout, 1).type(dtype)\n nn.init.xavier_normal(self.deconv2_depthwise.weight)\n 
nn.init.xavier_normal(self.deconv2_pointwise.weight)\n self.bn2 = nn.BatchNorm2d(64).type(dtype)\n\n # Resnet block\n self.resblock2A = self.make_resblock(64)\n\n # Upsampling layer 3\n nin, nout = 64, 32\n self.deconv3_depthwise = nn.ConvTranspose2d(nin, nin, 4, stride=2, padding=1, groups=nin).type(dtype)\n self.deconv3_pointwise = nn.ConvTranspose2d(nin, nout, 1).type(dtype)\n nn.init.xavier_normal(self.deconv3_depthwise.weight)\n nn.init.xavier_normal(self.deconv3_pointwise.weight)\n self.bn3 = nn.BatchNorm2d(32).type(dtype)\n\n # Resnet block\n self.resblock3A = self.make_resblock(32)\n\n # Upsampling layer 4\n nin, nout = 32, 3\n self.deconv4_depthwise = nn.ConvTranspose2d(nin, nin, 4, stride=2, padding=1, groups=nin).type(dtype)\n self.deconv4_pointwise = nn.ConvTranspose2d(nin, nout, 1).type(dtype)\n nn.init.xavier_normal(self.deconv4_depthwise.weight)\n nn.init.xavier_normal(self.deconv4_pointwise.weight)\n\n # Resnet block\n self.resblock4A = self.make_resblock(3)\n\n def forward(self, x):\n x = x.type(dtype)\n out = x\n\n # Multi scale image generation seems quite similar to using ResNet skip connections\n # In this case, we only use a single Resnet block instead of the entire Generator so the network is small enough to run on my laptop\n #\n # Upsample 1\n out = self.deconv1_depthwise(out)\n out = self.deconv1_pointwise(out)\n out = self.bn1(out)\n out = upsampled = F.relu(out)\n\n # Resnet block 1\n out += self.apply_resblock(out.clone(), self.resblock1A)\n\n # Upsample 2\n out = self.deconv2_depthwise(out)\n out = self.deconv2_pointwise(out)\n out = self.bn2(out)\n out = upsampled = F.relu(out)\n # Resnet block 2\n out += self.apply_resblock(out.clone(), self.resblock2A)\n\n # Upsample 3\n out = self.deconv3_depthwise(out)\n out = self.deconv3_pointwise(out)\n out = self.bn3(out)\n out = upsampled = F.relu(out)\n # Resnet block 3\n out += self.apply_resblock(out.clone(), self.resblock3A)\n\n # Upsample 4\n out = self.deconv4_depthwise(out)\n out = self.deconv4_pointwise(out)\n\n # Resnet block 4\n out += self.apply_resblock(out.clone(), self.resblock4A)\n\n out = torch.tanh(out)\n\n return out\n","sub_path":"vanilla_gan/vanilla_gan.py","file_name":"vanilla_gan.py","file_ext":"py","file_size_in_byte":8785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"271911848","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.cm as cm\nimport os\nimport random\n\n#%%\nimage_dir = r'G:\\Project\\paper2\\other_image\\MSRA-1000_images'\nmsd_dir = r'G:\\Project\\paper2\\test_image\\final\\out_msra-1000'\ngt_dir = r'G:\\Project\\paper2\\other_image\\binarymasks'\nsf_dir = r'G:\\Project\\paper2\\other_image\\MSAR1000\\SF'\ngc_dir = r'G:\\Project\\paper2\\other_image\\MSAR1000\\GC'\nrc_dir = r'G:\\Project\\paper2\\other_image\\MSAR1000\\RC'\nft_dir = r'G:\\Project\\paper2\\other_image\\MSAR1000\\FT'\nac_dir = r'G:\\Project\\paper2\\other_image\\MSAR1000\\AC'\nlc_dir = r'G:\\Project\\paper2\\other_image\\MSAR1000\\LC'\nhc_dir = r'G:\\Project\\paper2\\other_image\\MSAR1000\\HC'\nca_dir = r'G:\\Project\\paper2\\other_image\\MSAR1000\\CA'\n\nimage_name_list = ['0_0_899', '0_5_5189', '0_12_12048', '1_44_44321', '1_53_53905', '2_68_68619', '2_70_70693',\n '2_81_81637', '3_115_115573', '4_125_125923', '5_145_145732']\n\ndir_list = [image_dir, lc_dir, ca_dir, ac_dir, hc_dir, ft_dir, rc_dir, sf_dir, gc_dir, msd_dir, gt_dir]\ndir_dic = {d: ext for d in dir_list for ext in 
[os.listdir(d)[1].split('.')[-1]]}\n\nrow = 9\nrandom.shuffle(image_name_list)\nname_list = image_name_list[0:row]\n\ncolumn = len(dir_list)\nplt.figure(1)\nfor d in dir_list:\n n = dir_list.index(d)\n if n == 0:\n cmap = None\n else:\n cmap = cm.Greys_r\n for i, name in enumerate(name_list):\n m = name_list.index(name) * column + 1 + n\n plt.subplot(row, column, m)\n plt.imshow(plt.imread(d + os.sep + name + '.' + dir_dic[d]), cmap=cmap)\n plt.axis('off')\n\nplt.show()\n\n\n# %%\ndest_dir = r'G:\\PycharmProjects\\FT\\show_images'\nimage_order = ['o.jpg', 'ft.jpg', 'cc.jpg', 'ca.jpg', 'u.jpg', 'd.jpg', 'e.jpg', 'gt.bmp']\nlabels = [(lambda x: '(' + x + ')')(a) for a in 'abcdefgh']\n\nrow = 2\ncolumn = 4\nplt.figure(2)\nfor label, name in zip(labels, image_order):\n n = image_order.index(name)\n if n == 0:\n cmap = None\n else:\n cmap = cm.Greys_r\n plt.subplot(row, column, n + 1)\n plt.imshow(plt.imread(dest_dir + os.sep + name), cmap=cmap)\n # plt.axis('off')\n plt.grid(False)\n plt.xticks([]) # hide x axis\n plt.yticks([]) # hide y axis\n plt.xlabel(label)\n\nplt.show()\n\n#%% feature images\nfeature_image_dir = r'G:\\PycharmProjects\\FT\\feature_images'\nfeature_image_list = filter(lambda s: s.split('.')[-1] == 'jpg', os.listdir(feature_image_dir))\n\nrow = 1\ncolumn = 5\nplt.figure(3)\nfor name in feature_image_list:\n plt.subplot(row, column, feature_image_list.index(name) + 1)\n plt.imshow(plt.imread(feature_image_dir + os.sep + name))\n plt.axis('off')","sub_path":"plot_images.py","file_name":"plot_images.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503591203","text":"import random\nimport math\nimport numpy as np\nimport cv2\nimport time\nimport src.Auxilary as aux\nrandom.seed()\n\nclass RRT_PLANNER:\n\tclass Tree:\n\t\tclass Vertex:\n\t\t\tdef __init__(self, position, parent,child):\n\t\t\t\tself.position = position\n\t\t\t\tself.parent = parent\n\t\t\t\tself.child = child\n\n\n\t\tdef __init__(self):\n\t\t\tself.edges = []\n\t\t\tself.vertexes = []\n\n\n\t\tdef AddVertex(self, q_new):\n\t\t\tself.vertexes.append(q_new)\n\n\n\t\tdef AddEdge(self, qnear, qnew):\n\t\t\tself.edges.append([qnear, qnew])\n\n\n\t\tdef NearestNeighbor(self, new_vertex):\n\t\t\tmin_dist = math.hypot(self.vertexes[0].position[0] - new_vertex.position[0], self.vertexes[0].position[1] - new_vertex.position[1])\n\t\t\tn_min = 0\n\n\t\t\tfor n in range(len(self.vertexes)):\n\t\t\t\tcandidate = math.hypot(self.vertexes[n].position[0] - new_vertex.position[0],self.vertexes[n].position[1] - new_vertex.position[1])\n\t\t\t\tif candidate < min_dist:\n\t\t\t\t\tmin_dist = candidate\n\t\t\t\t\tn_min = n\n\n\t\t\treturn n_min\n\n\n\t\tdef PaintOnImg(self, img, obstacles):\n\t\t\tfor vertex in self.vertexes:\n\t\t\t\tcv2.circle(img, (math.ceil(vertex.position[0]), math.ceil(vertex.position[1])), 2, (255,0,0))\n\t\t\tfor edge in self.edges:\n\t\t\t\tcv2.line(img, (math.ceil(edge[0].position[0]), math.ceil(edge[0].position[1])),\n\t\t\t\t\t\t\t(math.ceil(edge[1].position[0]), math.ceil(edge[1].position[1])), (255,0,0))\n\t\t\tfor obstacle in obstacles:\n\t\t\t\tfor i in range(len(obstacle)-1):\n\t\t\t\t\tcv2.line(img, (obstacle[i][0], obstacle[i][1]), (obstacle[i+1][0], obstacle[i+1][1]), (0,255,0))\n\t\t\treturn img\n\n\n\t\tdef get_right_path(self, q_goal):\n\t\t\tpath = []\n\t\t\tpath.append(q_goal)\n\t\t\tnext_point = q_goal.parent\n\t\t\twhile next_point is not 
None:\n\t\t\t\tpath.append(next_point)\n\t\t\t\tnext_point = next_point.parent\n\n\t\t\treturn path\n\n\t\tdef draw_path(self, path, obstacles, img):\n\t\t\tfor i in range(len(path) - 1):\n\t\t\t\tcv2.line(img, (math.ceil(path[i].position[0]), math.ceil(path[i].position[1])),\n\t\t\t\t\t\t\t(math.ceil(path[i+1].position[0]), math.ceil(path[i+1].position[1])), (0,0,255))\n\t\t\tfor obstacle in obstacles:\n\t\t\t\tfor i in range(len(obstacle) - 1):\n\t\t\t\t\tcv2.line(img, (obstacle[i][0],obstacle[i][1]), (obstacle[i+1][0],obstacle[i+1][1]), (0,255,0))\n\t\t\treturn img\n\n\tdef __init__(self):\n\t\tself.Tree = self.Tree()\n\n\n\tdef get_path(self, img, start_pose, obstacles, goal_pose, n_of_iterations = 20, max_edge_px = 50):\n\t\theight, width = img.shape[:2]\n\t\timg_with_path = img\n\t\tpath = []  # returned empty if the goal circle is never reached within n_of_iterations\n\t\tq_root = self.Tree.Vertex(start_pose, None, None)\n\t\tself.Tree.AddVertex(q_root)\n\t\tfor k in range(n_of_iterations):\n\t\t\tqrand_pos = [random.randint(0,width), random.randint(0,height)]\n\t\t\tqrand = self.Tree.Vertex(qrand_pos, None, None)\n\t\t\tn_qnear = self.Tree.NearestNeighbor(qrand)\n\t\t\tqnear = self.Tree.vertexes[n_qnear]\n\t\t\tqnew_pos = self.connect(qnear, qrand, obstacles, max_edge_px)\n\t\t\tqnew = self.Tree.Vertex(qnew_pos, None, None)\n\t\t\tself.Tree.AddVertex(qnew)\n\t\t\tself.Tree.AddEdge(qnear, qnew)\n\t\t\tqnear.child = qnew\n\t\t\tqnew.parent = qnear\n\t\t\tif (self.isInGoalCircle(qnew, goal_pose)):\n\t\t\t\tpath = self.Tree.get_right_path(qnew)\n\t\t\t\timg_with_path = self.Tree.draw_path(path, obstacles, img_with_path)\n\t\t\t\tbreak\n\n\t\treturn path\n\n\n\tdef connect(self, vertex_1, vertex_2, obstacles, max_edge_px):\n\t\tcandidate_dist = math.hypot(vertex_1.position[0] - vertex_2.position[0],vertex_1.position[1] - vertex_2.position[1])\n\t\tif(candidate_dist <= max_edge_px and not aux.CrossesObstacles(vertex_1.position, vertex_2.position, obstacles) and not aux.LiesBeneathObstacles(vertex_2.position, obstacles)):\n\t\t\tnew_vertex = vertex_2.position\n\t\telse:\n\t\t\tnew_vertex = [\n\t\t\t\t\t\t\tvertex_1.position[0] - max_edge_px*math.cos(math.atan2(vertex_1.position[1] - vertex_2.position[1], vertex_1.position[0] - vertex_2.position[0])),\n\t\t\t\t\t\t\tvertex_1.position[1] - max_edge_px*math.sin(math.atan2(vertex_1.position[1] - vertex_2.position[1], vertex_1.position[0] - vertex_2.position[0]))\n\t\t\t\t\t\t ]\n\n\t\t\tnew_vertex = [int(new_vertex[0]), int(new_vertex[1])]\n\t\t\tfor obstacle in obstacles:\n\t\t\t\tInObstacle = True\n\t\t\t\tCrossesObstacle = True\n\t\t\t\twhile InObstacle or CrossesObstacle:\n\t\t\t\t\tnew_vertex = [\n\t\t\t\t\t\t\t\t\tvertex_1.position[0] - max_edge_px*math.cos(math.atan2(vertex_1.position[1] - vertex_2.position[1], vertex_1.position[0] - vertex_2.position[0])),\n\t\t\t\t\t\t\t\t\tvertex_1.position[1] - max_edge_px*math.sin(math.atan2(vertex_1.position[1] - vertex_2.position[1], vertex_1.position[0] - vertex_2.position[0]))\n\t\t\t\t\t\t\t\t ]\n\t\t\t\t\tCrossesObstacle = aux.doIntersect(new_vertex, vertex_1.position, obstacle[0], obstacle[-1])\n\t\t\t\t\tInObstacle = False\n\t\t\t\t\tfor point in obstacle:\n\t\t\t\t\t\tif(self.isInObstCircle(new_vertex, point)):\n\t\t\t\t\t\t\tInObstacle = True\n\n\t\t\t\t\tmax_edge_px -= 1\n\t\treturn new_vertex\n\n\n\tdef onMouse(self, event, x, y, flags, param):\n\t\tif event == cv2.EVENT_LBUTTONUP:\n\t\t\tself.clicked = 
True\n\t\tcv2.destroyWindow(self.win_name)\n","sub_path":"cv_part/libs/rrt_planner.py","file_name":"rrt_planner.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"196802483","text":"from django.conf.urls import url\nfrom . import views\n\n\napp_name = 'polls'\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^(?P[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n url(r'^(?P[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),\n url(r'^(?P[0-9]+)/vote/$', views.vote, name='vote'),\n url(r'^addpoll/$', views.addpoll, name='addpoll'),\n url(r'^editpoll/(?P\\w+)/', views.editpoll, name='editpoll')\n]\n","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"623224157","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom db_setup import Todo, Base\n\nengine = create_engine(\"sqlite:///todos.db\")\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\ntestTodo = Todo(title=\"Test DB\", completed=True)\nsession.add(testTodo)\nsession.commit()","sub_path":"week_13/day_5/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"5661890","text":"import pandas as pd\r\n\r\n# Update countries' names\r\ndef update_countries_names(df):\r\n return df.replace({\r\n \"Cabo Verde\":\"Cape Verde\",\r\n \"Congo (Brazzaville)\":\"Congo\",\r\n \"Congo (Kinshasa)\":\"Congo, the Democratic Republic of the\",\r\n \"Cote d'Ivoire\":\"Ivory Coast\",\r\n \"Czechia\":\"Czech Republic\",\r\n \"Eswatini\":\"Swaziland\",\r\n \"Holy See\":\"Holy See (Vatican City State)\",\r\n \"Iran\":\"Iran, Islamic Republic of\",\r\n \"Korea, South\":\"South Korea\",\r\n \"Laos\":\"Lao People's Democratic Republic\",\r\n \"Moldova\":\"Moldova, Republic of\",\r\n \"North Macedonia\":\"Macedonia, the former Yugoslav Republic of\",\r\n \"Syria\":\"Syrian Arab Republic\",\r\n \"Tanzania\":\"Tanzania, United Republic of\",\r\n \"US\":\"United States\",\r\n \"West Bank and Gaza\":\"Palestinian Territory, Occupied\",\r\n \"Micronesia\":\"Micronesia, Federated States of\"\r\n })\r\n\r\n# Read CSSE data from github\r\ndef read_csse(file, numvars, namecol):\r\n df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'+\r\n 'csse_covid_19_data/csse_covid_19_time_series/'+file)\r\n # change de wide shape to long shape\r\n df = df.melt(id_vars=list(df.columns[0:numvars]),var_name='date',value_name=namecol\r\n ).astype({'date':'datetime64[ns]', namecol:'int64'})\r\n return df\r\n\r\n# get current CSSE data\r\ndef get_cssedata(updatenames = False):\r\n df = read_csse('time_series_covid19_confirmed_global.csv', 4, 'I_cum').\\\r\n merge(read_csse('time_series_covid19_deaths_global.csv', 4, 'D_cum'),how='left').\\\r\n merge(read_csse('time_series_covid19_recovered_global.csv', 4, 'R_cum'),how='left')\r\n df['country_name'] = df['Country/Region'].apply(lambda x: x.replace('*', ''))\r\n df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.date())\r\n # only countries\r\n df = df.groupby(['country_name','date'],as_index=False).agg({'I_cum':'sum','D_cum':'sum','R_cum':'sum'})\r\n # I, D, R\r\n df = df.assign(I=lambda x: 
x['I_cum']-x.groupby(['country_name'])['I_cum'].shift(1),\r\n D=lambda x: x['D_cum']-x.groupby(['country_name'])['D_cum'].shift(1),\r\n R=lambda x: x['R_cum']-x.groupby(['country_name'])['R_cum'].shift(1)\r\n )[['country_name', 'date', 'I', 'D', 'R', 'I_cum', 'D_cum', 'R_cum']]\r\n # not ships\r\n df = df.fillna(0).query('country_name not in [\"Diamond Princess\", \"MS Zaandam\"]')\r\n if updatenames: df = update_countries_names(df)\r\n return df\r\n\r\n# get Isocodes\r\ndef get_Isocodes():\r\n df = pd.read_csv('https://gist.githubusercontent.com/tadast/8827699/raw/f5cac3d42d16b78348610fc4ec301e9234f82821/countries_codes_and_coordinates.csv')\r\n df[['country_alphacode','country_isocode','latitude','longitude']] = \\\r\n df[['Alpha-3 code','Numeric code','Latitude (average)','Longitude (average)']].\\\r\n apply(lambda x: x.str.replace('\"','').str.strip())\r\n df = df.rename(columns={'Country':'country_name'}\r\n )[['country_name','country_alphacode','country_isocode','latitude','longitude']].\\\r\n astype({'country_isocode':'int','latitude':'float64','longitude':'float64'})\r\n # Sudan Iso code\r\n df.loc[df['country_name'] == 'Sudan', 'country_isocode'] = 729\r\n # Insert Kosovo\r\n df = df.append({'country_name': 'Kosovo', 'country_alphacode': 'XXK', 'country_isocode': 383,\r\n 'latitude': 42.6675, 'longitude': 21.1662}, ignore_index=True)\r\n return df\r\n\r\n# Population\r\ndef get_population(year=2020):\r\n return pd.read_csv('https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/CSV_FILES/WPP2019_TotalPopulationBySex.csv'\r\n ).query('Time == '+str(year)+' and VarID == 2'\r\n ).rename(columns={'Location':'country_name', 'LocID':'country_isocode'}\r\n ).astype({'country_isocode':'int', 'PopTotal':'float64'}\r\n )[['country_name', 'country_isocode', 'PopTotal']]\r\n\r\n# Continents and Regions\r\ndef get_countrycode():\r\n df = pd.read_csv('countrycode_data.csv'\r\n ).rename(columns={'iso3c':'country_alphacode',\r\n 'continent':'continent_name',\r\n 'region':'region_name'}\r\n )[['country_name', 'country_alphacode', 'continent_name', 'region_name']]\r\n df['country_name'] = df['country_name'].str.capitalize()\r\n return df\r\n\r\n# read ACAPS data\r\ndef read_acaps():\r\n return pd.read_excel(\r\n 'https://www.acaps.org/sites/acaps/files/resources/files/acaps_covid19_government_measures_dataset_0.xlsx',\r\n engine='openpyxl', sheet_name='Dataset'\r\n ).rename(columns={'DATE_IMPLEMENTED':'date', 'ISO':'country_alphacode',\r\n 'CATEGORY':'measure_category', 'MEASURE':'measure_name'}\r\n ).astype({'date':'datetime64[ns]'})[['country_alphacode', 'date', 'measure_name', 'measure_category']]\r\n\r\n# Measures Category\r\ndef measures_category():\r\n return pd.DataFrame({'measure_category': ['None',\r\n 'Humanitarian exemption',\r\n 'Governance and socio-economic measures',\r\n 'Public health measures',\r\n 'Social distancing',\r\n 'Movement restrictions',\r\n 'Lockdown'],\r\n 'measure_level': range(0, 7)})\r\n\r\n# Measures\r\ndef get_measures():\r\n df = read_acaps().merge(measures_category(), how='left')\r\n df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.date())\r\n return df\r\n\r\n# Test Positivity Rate\r\ndef get_test_data():\r\n df = pd.read_csv('https://github.com/owid/covid-19-data/raw/master/public/data/owid-covid-data.csv'\r\n ).rename(columns={'iso_code':'country_alphacode'}\r\n ).astype({'date':'datetime64[ns]', 'positive_rate':'float64'}\r\n )[['country_alphacode', 'date', 'positive_rate']]\r\n df['date'] = pd.to_datetime(df['date']).apply(lambda x: 
x.date())\r\n return df\r\n\r\n# Covid data\r\ndef get_covid19_data(level=9):\r\n # === level 1:\r\n # Read CSSE data\r\n df = get_cssedata(updatenames=True).\\\r\n merge(get_Isocodes(), how='left').\\\r\n merge(get_countrycode().drop(columns=['country_name']), how='left').\\\r\n merge(get_population().drop(columns=['country_name']), how='left', on='country_isocode').\\\r\n assign(N=lambda x: x['PopTotal'] * 1000).drop(columns=['PopTotal'])\r\n # Add Kosovo data\r\n df.loc[df['country_name'] == 'Kosovo', 'continent_name'] = 'Europe'\r\n df.loc[df['country_name'] == 'Kosovo', 'region_name'] = 'Southern Europe'\r\n df.loc[df['country_name'] == 'Kosovo', 'N'] = 1811285\r\n # Taiwan\r\n df.loc[df['country_name'] == 'Taiwan', 'continent_name'] = 'Asia'\r\n df.loc[df['country_name'] == 'Taiwan', 'region_name'] = 'Eastern Asia'\r\n # Add Region Western Sahara\r\n df.loc[df['country_name'] == 'Western Sahara', 'region_name'] = 'Sub-Saharan Africa'\r\n\r\n if level < 2: return df\r\n\r\n # === level 2:\r\n # Add S (susceptible)\r\n df['S'] = df['N'] - df['I_cum'] - df['R_cum']\r\n # Add IR (Incidence Rate)\r\n df['IR'] = (df['I_cum'] / df['N']) * 10 ** 5\r\n\r\n #### Add CFR (Confirmed Fatality rate) and M (Mortality)\r\n df['CFR'] = (df['D_cum'] / df['I_cum'] * 100).fillna(0)\r\n df['M'] = (df['D_cum'] / df['N']) * 10 ** 5\r\n\r\n if level < 3: return df\r\n\r\n # === level 3:\r\n # Add Measures\r\n df = df.merge(get_measures().drop(columns=['measure_category']).\r\n groupby(['country_alphacode', 'date'], as_index=False).agg({'measure_level': 'max'}),\r\n how='left', on=['country_alphacode', 'date'])\r\n df = df.assign(ml_ant=lambda x: x.groupby(['country_alphacode'])['measure_level'].shift(1).fillna(0))\r\n df = df.assign(measure_level=df[['measure_level', 'ml_ant']].max(axis=1)).drop(columns=['ml_ant'])\r\n\r\n if level < 4: return df\r\n\r\n # === level 4:\r\n # Add Test\r\n df = df.merge(get_test_data(), how='left').assign(TPR = lambda x: x['positive_rate']*100)\\\r\n .drop(columns=['positive_rate'])\r\n\r\n return df\r\n\r\ndef make_covid_map(data, locations='country_alphacode',\r\n color='I_cum', color_scale='YlOrRd',\r\n name='country_name', var_animation=None,\r\n facet_col=None, facet_col_wrap=None,\r\n title='Infected by Countries', lcolor='Infected',\r\n save=True, outfile='covid19map'):\r\n import plotly.express as px\r\n\r\n if var_animation == 'date':\r\n data = data.assign(date=data.date.apply(lambda x: x.strftime('%b %d, %Y')))\r\n elif var_animation == 'week':\r\n data = data.assign(week=data.date.apply(lambda x: x.strftime('%Y-%WW')))\r\n\r\n if facet_col == 'year':\r\n data = data.assign(year=data.date.apply(lambda x: x.strftime('%Y')))\r\n elif facet_col == 'month':\r\n data = data.assign(month=data.date.apply(lambda x: x.strftime('%b')))\r\n if facet_col_wrap == None: facet_col_wrap = 4\r\n\r\n fig = px.choropleth(data, locations=locations, hover_name=name,\r\n color=color, color_continuous_scale=color_scale,\r\n title=title, labels={color: lcolor},\r\n animation_frame=var_animation, animation_group=name,\r\n facet_col=facet_col, facet_col_wrap=facet_col_wrap)\r\n\r\n if save: fig.write_html(outfile+'.html')\r\n return fig\r\n\r\ndef make_covid_map2(data, column='I', legend_name='',\r\n fill_color='YlOrRd',\r\n save=True, outfile='covid19map'):\r\n import folium\r\n\r\n url_geo = 'https://raw.githubusercontent.com/python-visualization/folium/master/examples/data/world-countries.json'\r\n m = folium.Map(min_zoom=2, max_bounds=True, tiles='cartodbpositron')\r\n 
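# NB: the choropleth below joins each country_alphacode value against the 'id' field of\r\n    # the world-countries GeoJSON (key_on='feature.id'), shading countries by the chosen 'column'.\r\n    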
folium.Choropleth(geo_data=url_geo, key_on='feature.id',\r\n data=data, columns=['country_alphacode',column], name='choropleth',\r\n fill_color=fill_color, fill_opacity=0.7, line_opacity=0.2,\r\n legend_name=legend_name).add_to(m)\r\n if save: m.save(outfile=outfile+'.html')\r\n return m\r\n\r\ndef make_covid_bar(data, x='country_alphacode', y='I_cum', name='country_name',\r\n color='continent_name', var_animation=None,\r\n facet_col=None, facet_col_wrap=None,\r\n title='Infected by Countries',\r\n xtitle='Countries', ytitle='Infected Accumulated', ltitle='Continents',\r\n save=True, outfile='covid19bars'):\r\n import plotly.express as px\r\n\r\n if var_animation == 'date':\r\n data = data.assign(date=data.date.apply(lambda x: x.strftime('%b %d, %Y')))\r\n elif var_animation == 'week':\r\n data = data.assign(week=data.date.apply(lambda x: x.strftime('%Y-%WW')))\r\n\r\n if facet_col == 'year':\r\n data = data.assign(year=data.date.apply(lambda x: x.strftime('%Y')))\r\n elif facet_col == 'month':\r\n data = data.assign(month=data.date.apply(lambda x: x.strftime('%b')))\r\n if facet_col_wrap == None: facet_col_wrap = 4\r\n\r\n fig = px.bar(data, x=x, y=y, range_y=[0, data[y].max()],\r\n color=color, hover_name=name,\r\n animation_frame=var_animation, animation_group=name,\r\n facet_col=facet_col, facet_col_wrap=facet_col_wrap)\r\n fig.update_layout(title=title, legend_title=ltitle)\r\n fig.update_xaxes(title=xtitle, categoryorder='total descending', nticks=len(data[x].unique()),\r\n tickfont=dict(size=8, color='black'),\r\n rangeslider_visible=True)\r\n fig.update_yaxes(title=ytitle)\r\n if save: fig.write_html(outfile+'.html')\r\n return fig\r\n","sub_path":"covid19_functions.py","file_name":"covid19_functions.py","file_ext":"py","file_size_in_byte":12016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"295446853","text":"from auto_test.YH.common.base import get_response\nfrom auto_test.YH.test_data.read_data import get_test_data\nfrom auto_test.YH.common.logger import Log\n\n\nclass getAppToken(object):\n def __init__(self):\n self.log = Log()\n\n # @staticmethod\n def get_token(self):\n \"\"\"\n 获取app登录token\n :return:token\n \"\"\"\n try:\n test_data = get_test_data('/fresh_data.xlsx', 'app', 0)\n res = get_response(test_data['route'], test_data['method'],\n data=test_data['data'], headers=eval(test_data['header']))\n return res.json()['response']['token']\n except Exception as e:\n self.log.error('获取token出现异常:{}'.format(str(e)))\n","sub_path":"auto_test/YH/case/fresh_purchase/apis_test/app_api/app_login.py","file_name":"app_login.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"62658028","text":"\"\"\"Botucatu, February 11th 2019\nAuthor: Marco Poli\nThis program gives the highest divisible triangular number.\n\"\"\"\nfrom math import sqrt\n\ndef is_it_prime(num):\n \"\"\"This function returns True if the integer is prime.\"\"\"\n if (num != 2 and num % 2 == 0) or num < 2:\n return False\n\n aux = int(sqrt(num))\n\n if aux % 2 == 0:\n aux -= 1\n\n while aux > 2:\n if num % aux == 0:\n return False\n aux -= 2\n return True\n\ndef triangular_number(n):\n return (n*(n+1))/2\n\ndef number_of_divisors(num):\n divisors = []\n if num == 1:\n divisors = [1]\n return [num, len(divisors), divisors]\n elif is_it_prime(num):\n divisors = [1, num]\n return [num, len(divisors), divisors]\n else:\n aux = 1\n while aux <= num/2:\n if num % aux == 
0:\n                divisors.append(aux)\n            aux += 1\n\n        divisors.append(num)\n\n        return [num, len(divisors), divisors]\n\ndef factorization(num):\n    divisors = [1]\n    if num == 1:\n        return [num, len(divisors), divisors]\n    elif is_it_prime(num):\n        divisors = [1, num]\n        return [num, len(divisors), divisors]\n    else:\n        aux = num\n        count = 2\n        while aux != 1:\n            if aux % count == 0:\n                aux /= count\n                divisors.append(count)\n            else:\n                count += 1\n\n        aux_list = []\n\n        for i in range(0,len(divisors),1):\n            for j in range(i, len(divisors), 1):\n                if num % (divisors[i] * divisors[j]) == 0 and (divisors[i] * divisors[j]) not in divisors:\n                    divisors.append(divisors[i] * divisors[j])\n\n        divisors.append(num)\n        divisors = list(set(divisors))\n        divisors.sort()\n\n        return [num, len(divisors), divisors]\n\nn = 1\nwhile True:\n    if factorization(triangular_number(n))[1] > 5e2:\n        print(number_of_divisors(triangular_number(n)))\n        break\n    n += 1 \n","sub_path":"problem_012_highly_divisible_triangular_number.py","file_name":"problem_012_highly_divisible_triangular_number.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"309892587","text":"import discord\nfrom discord import utils\nfrom discord.ext import commands\nfrom datetime import datetime, timezone, timedelta\nimport os\nfrom smartfile import BasicClient\n\n\nclass PingLogger:\n    def __init__(self, bot):\n        self.bot = bot\n\n    async def on_message(self, message):\n        if message.server is None:\n            return\n        me = message.server.get_member(\"111158853839654912\")\n        if me is None:\n            return\n        if not os.path.exists(\"log_blacklist.txt\"):\n            sf_client = BasicClient(os.environ[\"SF_KEY\"], os.environ[\"SF_PW\"])\n            sf_client.download(\"log_blacklist.txt\")\n        with open(\"log_blacklist.txt\") as f:\n            blacklist = f.readlines()  # read once; a second readlines() on the same handle yields []\n        if message.server.id+\"\\n\" in blacklist or message.channel.id+\"\\n\" in blacklist:\n            print(\"Mentioned, but hit blacklist.\")\n            return\n        if me.mentioned_in(message):\n            for role in message.role_mentions:\n                has_role = utils.get(me.roles, id=role.id) is not None\n                if has_role:\n                    embed = discord.Embed(title=\":loudspeaker: ROLE MENTION RECEIVED\", colour=discord.Colour(0x800000), timestamp=message.timestamp)\n                    embed.set_thumbnail(url=message.author.avatar_url)\n                    embed.set_footer(text=\"Mention date\")\n                    embed.add_field(name=\"Sender\", value=message.author.name)\n                    embed.add_field(name=\"Role\", value=role.name, inline=True)\n                    embed.add_field(name=\"Location\", value=\"%s (%s)\" % (message.channel, message.server))\n                    embed.add_field(name=\"Message\", value=message.content)\n\n                    await self.bot.send_message(discord.Object(id=\"246630505943007232\"), embed=embed)\n                    return\n\n            embed = discord.Embed(title=\":bell: MENTION RECEIVED\", colour=discord.Colour(0xFFFF00), timestamp=message.timestamp)\n            embed.set_thumbnail(url=message.author.avatar_url)\n            embed.set_footer(text=\"Mention date\")\n            embed.add_field(name=\"Sender\", value=message.author.name)\n            embed.add_field(name=\"Location\", value=\"%s (%s)\" % (message.channel, message.server))\n            embed.add_field(name=\"Message\", value=message.content)\n            await self.bot.send_message(discord.Object(id=\"246630505943007232\"), embed=embed)\n            print(\"Mention from %s(%s)\" % (message.channel, message.server))\n            return\n        if \"synder\" in message.content.lower():\n            embed = discord.Embed(title=\":exclamation: NAME SAID\", colour=discord.Colour(0x007f00), timestamp=message.timestamp)\n            embed.set_thumbnail(url=message.author.avatar_url)\n
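            # NB: colour 0x007f00 gives this embed a green accent, distinguishing name\n            # sightings from the dark-red role-mention and yellow mention embeds above.\n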
embed.set_footer(text=\"Mention date\")\n embed.add_field(name=\"Sender\", value=message.author.name)\n embed.add_field(name=\"Location\", value=\"%s (%s)\" % (message.channel, message.server))\n embed.add_field(name=\"Message\", value=message.content)\n\n await self.bot.send_message(discord.Object(id=\"258330960733405184\"), embed=embed)\n print(\"Mentioned in %s(%s)\" % (message.channel, message.server))\n return\n\n @commands.command(pass_context=True)\n async def ignore(self, ctx, ignore_type, *ignore):\n ignore = list(ignore)\n try:\n sf_client = BasicClient(os.environ[\"SF_KEY\"], os.environ[\"SF_PW\"])\n except KeyError:\n pass\n if not os.path.exists(\"log_blacklist.txt\"):\n sf_client.download(\"log_blacklist.txt\")\n if len(ignore) == 0:\n if ignore_type.lower() == \"server\" or ignore_type.lower() == \"s\":\n ignore.append(ctx.message.server.id)\n elif ignore_type.lower() == \"channel\" or ignore_type.lower() == \"c\":\n ignore.append(ctx.message.channel.id)\n else:\n await ctx.bot.say(\":moyai: No type named `%s` found! Try `s` or `c`.\" % ignore_type, delete_after=7)\n return\n if ignore_type.lower() == \"server\" or ignore_type.lower() == \"s\":\n for server in ignore:\n server.strip()\n with open(\"log_blacklist.txt\", \"a\") as f:\n f.write(server+\"\\n\")\n if len(ignore) != 1:\n await ctx.bot.say(\":moyai: Successfully added the servers `%s` to the blacklist!\" % [ctx.bot.get_server(server_id).name for server_id in ignore], delete_after=7)\n else:\n await ctx.bot.say(\":moyai: Successfully added the server `%s` to the blacklist!\" % ctx.bot.get_server(ignore[0]).name, delete_after=7)\n if ignore_type.lower() == \"channel\" or ignore_type.lower() == \"c\":\n for channel in ignore:\n channel.strip()\n with open(\"log_blacklist.txt\", \"a\") as f:\n f.write(channel+\"\\n\")\n if len(ignore) > 1:\n await ctx.bot.say(\":moyai: Successfully added the channels `%s` to the blacklist!\" % [ctx.bot.get_channel(channel_id).name for channel_id in ignore], delete_after=7)\n else:\n await ctx.bot.say(\":moyai: Successfully added the channel `%s` to the blacklist!\" % ctx.bot.get_channel(ignore[0]).name, delete_after=7)\n else:\n await ctx.bot.say(\":moyai: No type named `%s` found! 
Try `s` or `c`.\" % ignore_type, delete_after=7)\n return\n try:\n with open(\"log_blacklist.txt\") as f:\n sf_client.upload(\"log_blacklist.txt\", f)\n except NameError:\n pass\n\n\ndef setup(bot):\n bot.add_cog(PingLogger(bot))\n","sub_path":"Cogs/mention_log.py","file_name":"mention_log.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"395702415","text":"# ############################################################################################################ #\r\n# Programme : \"OLAP\" #\r\n# #\r\n# Dernière modification : 10/03/2021 #\r\n# #\r\n# Réalise par : EL KHADIMI Mohcine / SLIM Malik #\r\n# # #\r\n# #\r\n# #\r\n# ############################################################################################################ #\r\n\r\n#librairies à importer\r\n\r\n#Import Python GUI \r\nimport tkinter \r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\n# Import pivot table ui \r\nfrom pivottablejs import pivot_ui\r\n# Import web libraries\r\nfrom IPython.display import HTML\r\nimport webbrowser\r\n# Import Elasticsearch package \r\nfrom elasticsearch import Elasticsearch \r\n\r\nimport json\r\nimport requests\r\nimport pandas as pd\r\n\r\n# Connexion au serveur d'elasticsearch\r\nes=Elasticsearch([{'host':'localhost','port':9200}])\r\n\r\n#configuration de l'interface graphique\r\nroot = Tk()\r\nroot.title(\"Sensor Data\")\r\nroot.geometry(\"920x520\")\r\nroot.minsize(480,360)\r\nroot.config(background='#4065A4')\r\n\r\n#créer le frame principal \r\nmain_frame = Frame(root,bg='#4065A4')\r\nmain_frame.pack(fill=BOTH, expand=1)\r\n\r\n# Créer un widget Canvas \r\nmy_canvas = Canvas(main_frame,bg='#4065A4')\r\nmy_canvas.pack(side=LEFT, fill=BOTH, expand=1)\r\n\r\n# Ajout d'une Scrollbar au Canvas\r\nmy_scrollbar = ttk.Scrollbar(main_frame, orient=VERTICAL, command=my_canvas.yview)\r\nmy_scrollbar.pack(side=RIGHT, fill=Y)\r\n\r\n# Configuration du Canvas\r\nmy_canvas.configure(yscrollcommand=my_scrollbar.set)\r\nmy_canvas.bind('', lambda e: my_canvas.configure(scrollregion = my_canvas.bbox(\"all\")))\r\n\r\n\r\n# Créer un second frame dans le canvas\r\nsecond_frame = Frame(my_canvas,bg='#4065A4')\r\n\r\n# Add that New frame To a Window In The Canvas\r\nmy_canvas.create_window((0,0), window=second_frame, anchor=\"center\")\r\n\r\n# créer des frames pour chaque partie de l'interface graphique \r\ndimension_frame = Frame(second_frame,bg='#4065A4')\r\ngeospatial_frame = Frame(second_frame,bg='#4065A4')\r\nbutton_frame = Frame(second_frame,bg='#4065A4')\r\n\r\n\r\n#######################################################################################################################\r\n#- la fonction generate_geoquery renvoie un dictionnaire contenant la requête dsl pour effectuer le filtre géospatial #\r\n# #\r\n#######################################################################################################################\r\n\r\ndef generate_geoquery():\r\n query = {}\r\n\r\n #si l'utilisateur a saisi les coordonnées d'un seul point (ie: il veut calculer une distance)\r\n if (len(liste) == 1):\r\n #récupérer la distance saisie\r\n distance = enter_distance.get()\r\n #générer la requête dsl\r\n query = {\r\n \"geo_distance\": {\r\n \"distance\": distance,\r\n \"Coord\": liste[0]\r\n }\r\n }\r\n #sinon si l'utilisateur a saisi les coordonnées d'au moins 3 points (polygone)\r\n elif (len(liste) >= 3):\r\n #ajouter à la fin de la liste son premier élément pour avoir tous les points 
constituant le polygone (forme fermée\r\n # donc le premier point est lui même le dernier)\r\n liste.append(liste[0])\r\n #récupérer la relation choisie par l'utilisateur parmi celles proposées\r\n relation = choose_relation.get() \r\n #générer la requête dsl\r\n query = {\r\n \"geo_shape\": {\r\n \"ignore_unmapped\": True,\r\n \"Coord\": {\r\n \"relation\": relation,\r\n \"shape\": {\r\n \"coordinates\": [\r\n liste \r\n ],\r\n \"type\": \"Polygon\"\r\n }\r\n }\r\n }\r\n }\r\n return query\r\n\r\n#####################################################################################################################\r\n# #\r\n#- la fonction joined_data prend comme paramètres la dimension temps ainsi que la (ou les) mesure(s) #\r\n#choisies par l'utilisateur sur l'interface graphique et crée le dataframe qui joint des données sensor et network #\r\n# #\r\n#####################################################################################################################\r\ndef joined_data(my_dimension_temps,my_measure):\r\n \r\n geo_query = generate_geoquery()\r\n query_network={}\r\n\r\n #Si l'utilisateur n'a pas choisi de filtrer avec une requête géospatiale\r\n if (geo_query == {}):\r\n #générer la requête SQL qui récupère les ids des noeuds, leur type et leurs coordonnées à partir de l'index network\r\n query_network = json.dumps({\"query\":\"SELECT myNodeId, nodeType, Coord FROM network\",\r\n \"fetch_size\":10000})\r\n #sinon, on génère la même requête SQL en la filtrant grâce à la requête géospatiale DSL\r\n else : \r\n query_network= json.dumps({\"query\":\"SELECT myNodeId, nodeType, Coord FROM network\",\r\n \"filter\" : geo_query,\r\n \"fetch_size\":10000})\r\n\r\n #envoyer la requête au serveur d'elasticsearch et récupérer le résultat dans le dataframe df_network\r\n req_network = requests.post(\"http://localhost:9200/_sql\", data=query_network, headers={'Content-Type':'application/json'})\r\n response_network = req_network.json()\r\n df_network = pd.DataFrame(response_network['rows'],\r\n columns=[d['name'] for d in response_network['columns']])\r\n \r\n\r\n query_sensor=\"\"\r\n #si l'utilisateur a choisi un niveau dans la dimension temps\r\n if (my_dimension_temps) :\r\n #générer la requête SQL qui récupère la dimension temps, la (ou les) mesure(s) et l'id des noeuds à partir de l'index sensor\r\n query_sensor=json.dumps({\"query\":\"SELECT \"+my_dimension_temps+\",myNodeId, \"+my_measure+ \" FROM sensor \",\r\n \"fetch_size\": 10000})\r\n\r\n #sinon, la requête ne récupère que les ids des noeuds et la(ou les) mesure(s)\r\n else :\r\n query_sensor= json.dumps({\"query\":\"SELECT myNodeId, \"+my_measure+ \" FROM sensor \",\r\n \"fetch_size\" : 10000})\r\n\r\n #envoyer la requête au serveur d'elasticsearch et récupérer le résultat dans le dataframe df_sensor\r\n req_sensor = requests.post(\"http://localhost:9200/_sql\", data=query_sensor, headers={'Content-Type':'application/json'})\r\n response_sensor = req_sensor.json()\r\n df_sensor = pd.DataFrame(response_sensor['rows'],\r\n columns=[d['name'] for d in response_sensor['columns']])\r\n \r\n \r\n # Cette partie de code commentée est dédiée aux cas où la requête SQL sur les données sensor renvoie plus de 10 000 lignes comme résultat\r\n '''\r\n # Tant qu'il y a des lignes à récupérer \r\n while (response_sensor['rows'] != []):\r\n\r\n # Récupère le curseur qui pointe sur les 10000 lignes suivantes\r\n cursor= json.dumps({ \"cursor\": response_sensor['cursor']})\r\n \r\n # Renvoyer une requête POST au serveur de Elasticsearch en 
passant en paramètre le curseur pour récupérer les lignes suivantes\r\n request = requests.post(\"http://localhost:9200/_sql\", data=cursor, headers={'Content-Type':'application/json'})\r\n \r\n # Récupérer le résultat sous forme json\r\n response_sensor = request.json()\r\n\r\n # Stocker les lignes récupérées dans un dataframe qui a les mêmes colonnes que le premier dataframe contenant le résultat de la première requête SQL sur les données sensor\r\n df2 = pd.DataFrame(response_sensor['rows'],\r\n columns=df_sensor.columns)\r\n # Mettre à jour le contenu du dataframe df_sensor avec les nouvelles lignes récupérées\r\n frames = [df_sensor,df2]\r\n df_sensor = pd.concat(frames)\r\n '''\r\n \r\n #créer le dataframe qui joint les deux dataframe précédents par rapport au champ \"myNodeId\" des identifiants des noeuds\r\n df_merged = pd.merge(df_network, df_sensor, on='myNodeId')\r\n\r\n return df_merged\r\n\r\n\r\ndef flat(measures,liste):\r\n for sublist in measures:\r\n liste.append(sublist)\r\n return liste\r\n\r\n#####################################################################################################################\r\n# - Fonction qui crée la table pivot à partir des dimensions et mesures choisies #\r\n# #\r\n#####################################################################################################################\r\ndef olap_configuration():\r\n\r\n global liste\r\n global i\r\n global label\r\n global text\r\n #récupérer les dimensions choisies\r\n my_dimension_temps = choose_dimension_temps.get()\r\n my_dimension_node = choose_dimension_node.get()\r\n my_dimension_spatiale = choose_dimension_spatiale.get()\r\n # Récupérer la liste des mesures dans une chaine de caractères sous la forme \"mesure1,mesure2...\"\r\n my_measure = str(measures_listbox.get(0))\r\n for item in range(1,len(measures_listbox.curselection())):\r\n my_measure = str(measures_listbox.get(item))+\",\"+my_measure \r\n\r\n\r\n #Créer le dataframe utilisé dans la table pivot\r\n df = joined_data(my_dimension_temps,my_measure)\r\n\r\n booleen = True \r\n # choisir les champs à garder du dataframe df en distinguant les cas selon les dimensions choisies par l'utilisateur (il se peut\r\n # que l'utilisateur ne choisisse pas des dimensions) \r\n if (my_dimension_spatiale) :\r\n if(my_dimension_node) :\r\n if (my_dimension_temps):\r\n final_data = df[flat(my_measure.split(','), [my_dimension_temps,my_dimension_node,my_dimension_spatiale])]\r\n \r\n else :\r\n final_data = df[[my_dimension_node,my_dimension_spatiale,my_measure.split(',')]]\r\n elif (my_dimension_temps):\r\n final_data = df[[my_dimension_temps,my_dimension_spatiale,my_measure.split(',')]]\r\n else :\r\n final_data = df[[my_dimension_spatiale,my_measure.split(',')]]\r\n else :\r\n if(my_dimension_node) :\r\n if (my_dimension_temps):\r\n final_data = df[[my_dimension_temps,my_dimension_node,my_measure.split(',')]]\r\n else :\r\n final_data = df[[my_dimension_node,my_measure.split(',')]]\r\n elif (my_dimension_temps):\r\n final_data = df[[my_dimension_temps,my_measure.split(',')]]\r\n \r\n #si l'utilisateur n'a choisi aucune dimension\r\n else : \r\n \r\n booleen = False\r\n\r\n # générer l'interface de la table pivot si l'utilisateur a choisi au moins une dimension\r\n if (booleen) :\r\n pivot_ui(final_data ,rows=[my_dimension_temps,my_dimension_node,my_dimension_spatiale],exclusions={final_data.columns[0] : [\"null\"]},outfile_path='cube.html')\r\n HTML('cube.html')\r\n # Ouvrir l'interface dans le navigateur\r\n 
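# NB: pivot_ui above writes 'cube.html' into the current working directory; the\r\n        # browser is then pointed at that same relative path.\r\n        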
webbrowser.open(\"cube.html\")\r\n # si l'utilisateur n'a choisi aucune dimension, un message d'erreur s'affiche\r\n else: \r\n error = Label(button_frame, text=\"Il faut choisir des dimensions\", font=(\"Courrier\", 14),bg='#4065A4',fg='red')\r\n error.pack()\r\n # réinitialiser la liste des saisies dans le filtre géospatial (liste des points + affichage des points saisis dans la variable text) \r\n liste = []\r\n i=0\r\n text = \"\"\r\n label.config(text=text)\r\n \r\n\r\n#Configuration de l'interface graphique \r\n\r\n#---------------------------------- partie des dimensions et mesures --------------------------------------------\r\n\r\n#dimensions---------------------------------------------------------------------------\r\n\r\ndimension_temps = [\r\n \"Annee_sensor\",\r\n \"mois_sensor\",\r\n \"jour_sensor\",\r\n \"packetTimeSensor\"\r\n \r\n]\r\n\r\ndimension_node = [\r\n \"myNodeId\",\r\n \"nodeType\"\r\n]\r\n\r\ndimension_spatiale = [\r\n \"Coord\"\r\n]\r\n\r\n# Time dimension\r\ndimension_temps_label = Label(dimension_frame, text=\"Dimension Temps\", font=(\"Courrier\", 14),bg='#4065A4',fg='white')\r\ndimension_temps_label.pack(pady=10)\r\nchoose_dimension_temps = ttk.Combobox(dimension_frame, width = 27, value=dimension_temps)\r\nchoose_dimension_temps.pack()\r\n\r\n#Node dimension\r\ndimension_node_label = Label(dimension_frame, text=\"Dimension Noeud\", font=(\"Courrier\", 14),bg='#4065A4',fg='white')\r\ndimension_node_label.pack(pady=10)\r\nchoose_dimension_node = ttk.Combobox(dimension_frame, width = 27, value=dimension_node)\r\nchoose_dimension_node.pack()\r\n\r\n#Spatial dimension\r\ndimension_spatiale_label = Label(dimension_frame, text=\"Dimension Spatiale\", font=(\"Courrier\", 14),bg='#4065A4',fg='white')\r\ndimension_spatiale_label.pack(pady=10)\r\nchoose_dimension_spatiale = ttk.Combobox(dimension_frame, width = 27, value=dimension_spatiale)\r\nchoose_dimension_spatiale.pack()\r\n\r\n#mesures-------------------------------------------------------------------------------\r\n\r\nmesures = [\r\n \"temperature\",\r\n \"humidity\",\r\n \"light\",\r\n \"battery\",\r\n \"decagon1\",\r\n \"degacon2\",\r\n \"decagon3\",\r\n \"watermark1\",\r\n \"watermark2\",\r\n \"watermark3\",\r\n \"watermark4\"\r\n]\r\n\r\nmeasure_label = Label(dimension_frame, text=\"Mesure\", font=(\"Courrier\", 14),bg='#4065A4',fg='white')\r\nmeasure_label.pack(pady=10)\r\n\r\nmy_scrollbar2 = ttk.Scrollbar(dimension_frame, orient=VERTICAL)\r\n\r\nmeasures_listbox = Listbox(dimension_frame,width=30,height=7,selectmode=MULTIPLE,exportselection=0,yscrollcommand=my_scrollbar2.set)\r\n\r\nmy_scrollbar2.config(command=measures_listbox.yview)\r\nmy_scrollbar2.pack(side=RIGHT, fill=Y)\r\nmeasures_listbox.pack(fill=X)\r\nfor item in mesures:\r\n measures_listbox.insert(END,item)\r\n\r\n#------------------------------------- Bouton valider ----------------------------------------------\r\n\r\nvalidate_button = Button(button_frame,text = \"Valider\",font=(\"Courrier\", 14),bg='white',fg='#4065A4',command=olap_configuration)\r\nvalidate_button.pack(pady=25,fill =X,expand=1)\r\nbooleen = True\r\n\r\n#------------------------------------- partie du filtre géospatial ---------------------------------\r\n# Geospatial queries\r\n\r\nliste = []\r\nlabel = Label(geospatial_frame, font=(\"Courrier\", 11),bg='#4065A4',fg='white')\r\ni=0\r\ntext=\"\"\r\ndef generate_points():\r\n global i\r\n global text\r\n global liste\r\n i = i+1\r\n liste_lat_lon = []\r\n lon = float(enter_lon.get())\r\n lat = float(enter_lat.get())\r\n 
\r\n liste_lat_lon.append(lon)\r\n liste_lat_lon.append(lat)\r\n liste.append(liste_lat_lon)\r\n enter_lon.delete(0,'end')\r\n enter_lat.delete(0,'end')\r\n text = text + \"Point \"+str(i)+\": \"+str(liste[i-1]) + \"\\n\"\r\n label.config(text=text)\r\n label.grid(row=5+len(liste),column=0,pady=5)\r\n \r\n \r\n\r\n\r\ngeo_label = Label(geospatial_frame, text=\"Calcul géospatial\", font=(\"Courrier\", 16),bg='#4065A4',fg='white')\r\ngeo_label.grid(row=0,column=0,pady=10)\r\n\r\nlon_label = Label(geospatial_frame, text=\"Longitude :\", font=(\"Courrier\", 12),bg='#4065A4',fg='white')\r\nlon_label.grid(row=3,column=0,pady=10)\r\nenter_lon = Entry(geospatial_frame)\r\nenter_lon.grid(row=3,column=1,pady=10)\r\nlat_label = Label(geospatial_frame, text=\"Latitude :\", font=(\"Courrier\", 12),bg='#4065A4',fg='white')\r\nlat_label.grid(row=4,column=0,pady=10)\r\nenter_lat = Entry(geospatial_frame)\r\nenter_lat.grid(row=4,column=1,pady=10)\r\ngenerate_button = Button(geospatial_frame,text = \"Ajouter le point\",font=(\"Courrier\", 12),bg='white',fg='#4065A4',command= generate_points)\r\ngenerate_button.grid(row=5,column=0)\r\n\r\n\r\n\r\nrelations =[\r\n \"INTERSECTS\",\r\n \"CONTAIN\",\r\n \"WITHIN\",\r\n \"DISJOINT\"\r\n]\r\nrelation_label = Label(geospatial_frame, text=\"Relation (>= 3 points) :\", font=(\"Courrier\", 11),bg='#4065A4',fg='white')\r\nrelation_label.grid(row=1,column=0,pady=10)\r\nchoose_relation = ttk.Combobox(geospatial_frame, value=relations)\r\nchoose_relation.grid(row=1,column=1,pady=10)\r\ndistance_label = Label(geospatial_frame, text=\"Distance (1 point) :\", font=(\"Courrier\", 11),bg='#4065A4',fg='white')\r\ndistance_label.grid(row=2,column=0,pady=10)\r\nenter_distance = Entry(geospatial_frame)\r\nenter_distance.grid(row=2,column=1,pady=10)\r\n\r\n\r\n# ----------------------------------------- Ajuster la disposition des frames + ajout d'un onglet menu ----------------------------\r\n\r\ngeospatial_frame.grid(row=0,column=3,padx=60,pady=20,sticky=N+S+E+W)\r\ndimension_frame.grid(row=0,column=0,padx=20,pady=20,sticky=N+S+E+W)\r\nbutton_frame.grid(row= 1,column=2,padx=20,pady=20,sticky=N+S+E+W)\r\nsecond_frame.pack(fill=\"both\", expand=True)\r\n\r\n\r\n\r\n# create menu\r\nmenu_bar = Menu(main_frame)\r\n\r\n#first menu\r\nmain_menu = Menu(menu_bar,tearoff=0)\r\nmain_menu.add_command(label=\"Quitter\",command = root.quit)\r\nmenu_bar.add_cascade(label=\"Menu\",menu=main_menu)\r\n\r\n# Add the menu to the main window \"root\"\r\nroot.config(menu=menu_bar)\r\nroot.mainloop()\r\n","sub_path":"OLAP.py","file_name":"OLAP.py","file_ext":"py","file_size_in_byte":18018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"301488098","text":"\"\"\" This script uses uses core masonry objects to analyze missing parts of the\ncertification \"\"\"\nimport os\n\nfrom masonry.core import Certification, Standard, Control\nfrom src import utils\n\n\ndef analyze_attribute(attribute):\n \"\"\" Check how many elements an attribute has otherwise if it's a list\n if it's not a list return that it's present otherwise return \"Missing \"\"\"\n if isinstance(attribute, list) or isinstance(attribute, dict):\n return len(attribute)\n elif attribute:\n return \"Present\"\n return \"Missing\"\n\n\nclass InventoryControl(Control):\n \"\"\" InventoryControl inherits from the Control class and adds a method\n to create an inventory of justifications \"\"\"\n def inventory(self):\n \"\"\" Create a catalog for a specific controls \"\"\"\n control_dict = {}\n 
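# NB: the result maps system -> component -> status fields; a control with no\n        # justifications at all yields the plain string 'Missing Justifications' instead, e.g.\n        # {'AWS': {'EC2': {'implementation_status': 'partial', 'narrative': 2, 'references': 'Missing'}}} (hypothetical values).\n        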
if not self.justifications:\n control_dict = \"Missing Justifications\"\n for component in self.justifications:\n system_key = component.get('system', 'No System')\n component_key = component.get('component', 'No Name')\n if system_key not in control_dict:\n control_dict[system_key] = {}\n control_dict[system_key][component_key] = {\n 'implementation_status': component.get('implementation_status', 'Missing'),\n 'narrative': analyze_attribute(component.get('narrative')),\n 'references': analyze_attribute(component.get('references'))\n }\n return control_dict\n\n\nclass InventoryStandard(Standard):\n \"\"\" InventoryStandard inherits from Standard, while overriding the Control\n class with InventoryControl as the default storage for control data. This\n class also adds a method to help analyze missing certification gaps \"\"\"\n def __init__(self, standards_yaml_path=None, standard_dict=None):\n super().__init__(\n standards_yaml_path=standards_yaml_path,\n standard_dict=standard_dict,\n control_class=InventoryControl\n )\n\n def inventory(self):\n \"\"\" Creates a catalog of controls in the system \"\"\"\n control_inventory = {}\n for control_key, control in self:\n control_inventory[control_key] = control.inventory()\n return control_inventory\n\n\nclass InventoryBuilder(Certification):\n \"\"\" InventoryBuilder load certification data and exports a yaml gap analysis \"\"\"\n def __init__(self, certification_yaml_path):\n super().__init__(certification_yaml_path, standard_class=InventoryStandard)\n self.inventory = {}\n self.systems_inventory = self.inventory_systems()\n self.standard_inventory = self.inventory_standards()\n\n def inventory_systems(self):\n \"\"\" Creates an system/components catalog \"\"\"\n systems_inventory = {}\n for system_key, system in self.systems.items():\n systems_inventory[system_key] = {}\n for component in system:\n systems_inventory[system_key][component.component_key] = {\n 'references': analyze_attribute(component.meta.get('references')),\n 'verifications': analyze_attribute(component.meta.get('verifications')),\n 'documentation_completed': component.meta.get('documentation_complete'),\n }\n return systems_inventory\n\n def inventory_standards(self):\n \"\"\" Creates an standards/controls catalog \"\"\"\n standard_inventory = {}\n for standard_key, standard in self.standards_dict.items():\n standard_inventory[standard_key] = standard.inventory()\n return standard_inventory\n\n def make_export_dict(self):\n \"\"\" Creates a dict version of the inventory report \"\"\"\n return {\n 'certification': self.name,\n 'components': self.systems_inventory,\n 'standards': self.standard_inventory\n }\n\n def export(self, export_path):\n \"\"\" Exports the inventory report to a yaml file \"\"\"\n inventory_path = os.path.join(\n export_path,\n self.name + '.yaml'\n )\n utils.yaml_writer(self.make_export_dict(), inventory_path)\n return inventory_path\n","sub_path":"masonry/inventory_builder.py","file_name":"inventory_builder.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"536955332","text":"from django.contrib import admin\n\nfrom .models import FieldCondition\n\n\n\n# Register your models here.\nclass FieldConditionAdmin(admin.ModelAdmin):\n list_display = ['reservation_number', 'park_name','property_name','Report_Time_Date', 'Property_Status_Description', 'Personnel_Time', 'Expenses', 'Status', 'comments']\n list_filter = ['reservation_number']\n search_fields = 
['reservation_number']\n ordering = ['reservation_number']\n\nadmin.site.register(FieldCondition, FieldConditionAdmin)","sub_path":"GroundKeeper/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"385941670","text":"\"\"\"\nParse IgBLAST output and write out a tab-separated table.\n\nIgBLAST must have been run with -outfmt \"7 sseqid qstart qseq sstart sseq pident slen\"\n\nA few extra things are done in addition to parsing:\n- The CDR3 is detected by using a regular expression\n- The leader is detected within the sequence before the found V gene (by\n searching for the start codon).\n- If the V sequence hit starts at base 2 in the reference, it is extended\n one to the left.\n\"\"\"\nimport csv\nimport logging\nfrom collections import namedtuple\nimport functools\n\nfrom tinyalign import edit_distance, hamming_distance\n\nfrom .utils import nt_to_aa\nfrom .dna import reverse_complement\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef none_if_na(s):\n \"\"\"Return None if s == 'N/A'. Return s otherwise.\"\"\"\n return None if s == 'N/A' else s\n\n\ndef gene_without_prefix(s):\n if s == \"N/A\":\n return None\n else:\n assert s.startswith(\"%\")\n return s[1:]\n\n\ndef split_by_section(iterable, section_starts):\n \"\"\"\n Parse a stream of lines into chunks of sections. When one of the lines\n starts with a string given in section_starts, a new section is started, and\n a tuple (head, lines) is returned where head is the matching line and lines\n contains a list of the lines following the section header, up to (but\n excluding) the next section header.\n\n Works a bit like str.split(), but on lines.\n \"\"\"\n lines = None\n header = None\n for line in iterable:\n line = line.strip()\n for start in section_starts:\n if line.startswith(start):\n if header is not None:\n yield (header, lines)\n header = line\n lines = []\n break\n else:\n if header is None:\n raise ParseError(\"Expected a line starting with one of {}\".format(\n ', '.join(section_starts)))\n lines.append(line)\n if header is not None:\n yield (header, lines)\n\n\nJunctionVDJ = namedtuple('JunctionVDJ', 'v_end vd_junction d_region dj_junction j_start')\nJunctionVJ = namedtuple('JunctionVJ', 'v_end vj_junction j_start')\n\n\nclass AlignmentSummary:\n \"\"\"An alignment summary describes a framework region or complementarity-determining region\n (FR1/2/3, CDR1/2/3)\"\"\"\n\n def __init__(self, start, stop, length, matches, mismatches, gaps, percent_identity):\n self.start = start\n self.stop = stop\n self.length = length\n self.matches = matches\n self.mismatches = mismatches\n self.gaps = gaps\n if matches is not None:\n assert matches + mismatches + gaps == length\n if percent_identity is not None:\n assert matches is not None and length is not None\n assert abs(100. 
* matches / length - percent_identity) < 0.1\n self.percent_identity = percent_identity\n\n\nclass Hit:\n def __init__(\n self,\n subject_id: str, # name of database record, such as \"VH4.11\"\n query_start: int,\n query_alignment: str, # aligned part of the query, with '-' for deletions\n subject_start: int,\n subject_alignment: str, # aligned part of reference, with '-' for insertions\n subject_length: int, # total length of reference, depends only on subject_id\n percent_identity: float,\n evalue: float,\n ):\n assert len(subject_alignment) == len(query_alignment)\n self.subject_id = subject_id\n self.query_start = query_start\n self.query_alignment = query_alignment\n self.subject_start = subject_start\n self.subject_alignment = subject_alignment\n self.subject_length = subject_length\n self.percent_identity = percent_identity\n self.evalue = evalue\n\n # Derived attributes\n self.errors = self._errors(self.subject_alignment, self.query_alignment)\n self.query_sequence = self.query_alignment.replace('-', '')\n self.subject_sequence = self.subject_alignment.replace('-', '')\n\n assert abs(self.percent_identity - self._percent_identity()) < 0.01\n\n def extend_left_ungapped(self, query_sequence, subject_sequence):\n \"\"\"\n Extend this hit to the left until it reaches the first nucleotide of the subject sequence.\n Used for extending V hits to the 5' end.\n \"\"\"\n query_bases = []\n subject_bases = []\n\n while self.subject_start > 0 and self.query_start > 0:\n self.query_start -= 1\n self.subject_start -= 1\n query_base = query_sequence[self.query_start]\n query_bases.append(query_base)\n if subject_sequence is not None:\n subject_base = subject_sequence[self.subject_start]\n else:\n subject_base = 'N'\n subject_bases.append(subject_base)\n\n query_bases = ''.join(query_bases[::-1])\n subject_bases = ''.join(subject_bases[::-1])\n self.query_alignment = query_bases + self.query_alignment\n self.subject_alignment = subject_bases + self.subject_alignment\n self.query_sequence = query_bases + self.query_sequence\n self.subject_sequence = subject_bases + self.subject_sequence\n self.errors += self._errors(query_bases, subject_bases)\n self.percent_identity = self._percent_identity()\n\n def _percent_identity(self):\n \"\"\"This is how IgBLAST computes percent identity\"\"\"\n matches = len(self.subject_alignment) - self.errors\n return 100. 
* matches / len(self.subject_alignment)\n\n def covered(self):\n \"\"\"\n Return fraction of bases in the original subject sequence that are\n covered by this hit.\n \"\"\"\n return len(self.subject_sequence) / self.subject_length\n\n @property\n def query_end(self):\n return self.query_start + len(self.query_sequence)\n\n @property\n def subject_end(self):\n return self.subject_start + len(self.subject_sequence)\n\n @staticmethod\n def _errors(alignment1, alignment2):\n return sum(a != b for a, b in zip(alignment1, alignment2))\n\n def query_position(self, reference_position):\n \"\"\"\n Given a position on the reference, return the same position but relative to\n the full query sequence.\n \"\"\"\n # Iterate over alignment columns\n ref_pos = self.subject_start\n query_pos = self.query_start\n if ref_pos == reference_position:\n return query_pos\n for ref_c, query_c in zip(self.subject_alignment, self.query_alignment):\n if ref_c != '-':\n ref_pos += 1\n if query_c != '-':\n query_pos += 1\n if ref_pos == reference_position:\n return query_pos\n return None\n\n\ndef parse_header(header):\n \"\"\"\n Extract size= and barcode= fields from the FASTA/FASTQ header line\n\n >>> parse_header(\"name;size=12;barcode=ACG;\")\n ('name', 12, 'ACG')\n >>> parse_header(\"another name;size=200;foo=bar;\")\n ('another name', 200, None)\n \"\"\"\n fields = header.split(';')\n query_name = fields[0]\n size = barcode = None\n for field in fields[1:]:\n if field == '':\n continue\n if '=' in field:\n key, value = field.split('=', maxsplit=1)\n if key == 'size':\n size = int(value)\n elif key == 'barcode':\n barcode = value\n return query_name, size, barcode\n\n\nclass IgBlastRecord:\n def __init__(\n self,\n full_sequence,\n query_name,\n alignments,\n hits,\n v_gene,\n d_gene,\n j_gene,\n chain,\n has_stop,\n in_frame,\n is_productive,\n strand,\n junction\n ):\n self.full_sequence = full_sequence\n self.query_name = query_name\n self.alignments = alignments\n self.hits = hits\n self.v_gene = v_gene\n self.d_gene = d_gene\n self.j_gene = j_gene\n self.chain = chain\n self.has_stop = has_stop\n self.in_frame = in_frame\n self.is_productive = is_productive\n self.strand = strand\n self.junction = junction\n\n def region_sequence(self, region):\n \"\"\"\n Return the nucleotide sequence of a named region. Allowed names are:\n CDR1, CDR2, CDR3, FR1, FR2, FR3. 
Sequences are extracted from the full read\n using begin and end coordinates from IgBLAST’s \"alignment summary\" table.\n \"\"\"\n if region not in (\"CDR1\", \"CDR2\", \"CDR3\", \"FR1\", \"FR2\", \"FR3\"):\n raise KeyError(f\"Region {region!r} not allowed\")\n alignment = self.alignments.get(region, None)\n if alignment is None:\n return None\n if alignment.start is None or alignment.stop is None:\n return None\n return self.full_sequence[alignment.start:alignment.stop]\n\n def __repr__(self):\n return 'IgBlastRecord(query_name={query_name!r}, ' \\\n 'v_gene={v_gene!r}, d_gene={d_gene!r}, j_gene={j_gene!r}, chain={chain!r}, ...)'.format(\n **vars(self))\n\n\nclass Region:\n \"\"\"A CDR or FR region in a V(D)J rearranged sequence (FR1, CDR1, FR2, CDR2, FR3, CDR3, FR4)\"\"\"\n\n def __init__(self, nt_sequence, nt_reference, aa_reference=None, percent_identity=None):\n self.nt_sequence = nt_sequence\n self.aa_sequence = nt_to_aa(nt_sequence) if nt_sequence else None\n self.nt_reference = nt_reference\n if aa_reference is None and nt_reference is not None:\n aa_reference = nt_to_aa(nt_reference)\n self.aa_reference = aa_reference\n self.aa_mutations = self._aa_mutations()\n self.percent_identity = percent_identity\n if percent_identity is None:\n self.percent_identity = self._percent_identity()\n\n def _percent_identity(self):\n # FIXME This is not quite how IgBLAST computes percent identity\n if not self.nt_reference or not self.nt_sequence:\n return None\n dist = edit_distance(self.nt_reference, self.nt_sequence)\n return 100. - 100. * dist / len(self.nt_reference)\n\n def _aa_mutations(self):\n # Earlier versions of this code used edit distance to compute the number of mutations,\n # but some FR1 alignments are reported with a frameshift by IgBLAST. By requiring that\n # reference and query lengths are identical, we can filter out these cases (and use\n # Hamming distance to get some speedup)\n if (\n not self.aa_reference\n or not self.aa_sequence\n or len(self.nt_sequence) != len(self.nt_reference)\n ):\n return None\n dist = hamming_distance(self.aa_reference, self.aa_sequence)\n\n # If the mutation rate is still obviously too high, assume something went\n # wrong and ignore the computed value\n if dist / len(self.aa_reference) >= 0.8:\n return None\n return dist\n\n def aa_mutation_rate(self):\n if self.aa_mutations is None or not self.aa_reference:\n return None\n return 100. * self.aa_mutations / len(self.aa_reference)\n\n def nt_mutation_rate(self):\n \"\"\"Return nucleotide-level mutation rate in percent\"\"\"\n if self.percent_identity is None:\n return None\n return 100. 
- self.percent_identity\n\n\nclass ExtendedIgBlastRecord(IgBlastRecord):\n \"\"\"\n This extended record does a few extra things:\n - The CDR3 is detected\n - The leader is detected within the sequence before the found V gene (by\n searching for the start codon).\n - If the V sequence hit starts not at base 1 in the reference, it is extended\n to the left.\n \"\"\"\n # TODO move computation of cdr3_sequence, vdj_sequence into constructor\n # TODO maybe make all coordinates relative to full sequence\n\n # Order of columns (use with asdict())\n columns = [\n 'count',\n 'V_gene',\n 'D_gene',\n 'J_gene',\n 'chain',\n 'stop',\n 'V_covered',\n 'D_covered',\n 'J_covered',\n 'V_evalue',\n 'D_evalue',\n 'J_evalue',\n 'FR1_SHM',\n 'CDR1_SHM',\n 'FR2_SHM',\n 'CDR2_SHM',\n 'FR3_SHM',\n 'FR4_SHM',\n 'V_SHM',\n 'J_SHM',\n 'V_aa_mut',\n 'J_aa_mut',\n 'FR1_aa_mut',\n 'CDR1_aa_mut',\n 'FR2_aa_mut',\n 'CDR2_aa_mut',\n 'FR3_aa_mut',\n 'V_errors',\n 'D_errors',\n 'J_errors',\n 'UTR',\n 'leader',\n 'CDR1_nt',\n 'CDR1_aa',\n 'CDR2_nt',\n 'CDR2_aa',\n 'CDR3_nt',\n 'CDR3_aa',\n 'V_nt',\n 'V_aa',\n 'V_end',\n 'V_CDR3_start',\n 'VD_junction',\n 'D_region',\n 'DJ_junction',\n 'J_nt',\n 'VDJ_nt',\n 'VDJ_aa',\n 'name',\n 'barcode',\n 'genomic_sequence',\n ]\n\n CHAINS = {\n 'VH': 'heavy', 'VK': 'kappa', 'VL': 'lambda',\n 'VA': 'alpha', 'VB': 'beta',\n 'VG': 'gamma', 'VD': 'delta'\n }\n\n def __init__(self, database, **kwargs):\n super().__init__(**kwargs)\n self.query_name, self.size, self.barcode = parse_header(self.query_name)\n self.genomic_sequence = self.full_sequence\n self._database = database\n if 'V' in self.hits:\n subject_sequence = self._database.v[self.hits['V'].subject_id]\n self.hits['V'].extend_left_ungapped(self.full_sequence, subject_sequence)\n self.utr, self.leader = self._utr_leader()\n self.alignments['CDR3'] = self._find_cdr3()\n\n self.regions = {\n name: self._make_region(name) for name in\n ('FR1', 'FR2', 'FR3', 'CDR1', 'CDR2', 'CDR3')}\n self.regions['FR4'] = self._make_fr4_region()\n self.vdj_sequence = self._make_vdj_sequence()\n\n def _make_region(self, name: str):\n nt_sequence = self.region_sequence(name)\n if self.v_gene in self._database.v_regions_nt:\n nt_reference = self._database.v_regions_nt[self.v_gene].get(name)\n else:\n nt_reference = None\n if self.v_gene in self._database.v_regions_aa:\n aa_reference = self._database.v_regions_aa[self.v_gene].get(name)\n else:\n aa_reference = None\n if self.alignments.get(name, None) is not None:\n percent_identity = self.alignments[name].percent_identity\n else:\n percent_identity = None\n return Region(nt_sequence, nt_reference, aa_reference, percent_identity)\n\n def _make_fr4_region(self):\n if 'J' not in self.hits:\n return Region(None, None)\n j_subject_id = self.hits['J'].subject_id\n if self.chain not in self.CHAINS:\n return Region(None, None)\n cdr3_ref_end = self._database.j_cdr3_end(j_subject_id, self.CHAINS[self.chain])\n if cdr3_ref_end is None:\n return Region(None, None)\n cdr3_query_end = self.hits['J'].query_position(reference_position=cdr3_ref_end)\n if cdr3_query_end is None:\n return Region(None, None)\n\n query = self.full_sequence[cdr3_query_end:self.hits['J'].query_end]\n ref = self._database.j[j_subject_id][cdr3_ref_end:self.hits['J'].subject_end]\n\n return Region(query, ref)\n\n def _make_vdj_sequence(self):\n if 'V' not in self.hits or 'J' not in self.hits:\n return None\n hit_v = self.hits['V']\n hit_j = self.hits['J']\n vdj_start = hit_v.query_start\n vdj_stop = hit_j.query_start + len(hit_j.query_sequence)\n 
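# NB: the V(D)J slice runs from the first aligned V base through the last aligned\n        # J base of the query, so the D segment and both junctions are included.\n        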
return self.full_sequence[vdj_start:vdj_stop]\n\n @property\n def v_cdr3_start(self):\n \"\"\"Start of CDR3 within V\"\"\"\n if 'V' not in self.hits or self.alignments['CDR3'] is None:\n return 0\n v_start = self.hits['V'].query_start\n cdr3_start = self.alignments['CDR3'].start\n return cdr3_start - v_start\n\n def _utr_leader(self):\n \"\"\"\n Split the sequence before the V gene match into UTR and leader by\n searching for the start codon.\n \"\"\"\n if 'V' not in self.hits:\n return None, None\n before_v = self.full_sequence[:self.hits['V'].query_start]\n\n # Search for the start codon\n for offset in (0, 1, 2):\n for i in range(66, 42, -3):\n if before_v[-i + offset:-i + 3 + offset] == 'ATG':\n return before_v[:-i + offset], before_v[-i + offset:]\n return None, None\n\n def _find_cdr3(self):\n \"\"\"\n Return a repaired AlignmentSummary object that describes the CDR3 region.\n Return None if no CDR3 detected.\n \"\"\"\n if 'V' not in self.hits or 'J' not in self.hits:\n return None\n if self.chain not in self.CHAINS:\n return None\n\n # CDR3 start\n cdr3_ref_start = self._database.v_cdr3_start(self.hits['V'].subject_id, self.CHAINS[self.chain])\n if cdr3_ref_start is None:\n return None\n cdr3_query_start = self.hits['V'].query_position(reference_position=cdr3_ref_start)\n if cdr3_query_start is None:\n # Alignment is not long enough to cover CDR3 start position; try to rescue it\n # by assuming that the alignment would continue without indels.\n hit = self.hits['V']\n cdr3_query_start = hit.query_end + (cdr3_ref_start - hit.subject_end)\n\n # CDR3 end\n cdr3_ref_end = self._database.j_cdr3_end(self.hits['J'].subject_id, self.CHAINS[self.chain])\n if cdr3_ref_end is None:\n return None\n\n cdr3_query_end = self.hits['J'].query_position(reference_position=cdr3_ref_end)\n if cdr3_query_end is None:\n return None\n\n return AlignmentSummary(start=cdr3_query_start, stop=cdr3_query_end, length=None,\n matches=None, mismatches=None, gaps=None, percent_identity=None)\n\n def fr4_aa_mutation_rate(self):\n if 'J' not in self.hits:\n return None\n j_subject_id = self.hits['J'].subject_id\n if self.chain not in self.CHAINS:\n return None\n cdr3_ref_end = self._database.j_cdr3_end(j_subject_id, self.CHAINS[self.chain])\n if cdr3_ref_end is None:\n return None\n cdr3_query_end = self.hits['J'].query_position(reference_position=cdr3_ref_end)\n if cdr3_query_end is None:\n return None\n\n query = self.full_sequence[cdr3_query_end:self.hits['J'].query_end]\n try:\n query_aa = nt_to_aa(query)\n except ValueError:\n return None\n ref = self._database.j[j_subject_id][cdr3_ref_end:self.hits['J'].subject_end]\n try:\n ref_aa = nt_to_aa(ref)\n except ValueError:\n return None\n if not ref_aa:\n return None\n return 100. * edit_distance(ref_aa, query_aa) / len(ref_aa)\n\n def v_aa_mutation_rate(self):\n \"\"\"\n TODO This returns actually the total mutation rate of the FR1, CDR1, FR2, CDR2, FR3 regions\n (The CDR3 part of V is excluded.)\n \"\"\"\n mutations = 0\n length = 0\n for name in ('FR1', 'CDR1', 'FR2', 'CDR2', 'FR3'):\n region = self.regions.get(name)\n if region is None:\n return None\n if region.aa_reference is None or region.aa_mutations is None:\n return None\n mutations += region.aa_mutations\n length += len(region.aa_reference)\n\n return 100. * mutations / length\n\n def asdict(self):\n \"\"\"\n Return a flattened representation of this record as a dictionary.\n The dictionary can then be used with e.g. 
a csv.DictWriter or\n pandas.DataFrame.from_items.\n \"\"\"\n if 'V' in self.hits:\n v_nt = self.hits['V'].query_sequence\n v_aa = nt_to_aa(v_nt)\n v_shm = 100. - self.hits['V'].percent_identity\n v_errors = self.hits['V'].errors\n v_covered = 100. * self.hits['V'].covered()\n v_evalue = self.hits['V'].evalue\n else:\n v_nt = None\n v_aa = None\n v_shm = None\n v_errors = None\n v_covered = None\n v_evalue = None\n if 'D' in self.hits:\n d_errors = self.hits['D'].errors\n d_covered = 100. * self.hits['D'].covered()\n d_evalue = self.hits['D'].evalue\n else:\n d_errors = None\n d_covered = None\n d_evalue = None\n if 'J' in self.hits:\n j_nt = self.hits['J'].query_sequence\n j_shm = 100. - self.hits['J'].percent_identity\n j_errors = self.hits['J'].errors\n j_covered = 100. * self.hits['J'].covered()\n j_evalue = self.hits['J'].evalue\n else:\n j_nt = None\n j_shm = None\n j_errors = None\n j_covered = None\n j_evalue = None\n v_end = getattr(self.junction, 'v_end', None)\n vd_junction = getattr(self.junction, 'vd_junction', None)\n d_region = getattr(self.junction, 'd_region', None)\n dj_junction = getattr(self.junction, 'dj_junction', None)\n\n return dict(\n count=self.size,\n V_gene=self.v_gene,\n D_gene=self.d_gene,\n J_gene=self.j_gene,\n chain=self.chain,\n stop=self.has_stop,\n V_covered=v_covered,\n D_covered=d_covered,\n J_covered=j_covered,\n V_evalue=v_evalue,\n D_evalue=d_evalue,\n J_evalue=j_evalue,\n FR1_SHM=self.regions['FR1'].nt_mutation_rate(),\n CDR1_SHM=self.regions['CDR1'].nt_mutation_rate(),\n FR2_SHM=self.regions['FR2'].nt_mutation_rate(),\n CDR2_SHM=self.regions['CDR2'].nt_mutation_rate(),\n FR3_SHM=self.regions['FR3'].nt_mutation_rate(),\n FR4_SHM=self.regions['FR4'].nt_mutation_rate(),\n V_SHM=v_shm,\n J_SHM=j_shm,\n V_aa_mut=self.v_aa_mutation_rate(),\n J_aa_mut=self.regions['FR4'].aa_mutation_rate(), # TODO J vs FR4\n FR1_aa_mut=self.regions['FR1'].aa_mutation_rate(),\n CDR1_aa_mut=self.regions['CDR1'].aa_mutation_rate(),\n FR2_aa_mut=self.regions['FR2'].aa_mutation_rate(),\n CDR2_aa_mut=self.regions['CDR2'].aa_mutation_rate(),\n FR3_aa_mut=self.regions['FR3'].aa_mutation_rate(),\n # FR4_aa_mut=aa_rates['FR4'], # TODO\n V_errors=v_errors,\n D_errors=d_errors,\n J_errors=j_errors,\n UTR=self.utr,\n leader=self.leader,\n CDR1_nt=self.regions['CDR1'].nt_sequence,\n CDR1_aa=self.regions['CDR1'].aa_sequence,\n CDR2_nt=self.regions['CDR2'].nt_sequence,\n CDR2_aa=self.regions['CDR2'].aa_sequence,\n CDR3_nt=self.regions['CDR3'].nt_sequence,\n CDR3_aa=self.regions['CDR3'].aa_sequence,\n V_nt=v_nt,\n V_aa=v_aa,\n V_end=v_end,\n V_CDR3_start=self.v_cdr3_start,\n VD_junction=vd_junction,\n D_region=d_region,\n DJ_junction=dj_junction,\n J_nt=j_nt,\n VDJ_nt=self.vdj_sequence,\n VDJ_aa=nt_to_aa(self.vdj_sequence) if self.vdj_sequence else None,\n name=self.query_name,\n barcode=self.barcode,\n genomic_sequence=self.genomic_sequence,\n )\n\n\nclass ParseError(Exception):\n pass\n\n\nclass IgBlastParser:\n \"\"\"\n Parser for IgBLAST results. 
Works only when IgBLAST was run with\n the option -outfmt \"7 sseqid qstart qseq sstart sseq pident slen\".\n \"\"\"\n BOOL = {'Yes': True, 'No': False, 'N/A': None}\n FRAME = {'In-frame': True, 'Out-of-frame': False, 'N/A': None}\n SECTIONS = frozenset([\n '# Query:',\n '# V-(D)-J rearrangement summary',\n '# V-(D)-J junction details',\n '# Alignment summary',\n '# Hit table',\n 'Total queries = ',\n ])\n\n def __init__(self, sequences, igblast_lines, database=None):\n \"\"\"\n If a database is given, iterating over this object will\n yield ExtendedIgBlastRecord objects, otherwise 'normal' IgBlastRecord objects\n \"\"\"\n self._sequences = sequences\n self._igblast_lines = igblast_lines\n self._database = database\n if self._database is None:\n self._create_record = IgBlastRecord\n else:\n self._create_record = functools.partial(ExtendedIgBlastRecord, database=self._database)\n\n def __iter__(self):\n \"\"\"\n Yield (Extended-)IgBlastRecord objects\n \"\"\"\n zipped = zip(self._sequences, split_by_section(self._igblast_lines, ['# IGBLASTN']))\n for fasta_record, (record_header, record_lines) in zipped:\n # 'IGBLASTN 2.5.1+': IgBLAST 1.6.1\n assert record_header in {\n '# IGBLASTN 2.2.29+', # IgBLAST 1.4.0\n '# IGBLASTN 2.3.1+', # IgBLAST 1.5.0\n '# IGBLASTN 2.6.1+', # IgBLAST 1.7.0\n '# IGBLASTN', # IgBLAST 1.10\n }\n yield self._parse_record(record_lines, fasta_record)\n\n def _parse_record(self, record_lines, fasta_record):\n \"\"\"\n Parse a single IgBLAST record\n \"\"\"\n hits = dict()\n # All of the sections are optional, so we need to set default values here.\n query_name = None\n junction = None\n v_gene, d_gene, j_gene, chain, has_stop, in_frame, is_productive, strand = [None] * 8\n alignments = dict()\n for section, lines in split_by_section(record_lines, self.SECTIONS):\n if section.startswith('# Query: '):\n query_name = section.split(': ')[1]\n elif section.startswith('# V-(D)-J rearrangement summary'):\n fields = lines[0].split('\\t')\n if len(fields) == 7:\n # No D assignment\n v_gene, j_gene, chain, has_stop, in_frame, is_productive, strand = fields\n d_gene = None\n else:\n v_gene, d_gene, j_gene, chain, has_stop, in_frame, is_productive, strand = fields\n d_gene = gene_without_prefix(d_gene)\n v_gene = gene_without_prefix(v_gene)\n j_gene = gene_without_prefix(j_gene)\n chain = none_if_na(chain)\n has_stop = self.BOOL[has_stop]\n in_frame = self.FRAME[in_frame]\n is_productive = self.BOOL[is_productive]\n strand = strand if strand in '+-' else None\n elif section.startswith('# V-(D)-J junction details'):\n fields = lines[0].split('\\t')\n if len(fields) == 5:\n junction = JunctionVDJ(\n v_end=fields[0],\n vd_junction=fields[1],\n d_region=fields[2],\n dj_junction=fields[3],\n j_start=fields[4]\n )\n else:\n junction = JunctionVJ(\n v_end=fields[0],\n vj_junction=fields[1],\n j_start=fields[2])\n elif section.startswith('# Alignment summary'):\n for line in lines:\n fields = line.split('\\t')\n if len(fields) == 8 and fields[0] != 'Total':\n summary = self._parse_alignment_summary(fields[1:])\n region_name, _, imgt = fields[0].partition('-')\n assert imgt in ('IMGT', 'IMGT (germline)')\n alignments[region_name] = summary\n elif section.startswith('# Hit table'):\n for line in lines:\n if not line or line.startswith('#'):\n continue\n hit, gene = self._parse_hit(line)\n assert gene in ('V', 'D', 'J')\n assert gene not in hits, \"Two hits for same gene found\"\n hits[gene] = hit\n elif section.startswith('Total queries = '):\n continue\n\n assert fasta_record.name == 
query_name\n full_sequence = fasta_record.sequence.upper()\n if strand == '-':\n full_sequence = reverse_complement(full_sequence)\n\n if __debug__:\n for gene in ('V', 'D', 'J'):\n if gene not in hits:\n continue\n hit = hits[gene]\n qsequence = hit.query_sequence\n\n # IgBLAST removes the trailing semicolon (why, oh why??)\n qname = query_name[:-1] if query_name.endswith(';') else query_name\n assert chain in (None, 'VL', 'VH', 'VK', 'NON', 'VA', 'VB', 'VG', 'VD'), chain\n assert qsequence == full_sequence[hit.query_start:hit.query_start+len(qsequence)]\n\n return self._create_record(\n query_name=query_name,\n alignments=alignments,\n v_gene=v_gene,\n d_gene=d_gene,\n j_gene=j_gene,\n chain=chain,\n has_stop=has_stop,\n in_frame=in_frame,\n is_productive=is_productive,\n strand=strand,\n hits=hits,\n full_sequence=full_sequence,\n junction=junction)\n\n def _parse_alignment_summary(self, fields):\n start, stop, length, matches, mismatches, gaps = (int(v) for v in fields[:6])\n percent_identity = float(fields[6])\n assert abs(percent_identity - 100. * matches / length) < 0.1\n # Note length is not necessarily equal to stop - start. Not sure why.\n return AlignmentSummary(\n start=start - 1,\n stop=stop,\n length=length,\n matches=matches,\n mismatches=mismatches,\n gaps=gaps,\n percent_identity=percent_identity\n )\n\n def _parse_hit(self, line):\n \"\"\"\n Parse a line of the \"Hit table\" section and return a tuple (hit, gene)\n where hit is a Hit object.\n \"\"\"\n (gene, subject_id, query_start, query_alignment, subject_start, subject_alignment,\n percent_identity, subject_length, evalue) = line.split('\\t')\n # Names have been mangled by adding a '%' to the beginning. Otherwise, IgBLAST\n # may recognize sequence ids such as AB123456 as being an accession and mangle\n # them to conform to BLAST’s naming scheme.\n # Undo this here.\n assert subject_id.startswith('%')\n subject_id = subject_id[1:]\n query_start = int(query_start) - 1\n subject_start = int(subject_start) - 1\n subject_length = int(subject_length) # Length of original subject sequence\n percent_identity = float(percent_identity)\n evalue = float(evalue)\n hit = Hit(subject_id, query_start, query_alignment, subject_start,\n subject_alignment, subject_length, percent_identity, evalue)\n return hit, gene\n\n\nclass TableWriter:\n def __init__(self, file):\n self._file = file\n self._writer = csv.DictWriter(file, fieldnames=ExtendedIgBlastRecord.columns, delimiter='\\t')\n self._writer.writeheader()\n\n @staticmethod\n def yesno(v):\n \"\"\"\n Return \"yes\", \"no\" or None for boolean value v, which may also be None.\n \"\"\"\n if v is None:\n return None\n return [\"no\", \"yes\"][v]\n\n def write(self, d):\n \"\"\"\n Write the IgBLAST record (must be given as dictionary) to the output\n file.\n \"\"\"\n d = d.copy()\n d['stop'] = self.yesno(d['stop'])\n for name in ('V_covered', 'D_covered', 'J_covered',\n 'FR1_SHM', 'CDR1_SHM', 'FR2_SHM', 'CDR2_SHM', 'FR3_SHM', 'FR4_SHM',\n 'V_SHM', 'J_SHM', 'V_aa_mut', 'J_aa_mut',\n 'FR1_aa_mut', 'CDR1_aa_mut', 'FR2_aa_mut', 'CDR2_aa_mut', 'FR3_aa_mut'):\n if d[name] is not None:\n d[name] = '{:.1f}'.format(d[name])\n for name in ('V_evalue', 'D_evalue', 'J_evalue'):\n if d[name] is not None:\n d[name] = '{:G}'.format(d[name])\n self._writer.writerow(d)\n","sub_path":"src/igdiscover/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":32288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"179239664","text":"import 
heapq\r\nfrom grafo import *\r\n\r\ndef arbol_tendido_minimo(grafo):\r\n    inicio = grafo.obtener_vertice_aleatorio()\r\n    visitados = set()\r\n    visitados.add(inicio)\r\n    heap = []\r\n\r\n    for adyacente in grafo.obtener_adyacentes(inicio):\r\n        heapq.heappush( heap, (inicio, adyacente, grafo.obtener_peso_arista(inicio, adyacente)) )\r\n\r\n    arbol = Grafo(False)\r\n\r\n    for vertice in grafo: arbol.agregar_vertice(vertice)\r\n\r\n    while not len(heap) == 0:\r\n        vertice, adyacente, peso = heapq.heappop(heap)\r\n\r\n        if adyacente in visitados: continue\r\n\r\n        arbol.agregar_arista(vertice, adyacente, grafo.obtener_peso_arista(vertice, adyacente))\r\n        visitados.add(adyacente)\r\n\r\n        for w in grafo.obtener_adyacentes(adyacente):\r\n            heapq.heappush( heap, (adyacente, w, grafo.obtener_peso_arista(adyacente, w)) )\r\n\r\n    return arbol\r\n\r\ndef reducir_caminos(grafo_ciudades, archivo, diccionario_coordenadas):\r\n    arbol = arbol_tendido_minimo(grafo_ciudades)\r\n    costo_total = 0\r\n    visitados = set()\r\n\r\n    with open(archivo, \"w\") as archivo_salida:\r\n        archivo_salida.write( \"{}\\n\".format(diccionario_coordenadas[\"ciudades\"]) )\r\n\r\n        for ciudad in diccionario_coordenadas:\r\n            if ciudad == \"ciudades\": continue  # skip the entry that stores the city count written above\r\n            lista_coordenadas = diccionario_coordenadas[ciudad]\r\n            latitud = lista_coordenadas[0]\r\n            longitud = lista_coordenadas[1]\r\n            archivo_salida.write( \"{},{},{}\\n\".format(ciudad, latitud, longitud) )\r\n\r\n        cantidad_aristas = obtener_cantidad_vertices(grafo_ciudades) - 1\r\n        archivo_salida.write( \"{}\\n\".format(cantidad_aristas) )\r\n\r\n        for vertice in arbol:\r\n\r\n            for adyacente in arbol.obtener_adyacentes(vertice):\r\n\r\n                if adyacente not in visitados:\r\n                    visitados.add(adyacente)\r\n                    peso_arista = arbol.obtener_peso_arista(vertice, adyacente)\r\n                    costo_total += peso_arista\r\n                    archivo_salida.write( \"{},{},{}\\n\".format(vertice, adyacente, peso_arista) )\r\n\r\n    print( \"Costo total: {}\".format(costo_total) )\r\n","sub_path":"tendido_minimo.py","file_name":"tendido_minimo.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"271696702","text":"\"\"\"assc project user tables added\n\nRevision ID: ab8ce088d027\nRevises: 31299edce459\nCreate Date: 2021-01-21 12:43:31.773229\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ab8ce088d027'\ndown_revision = '31299edce459'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('author_project_assc',\n    sa.Column('author_id', sa.Integer(), nullable=False),\n    sa.Column('project_id', sa.Integer(), nullable=False),\n    sa.ForeignKeyConstraint(['author_id'], ['user.id'], ),\n    sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),\n    sa.PrimaryKeyConstraint('author_id', 'project_id')\n    )\n    op.create_table('leader_project_assc',\n    sa.Column('leader_id', sa.Integer(), nullable=False),\n    sa.Column('project_id', sa.Integer(), nullable=False),\n    sa.ForeignKeyConstraint(['leader_id'], ['user.id'], ),\n    sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),\n    sa.PrimaryKeyConstraint('leader_id', 'project_id')\n    )\n    op.add_column('project', sa.Column('color', sa.String(length=10), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('project', 'color')\n op.drop_table('leader_project_assc')\n op.drop_table('author_project_assc')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ab8ce088d027_assc_project_user_tables_added.py","file_name":"ab8ce088d027_assc_project_user_tables_added.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"455745971","text":"# Chapter 01 Laboratory\n# Course: Program Arcade Games with Python\n# Author: Leo Dube\n# Date: April 08, 2016\n\n# 1.1 PART A\n\n# Input and Variable Definition\ntemp_fahrenheit = float(input(\"What is the current temperature in Fahrenheit:\"))\n\n# Calculations\ntemp_celsius = (temp_fahrenheit - 32) * (5/9)\n\n# Display Output\nprint(\"\\nThe temperature in Celsius is \", temp_celsius)\n\n\n# 1.2 PART B\nprint(\"This program calculates the area of a trapezoid\")\n\n# Input and Variable Definition\nheight_trap = int(input(\"Enter the height of the trapezoid:\"))\nlength_bottom = int(input(\"Enter the length of the bottom base:\"))\nlength_top = int(input(\"Enter the length of the top base:\"))\n\n# Calculations\ntrap_area = (1/2) * ((length_bottom + length_top) * height_trap)\n\n# Print outputs\nprint(\"\\nThe area is:\", trap_area)\n\n\n# 1.3 PART C\n# skipped\n","sub_path":"lab01.py","file_name":"lab01.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"17967350","text":"import os\nimport SmellExtract\nimport FileUtil\n\nRESULT_ROOT_IN = \"/Users/Tushar/Documents/Research/architectureSmells/Results\"\nOUT_FILE_PATH = \"/Users/Tushar/Documents/Workspace/extractSmellInfo/\"\n\ncounter = 1\n\n# Case 1: I want to see whether there are generally a high percentage of imperative abstractions in the god components\n# present in the subject repositories. Can you get me the total number of classes, number of imperative abstractions\n# and number of unutilized abstractions present in all the god components found over all repos? I want to check,\n# e.g. whether the high correlation between god component and imperative abstraction can actually be explained\n# through a high percentage of imperative abstractions or not?\n#FileUtil.writeFile(os.path.join(OUT_FILE_PATH, \"smellsInfo1.csv\"), \"Project,Namespace,God Component,Unutilized Abstraction, Imperative Abstraction\")\n\n# Case 2: 2) Whether imperative abstractions contain complex or long methods? As God component is highly correlated to\n# complex method as well as imperative abstraction, I want to check whether most of the methods inside imperative\n# abstractions are complex/long or not. Actually we did not find the correlation between design and implementation\n# smells or intra smell type correlations (architecture-architecture, implementation - implementation),\n# perhaps that might have helped. But for this paper I think it will be sufficient if we can provide 3-4 examples\n# concerning specific repos or all repos in general.\n#FileUtil.writeFile(os.path.join(OUT_FILE_PATH, \"smellsInfo2.csv\"), \"Project,Namespace,Type,Imperative Abstraction,Long Method,Complex Method\")\n\n#case 3 : 3) Any specific repo, where God component was decided on LOC rather than classes, and that component contains\n# a high count of insufficient modularization instances. 
Also, which one is more common for god components (decision on\n# number of classes or LOC)?\nFileUtil.writeFile(os.path.join(OUT_FILE_PATH, \"smellsInfo3.csv\"), \"Project,Namespace,God Component,NO of classes,LOC,Insufficient Modularization\")\n\nfor dir in os.listdir(RESULT_ROOT_IN):\n    if os.path.isdir(os.path.join(RESULT_ROOT_IN, dir)):\n        print(\"Analyzing repo \" + str(counter) + \": \" + str(dir) + \"\\n\")\n        counter += 1\n        #Case 1\n        #SmellExtract.extractSmellInfo(RESULT_ROOT_IN, dir, os.path.join(OUT_FILE_PATH, \"smellsInfo1.csv\"))\n        #Case 2\n        #SmellExtract.extractSmellInfo2(RESULT_ROOT_IN, dir, os.path.join(OUT_FILE_PATH, \"smellsInfo2.csv\"))\n        #Case 3\n        SmellExtract.extractSmellInfo3(RESULT_ROOT_IN, dir, os.path.join(OUT_FILE_PATH, \"smellsInfo3.csv\"))","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"561537354","text":"import numpy as np\n\nfrom time import sleep\nfrom nn.interfaces import IOptimizer\n\n\nclass GradientDecentOptimizer_v2(IOptimizer):\n\n    def __init__(self, learn_rate=0.01):\n\n        self.LR = learn_rate\n        self.Loss = None\n        self.Layers = []\n\n    def optimize(self, layers):\n        self.Layers = layers\n\n    def set_loss(self, loss):\n        self.Loss = loss\n\n    def forward_propagate(self, x):\n        # forward propagation\n        intermediate = [x]\n        for nn in self.Layers:\n            intermediate.append(nn.F(intermediate[-1]))\n        return intermediate\n\n    def backward_propagate(self, intermediate, gradient):\n        # backward propagation\n        grad = gradient\n        for i in range(1, len(self.Layers)+1):\n            grad = self.Layers[-1*i].backpropagation(intermediate[-1 * (i+1)], grad)\n        return None\n\n    def calculate_gradient(self, y, label):\n        return self.LR * self.Loss.gradient(y, label)\n\n    def train(self, x, label):\n        \"\"\"\n        train the network with labeled samples\n        \"\"\"\n        # forward\n        intermediate = self.forward_propagate(x)\n        # apply learning rate\n        grad = self.calculate_gradient(intermediate[-1], label)\n        # backward\n        self.backward_propagate(intermediate, grad)\n\n\nclass AdagradOptimizer(GradientDecentOptimizer_v2):\n\n    def __init__(self, learn_rate=0.01):\n        super().__init__(learn_rate)\n        self.Gt = 0\n        self.delta = 1e-8\n        # keep the initial learning rate so the Adagrad scaling below does not compound across steps\n        self.Base_LR = learn_rate\n\n    def train(self, x, label):\n        \"\"\"\n        Adagrad training process.\n        \"\"\"\n        # forward\n        intermediate = self.forward_propagate(x)\n        if self.Gt != 0:\n            # Adagrad: scale the base learning rate by the accumulated squared gradients\n            self.LR = self.Base_LR / np.sqrt(self.Gt + self.delta)\n        grad = self.calculate_gradient(intermediate[-1], label)\n        # update Gt\n        self.Gt = self.Gt + np.mean(np.square(grad))\n        # backward\n        self.backward_propagate(intermediate, grad)\n\n\nclass ParallelSGDOptimizer(GradientDecentOptimizer_v2):\n    \"\"\"\n    Parallel Stochastic Gradient Descent Optimizer\n    \"\"\"\n\n    def __init__(self, tags, com, batch_size, learn_rate=0.01):\n        \"\"\"\n        Tags and communication thread helper are required for PSGD.\n        :param tags: list of codec.tag.Tag\n        :param com: instance of psgd.transfer.ITransfer\n        :param learn_rate: scalar\n        \"\"\"\n        super().__init__(learn_rate)\n        self.Tags = tags\n        self.TransferHelper = com\n        self.Slice_To_Take = None\n        self.BatchSize = batch_size\n\n    def forward_propagate(self, x):\n        \"\"\"\n        Forward propagate process.\n        \"\"\"\n        # get multi-blocks\n        blocks = []\n\n        for tag in self.Tags:\n            # save all blocks in one batch\n            blocks.append(x[tag.getSliceWithinBatch()])\n\n        if self.Slice_To_Take is None:\n            self.Slice_To_Take = []\n            start = 0\n            end = 0\n            for block in blocks:\n                end = end + block.shape[0]\n                
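# remember which contiguous slice of the concatenated batch belongs to each tag\n                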
self.Slice_To_Take.append(slice(start, end))\n start = end\n # take parts of x\n x = np.concatenate(blocks, axis=0)\n # continue calculation on partial data\n return super().forward_propagate(x)\n\n def calculate_gradient(self, y, label):\n \"\"\"\n Calculated gradients on partial data.\n \"\"\"\n block_labels = []\n\n for tag in self.Tags:\n # save all blocks in one batch\n block_labels.append(label[tag.getSliceWithinBatch()])\n\n label = np.concatenate(block_labels, axis=0)\n return super().calculate_gradient(y, label)\n\n def do_layer_wise_bp(self, x, layer, gradient):\n \"\"\"\n do backward propagation layer-wise\n :param x: input of this layer\n :param layer: instance of ILayer\n :param gradient: gradient to the output of this layer\n \"\"\"\n grad_back = []\n\n for j in range(len(self.Tags)):\n w, b, y = layer.delta_wb(x[self.Slice_To_Take[j]], gradient[self.Slice_To_Take[j]])\n grad_back.append(y)\n\n self.TransferHelper.put_weights(w, self.Tags[j], 'w')\n self.TransferHelper.put_weights(b, self.Tags[j], 'b')\n\n w_new = self.TransferHelper.get_weights(self.Tags[0], 'w') / self.BatchSize\n b_new = self.TransferHelper.get_weights(self.Tags[0], 'b') / self.BatchSize\n\n gradient = layer.apply_wb(w_new, b_new, np.concatenate(grad_back, axis=0))\n return gradient\n\n def backward_propagate(self, intermediate, gradient):\n \"\"\"\n Backward propagation process.\n \"\"\"\n for i in range(1, len(self.Layers)+1):\n nn = self.Layers[-1*i]\n gradient = self.do_layer_wise_bp(intermediate[-1*(i+1)], nn, gradient)\n # increase layer\n for tag in self.Tags:\n tag.incLayer()\n # increase batch\n for tag in self.Tags:\n tag.incBatch()\n\n return None\n\n\nclass FastParallelSGDOptimizer(ParallelSGDOptimizer):\n\n def __init__(self, tags, com, batch_size, learn_rate=0.01):\n super().__init__(tags, com, batch_size, learn_rate)\n\n def do_layer_wise_bp(self, x, layer, gradient):\n \"\"\"\n Update weight and bias delta, but do training without update new weights.\n :param x: input of this layer\n :param layer: instance of ILayer\n :param gradient: gradient to the output of this layer\n \"\"\"\n grad_back = []\n\n for j in range(len(self.Tags)):\n w, b, y = layer.delta_wb(x[self.Slice_To_Take[j]], gradient[self.Slice_To_Take[j]])\n grad_back.append(y)\n\n self.TransferHelper.put_weights(w, self.Tags[j], 'w')\n self.TransferHelper.put_weights(b, self.Tags[j], 'b')\n\n gradient = layer.apply_wb(0, 0, np.concatenate(grad_back, axis=0))\n return gradient\n\n def backward_propagate(self, intermediate, gradient):\n \"\"\"\n Backward propagation process.\n Do weights update after the backward propagate stage complete.\n \"\"\"\n for i in range(1, len(self.Layers)+1):\n nn = self.Layers[-1*i]\n gradient = self.do_layer_wise_bp(intermediate[-1*(i+1)], nn, gradient)\n # increase layer\n for tag in self.Tags:\n tag.incLayer()\n\n # reset layer and do it again\n self.Tags[0].resetLayer()\n for i in range(1, len(self.Layers)+1):\n nn = self.Layers[-1*i]\n # get newest updates\n w_new = self.TransferHelper.get_weights(self.Tags[0], 'w') / self.BatchSize\n b_new = self.TransferHelper.get_weights(self.Tags[0], 'b') / self.BatchSize\n # apply data\n nn.apply_wb(w_new, b_new, np.asarray(0))\n # inc layer\n self.Tags[0].incLayer()\n # increase batch\n for tag in self.Tags:\n tag.incBatch()\n\n return None\n\n\nclass DelayedPSGDOptimizer(ParallelSGDOptimizer):\n\n def __init__(self, tags, com, batch_size, learn_rate=0.01, delay_min=0, delay_max=2):\n super().__init__(tags, com, batch_size, learn_rate)\n self.Delay_Min = 
delay_min\n self.Delay_Max = delay_max\n\n def backward_propagate(self, intermediate, gradient):\n \"\"\"\n Make some lags\n \"\"\"\n sleep(np.random.uniform(self.Delay_Min, self.Delay_Max))\n return super().backward_propagate(intermediate, gradient)\n\n\nclass ParaAverageOptimizer(ParallelSGDOptimizer):\n\n def __init__(self, tags, com, batch_size, learn_rate=0.01):\n super().__init__(tags, com, batch_size, learn_rate)\n # Save the initial value of each weights\n self.initial_value = None\n\n def forward_propagate(self, x):\n if self.initial_value is None:\n self.initial_value = {}\n for nn in self.Layers:\n self.initial_value[nn] = (nn.W.copy(), nn.B.copy())\n return super().forward_propagate(x)\n\n def do_layer_wise_bp(self, x, layer, gradient):\n \"\"\"\n do backward propagation layer-wise\n using parameter server with initial value as zero.\n be aware, the initial value of parameter server is zero!!!\n :param x: input of this layer\n :param layer: instance of ILayer\n :param gradient: gradient to the output of this layer\n \"\"\"\n for j in range(len(self.Tags)):\n block_x = x[self.Slice_To_Take[j]]\n block_grad = gradient[self.Slice_To_Take[j]]\n w, b, y = layer.delta_wb(block_x, block_grad)\n\n self.TransferHelper.put_weights(w / len(block_x), self.Tags[j], 'w')\n self.TransferHelper.put_weights(b / len(block_x), self.Tags[j], 'b')\n\n w_new = self.TransferHelper.get_weights(self.Tags[0], 'w')\n b_new = self.TransferHelper.get_weights(self.Tags[0], 'b')\n\n layer.W, layer.B = self.initial_value[layer][0] + w_new, self.initial_value[layer][1] + b_new\n\n gradient = layer.forward_gradient(x, gradient)\n return gradient","sub_path":"nn/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":9059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"53474296","text":"#importations:\nimport pygame as pg\nfrom colors import *\nimport random\nimport sys\nimport time\n\n#initializations\npg.mixer.pre_init(frequency=44100,size=16,channels=1,buffer=512)\npg.init()\n\n#screen dimensions \nscreen_w=540\nscreen_h=868\nscreen=pg.display.set_mode((screen_w,screen_h))\npg.display.set_caption('Stick Hero')\nclock=pg.time.Clock()\n\n#importing the background and the first hero image\nbackground=pg.image.load('images/background.png').convert()\nhero=pg.image.load('images/idle/1.png').convert_alpha()\nhero=pg.transform.scale(hero,(100,100))\n\n#Playing starting sound\npg.mixer.music.load('Sounds/Bells2.mp3')\npg.mixer.music.play()\n\n\n\n#Importing the frames as lists to animate the hero \nwalkRight = [pg.image.load('images/walking/1.png').convert_alpha(), pg.image.load('images/walking/2.png').convert_alpha(), pg.image.load('images/walking/3.png').convert_alpha(), pg.image.load('images/walking/4.png').convert_alpha(), pg.image.load('images/walking/5.png').convert_alpha(), pg.image.load('images/walking/6.png').convert_alpha(), pg.image.load('images/walking/7.png').convert_alpha(), pg.image.load('images/walking/8.png').convert_alpha(), pg.image.load('images/walking/9.png').convert_alpha(), pg.image.load('images/walking/10.png').convert_alpha(), pg.image.load('images/walking/11.png').convert_alpha(), pg.image.load('images/walking/12.png').convert_alpha()]\nidle = [pg.image.load('images/idle/1.png').convert_alpha(), pg.image.load('images/idle/2.png').convert_alpha(), pg.image.load('images/idle/3.png').convert_alpha(), pg.image.load('images/idle/4.png').convert_alpha(), pg.image.load('images/idle/5.png').convert_alpha(), 
pg.image.load('images/idle/6.png').convert_alpha(), pg.image.load('images/idle/7.png').convert_alpha(), pg.image.load('images/idle/8.png').convert_alpha(), pg.image.load('images/idle/9.png').convert_alpha(), pg.image.load('images/idle/10.png').convert_alpha(), pg.image.load('images/idle/11.png').convert_alpha(), pg.image.load('images/idle/12.png').convert_alpha()]\ndying = [pg.image.load('images/die/1.png').convert_alpha(), pg.image.load('images/die/2.png').convert_alpha(), pg.image.load('images/die/3.png').convert_alpha(), pg.image.load('images/die/4.png').convert_alpha(), pg.image.load('images/die/5.png').convert_alpha(), pg.image.load('images/die/6.png').convert_alpha(), pg.image.load('images/die/7.png').convert_alpha(), pg.image.load('images/die/8.png').convert_alpha(), pg.image.load('images/die/9.png').convert_alpha(), pg.image.load('images/die/10.png').convert_alpha(), pg.image.load('images/die/11.png').convert_alpha(), pg.image.load('images/die/12.png').convert_alpha()]\n\n\n\n\n\n#The obstacle class\nclass Obstacle ():\n\tdef __init__(self, pos, width, height):\n\t\tself.color=black\n\t\tself.x,self.y=pos\n\t\tself.width=width\n\t\tself.height=height\n\n\tdef draw(self,screen):\n\t\tpg.draw.rect(screen,self.color,(self.x,self.y,self.width,self.height))\n\t\t\n\tdef move(self,dist):\n\t\tself.x-=dist\n\t\t\n\tdef get_tr_pos(self):\n\t\treturn self.x+self.width,self.y\n\n\tdef get_width(self):\n\t\treturn self.width\n\t\n\tdef get_tl_pos(self):\n\t\treturn self.x,self.y\n\n\n# Misc. functions: \ng_font1=pg.font.Font('04B_19.TTF',50)\ng_font2=pg.font.Font('04B_19.TTF',35)\n\ndef move_hero(hero,dist):#To move the hero\n\thero_rect=hero.get_rect()\n\thero_rect.centerx-=dist\n\ndef score_dis_on(s):#To show current score\n score_s=g_font1.render(str(s), True,(255,255,255))\n score_rect=score_s.get_rect(center=(270,100))\n screen.blit(score_s,score_rect)\n \ndef high_score_dis(hs):#To show high score\n high_score_s=g_font1.render(f'High score : {str(hs)}', True,(255,255,255))\n high_score_rect=high_score_s.get_rect(center=(270,768))\n screen.blit(high_score_s,high_score_rect)\n \ndef game_over_screen(new_hs):#To show the game over screen with : current score, high score and info to start again and if it is a new high score, show \"high score !\" in turquoise \n\tgame_over_screen1=g_font1.render(f'GAME OVER', True,(255,255,255))\n\tgame_over_screen2=g_font2.render(f'Right click to restart', True,(255,255,255))\n\tgame_over_screen3=g_font2.render(f'or press \\'Esc\\' to quit', True,(255,255,255))\n\tgame_over_rect1=game_over_screen1.get_rect(center=(270,384))\n\tgame_over_rect2=game_over_screen2.get_rect(center=(270,484))\n\tgame_over_rect3=game_over_screen3.get_rect(center=(270,534))\n\tscreen.blit(game_over_screen1,game_over_rect1)\n\tscreen.blit(game_over_screen2,game_over_rect2)\n\tscreen.blit(game_over_screen3,game_over_rect3)\n\tif new_hs:\n\t\tgame_over_screen4=g_font2.render(f'High score !', True,turquoise)\n\t\tgame_over_rect4=game_over_screen4.get_rect(center=(270,150))\n\t\tscreen.blit(game_over_screen4,game_over_rect4)\n\n\n#creating 3 obstacles, 2 with random width\nfirst_obstacle=Obstacle((0,screen_h-350),100,500)\nsecond_obstacle=Obstacle((screen_w/2,screen_h-350),random.randint(50, 200),500)\nthird_obstacle=Obstacle((screen_w,screen_h-350),random.randint(50, 200),500)\n\n#Global 
variables:\nstick_height=10\nstick_drawn=False\nrotate=True\nangle=0\ni=0\nherox=0\nheroy=440\nstick_all_set=False\nstick_horizantal=False\ngame_lost=False\ngame_won=False\nstick_180=False\ngame_over=False\nexceed_right=False\nexceed_left=False\nscore=0\nhigh_score=0\nadapt_hero=True\ngame_ended=False\nimage_ind=0\nstab=0\nis_idle=True\nis_dying=False\nis_moving=False\ndead=False\nnew_hs=False\ndesactivate_click=False\n\n#the main game loop\nwhile True:\n\t\n    #Animating the hero : every picture stays on for 13 frames (on 160 FPS)\n\tstab+=1\n\tif stab%13==0:\n\t\tscreen.blit(background,(0,0))\n\t\timage_ind+=1\n\t\tif image_ind>=12:\n\t\t\timage_ind=0\n\t\t\n\t\tif is_idle:\t\n\t\t\thero=idle[image_ind]\n\t\t\thero=pg.transform.scale(hero,(100,100))\n\t\telif is_moving:\n\t\t\thero=walkRight[image_ind]\n\t\t\thero=pg.transform.scale(hero,(100,100))\n\t\telif is_dying:\n\t\t\thero=dying[image_ind]\n\t\t\thero=pg.transform.scale(hero,(100,100))\n\t\t\tif image_ind==11:\n\t\t\t\tdead=True\n\t\tstab=0\n\tif dead:\n\t\thero=dying[11]\n\t\thero=pg.transform.scale(hero,(100,100))\n\n\n\t#event loop\t\n\tfor event in pg.event.get():\n\t\t#Exit strategy\n\t\tif event.type==pg.QUIT :\n\t\t\tpg.quit()\n\t\t\tsys.exit()\n\t\tif event.type==pg.KEYDOWN:\t\n\t\t\tif event.key==pg.K_ESCAPE:\n\t\t\t\tpg.quit()\n\t\t\t\tsys.exit()\n\n\t\t\n\tif stick_horizantal:\n\t\tscreen.blit(background,(0,0))\n\t#draw a growing stick while mouse left is clicked\t\n\tif pg.mouse.get_pressed()[0] and not desactivate_click:\n\t\tpg.mixer.music.load('Sounds/sus.wav')\n\t\tpg.mixer.music.play()\n\t\tx,y=first_obstacle.get_tr_pos()\n\t\tstick=pg.Rect(x-10,y+10,10,stick_height)\t\n\t\tpg.draw.rect(screen,black,stick)\n\t\tstick_height-=2\n\t\tstick_drawn=True\n\t\t\n\t#animate the stick rotation and finding if the game is lost or won\n\tif stick_drawn and not pg.mouse.get_pressed()[0]:\n\t\tdesactivate_click=True\n\t\tx,y=first_obstacle.get_tr_pos()\t\n\t\tstick_surface= pg.Surface((10,-stick_height+10),pg.SRCALPHA)\n\t\tstick_surface.fill(black)\n\t\tangle+=1\n\t\tstick_surface=pg.transform.rotozoom(stick_surface,180-angle,1)\n\t\trect=stick_surface.get_rect()\n\t\txx,yy=rect.bottomleft\n\t\tcomp=-stick_height-yy\n\t\tscreen.blit(stick_surface,(x-10,y+stick_height+comp))\t\t\n\t\ttime.sleep(.01)\n\t\tstick_horizantal=True\n\t\t\t\t\t\t\n\t\tif angle==90:\n\t\t\t\n\t\t\tscreen.blit(background,(0,0))\n\t\t\tscreen.blit(stick_surface,(x-10,y+stick_height+comp+10))\n\t\t\tstick_drawn=False\n\t\t\tstick_all_set=True\n\t\t\tstick_horizantal=False\n\t\t\tangle=0\n\t\t\tif -stick_height<second_obstacle.get_tl_pos()[0]-first_obstacle.get_tr_pos()[0] or -stick_height>abs(second_obstacle.get_tr_pos()[0]-first_obstacle.get_tr_pos()[0]):\n\t\t\t\tgame_lost=True\n\t\t\t\tadapt_hero=False\n\n\t\t\telse:\n\t\t\t\tadapt_hero=True\n\t\t\t\tgame_won=True\n\t\t\t\tdesactivate_click=False\n\t\t\t\tscore+=1\n\t\t\t\tpg.mixer.music.load('Sounds/Bells6.mp3')\n\t\t\t\tpg.mixer.music.play()\n\t\t\t\t\n\t\t\t\tif high_score<score:\n\t\t\t\t\thigh_score=score\n
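\t#worked example of the landing check above (hypothetical numbers): if the first\n\t#platform's right edge sits at x=100 and the second platform spans x=270..370,\n\t#then a stick length between 170 and 270 bridges the gap; a shorter stick falls\n\t#short of the platform (exceed_left) and a longer one tips past its far edge (exceed_right)\n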
\t#moving the obstacles and the stick to the left after a successful bridge\n\tif stick_all_set and not game_lost:\n\t\tif second_obstacle.get_tr_pos()[0]>second_obstacle.get_width(): \n\t\t\tscreen.blit(background,(0,0))\t\n\t\t\tfirst_obstacle.move(2)\n\t\t\tsecond_obstacle.move(2)\n\t\t\tthird_obstacle.move(2)\n\t\t\ti-=2\n\t\t\tscreen.blit(stick_surface,(x-10+i,y+stick_height+comp+10))\n\t\t\tis_moving=True\n\t\t\tis_idle=False\n\t\telse:\n\t\t\tstick_all_set=False\t\t\n\t\t\tfirst_obstacle=second_obstacle\n\t\t\tsecond_obstacle=third_obstacle\n\t\t\tthird_obstacle=Obstacle((screen_w,screen_h-350),random.randint(50, 200),500)\n\t\t\tstick_height=10\n\t\t\tangle=0\n\t\t\ti=0\n\t\t\tis_moving=False\n\t\t\tis_idle=True\n\t\t\t\n\t#adapting the hero position to the new obstacle\n\tif adapt_hero:\t\t\t\n\t\tif first_obstacle.get_tr_pos()[0]-80>herox :\n\t\t\therox+=2\n\t\t\tscreen.blit(background,(0,0))\n\t\t\tscreen.blit(hero,(herox,heroy))\n\t\t\t\t\n\t\telif abs(herox-second_obstacle.get_tr_pos()[0]) < 80:\n\t\t\therox-=2\n\t\t\tscreen.blit(hero,(herox,heroy))\n\t\t\tscreen.blit(background,(0,0))\n\n\t\n\n\t# If the stick is too short, rotate it down to 180°\n\tif game_lost and stick_180==False and exceed_left :\n\t\tscreen.blit(background,(0,0))\n\t\tx,y=first_obstacle.get_tr_pos()\t\n\t\tstick_surface= pg.Surface((-stick_height,10),pg.SRCALPHA)\n\t\tstick_surface.fill(black)\n\t\tangle+=1\n\t\tstick_surface=pg.transform.rotozoom(stick_surface,-angle,1)\n\t\tscreen.blit(stick_surface,(x-10,y))\t\t\n\t\ttime.sleep(.01)\n\t\tdesactivate_click=True\n\t\tif angle==80:\n\t\t\tstick_180=True\t\n\n\n\t#Simulating the fall of the hero if game lost\n\tif game_lost :\n\t\tdesactivate_click=True\n\t\tif -stick_height<second_obstacle.get_tl_pos()[0]-first_obstacle.get_tr_pos()[0]:\n\t\t\texceed_left=True\n\t\telif -stick_height>abs(second_obstacle.get_tr_pos()[0]-first_obstacle.get_tr_pos()[0]):\n\t\t\texceed_right=True\n\n\t\tif exceed_left and stick_180:\n\t\t\therox+=1\n\t\t\tis_moving=True\n\t\t\tis_idle=False\n\t\t\tif herox>first_obstacle.get_tr_pos()[0] :\t\t\t\t\n\t\t\t\theroy+=2\n\t\t\t\therox-=0.7\n\t\t\t\tis_dying=True\n\t\t\t\tis_moving=False\n\t\t\t\tis_idle=False\n\t\t\tscreen.blit(background,(0,0))\n\t\t\tscreen.blit(stick_surface,(x-10,y))\n\n\t\telif exceed_right:\n\t\t\therox+=2\n\t\t\tis_moving=True\n\t\t\tis_idle=False\n\t\t\tif herox>first_obstacle.get_tr_pos()[0]-stick_height-10:\t\t\t\t\n\t\t\t\theroy+=2\n\t\t\t\therox-=0.7\n\t\t\t\tis_dying=True\n\t\t\t\tis_moving=False\n\t\t\t\tis_idle=False\t\t\t\t\n\t\t\tscreen.blit(background,(0,0))\n\t\t\tscreen.blit(stick_surface,(x-10,y+stick_height+comp+10))\n\n\t\t#ending the game if the hero touches the screen borders\n\t\tif heroy+80>=screen_h or herox-80>=screen_w:\n\t\t\tpg.mixer.music.load('Sounds/sfx_hit.wav')\n\t\t\tpg.mixer.music.play()\n\t\t\tgame_over=True\n\t\t\tstick_180=False\n\t\t\tgame_lost=False\n\t\t\tstick_surface=pg.Surface((0,0))\n\t\t\tgame_ended=True\t\t\t\n\n\t\n\t#restarting the game \n\tif pg.mouse.get_pressed()[2] and game_over:\n\t\tstick_height=10\n\t\tstick_drawn=False\n\t\trotate=True\n\t\tangle=0\n\t\ti=0\n\t\therox=0\n\t\theroy=440\n\t\tstick_all_set=False\n\t\tstick_horizantal=False\n\t\tgame_lost=False\n\t\tgame_won=False\n\t\tstick_180=False\n\t\tgame_over=False\n\t\texceed_right=False\n\t\texceed_left=False\n\t\tscore=0\n\t\tadapt_hero=True\n\t\tgame_ended=False\n\t\timage_ind=0\t\t\n\t\tstab=0\n\t\tis_idle=True\n\t\tis_dying=False\n\t\tis_moving=False\n\t\tdead=False\n\t\tnew_hs=False\n\t\tdesactivate_click=False\n\t\tpg.mixer.music.load('Sounds/Bells2.mp3')\n\t\tpg.mixer.music.play()\n\t\n\t#Displaying the current score all the time\n\tscore_dis_on(score)\t\n\t#setting what to show depending on the outcome of the game\n\tif game_ended:\n\t\tif not pg.mouse.get_pressed()[2]:\n\t\t\thigh_score_dis(high_score)\n\t\t\tif high_score==score:\n\t\t\t\tnew_hs=True\n\n\t\t\tgame_over_screen(new_hs)\n\t\t\tscore_dis_on(score)\t\n\t\tif pg.mouse.get_pressed()[2]:\n\t\t\tgame_ended=False\n\telse:\n\t\tfirst_obstacle.draw(screen)\t\n\t\tsecond_obstacle.draw(screen)\n\t\tthird_obstacle.draw(screen)\t\t\n\t\tscreen.blit(hero,(herox,heroy))\n\t\tscore_dis_on(score)\t\n \n    #updating the display\n\tpg.display.update()\n \n    #setting the FPS 
(framerate)\n\tclock.tick(160)\n","sub_path":"stick_hero.py","file_name":"stick_hero.py","file_ext":"py","file_size_in_byte":11411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"36754819","text":"#!/usr/bin/env python\r\n\"\"\"handling mpicbg transforms in python\r\n\r\nCurrently only implemented to facilitate Affine and Polynomial2D\r\n used in Khaled Khairy's EM aligner workflow\r\n\"\"\"\r\nimport json\r\nimport logging\r\nfrom collections import Iterable\r\nimport numpy as np\r\nfrom .errors import ConversionError, EstimationError, RenderError\r\nfrom .utils import NullHandler\r\n\r\nlogger = logging.getLogger(__name__)\r\nlogger.addHandler(NullHandler())\r\n\r\ntry:\r\n from scipy.linalg import svd, LinAlgError\r\nexcept ImportError as e:\r\n logger.info(e)\r\n logger.info('scipy-based linalg may or may not lead '\r\n 'to better parameter fitting')\r\n from numpy.linalg import svd\r\n from numpy.linalg.linalg import LinAlgError\r\n\r\n\r\nclass TransformList:\r\n \"\"\"A list of Transforms\r\n\r\n Attributes\r\n ----------\r\n tforms : :obj:`list` of :class:`Transform`\r\n transforms to apply\r\n transformId : str, optional\r\n uniqueId for this TransformList\r\n \"\"\"\r\n\r\n def __init__(self, tforms=None, transformId=None, json=None):\r\n \"\"\"Initialize TransformList\r\n\r\n Parameters\r\n ----------\r\n tforms : :obj:`list` of :class:`Transform`\r\n transforms to apply\r\n transformId : str, optional\r\n uniqueId for this TransformList\r\n json : dict, optional\r\n json compatible dictionary to create\r\n :class:`TransformList` via :method:`from_dict`\r\n (will supersede tforms and transformId if not None)\r\n \"\"\"\r\n if json is not None:\r\n self.from_dict(json)\r\n else:\r\n if tforms is None:\r\n self.tforms = []\r\n else:\r\n if not isinstance(tforms, list):\r\n raise RenderError(\r\n 'unexpected type {} for transforms!'.format(\r\n type(tforms)))\r\n self.tforms = tforms\r\n self.transformId = transformId\r\n\r\n def to_dict(self):\r\n \"\"\"serialization function\r\n\r\n Returns\r\n -------\r\n dict\r\n json & render compatible representation of this TransformList\r\n \"\"\"\r\n d = {}\r\n d['type'] = 'list'\r\n d['specList'] = [tform.to_dict() for tform in self.tforms]\r\n if self.transformId is not None:\r\n d['id'] = self.transformId\r\n return d\r\n\r\n def to_json(self):\r\n \"\"\"serialization function\r\n\r\n Returns\r\n -------\r\n str\r\n string representation of the json & render\r\n representation of this TransformList\r\n \"\"\"\r\n return json.dumps(self.to_dict())\r\n\r\n def from_dict(self, d):\r\n \"\"\"deserialization function\r\n\r\n Parameters\r\n ----------\r\n d : dict\r\n json compatible dictionary representation of this TransformList\r\n \"\"\"\r\n self.tforms = []\r\n if d is not None:\r\n self.transformId = d.get('id')\r\n for td in d['specList']:\r\n self.tforms.append(load_transform_json(td))\r\n return self.tforms\r\n\r\n\r\ndef load_transform_json(d, default_type='leaf'):\r\n \"\"\"function to get the proper deserialization function\r\n\r\n Parameters\r\n ----------\r\n d : dict\r\n json compatible representation of Transform\r\n default_type : str\r\n what kind of transform should we assume this\r\n if it is not specified in 'type' ('leaf','list','ref','interpolated')\r\n\r\n Returns\r\n -------\r\n renderapi.transform.Transform\r\n deserialized transformation using the most appropriate class\r\n\r\n Raises\r\n ------\r\n RenderError\r\n if d['type'] isn't one of 
('leaf','list','ref','interpolated')\r\n \"\"\"\r\n handle_load_tform = {'leaf': load_leaf_json,\r\n 'list': lambda x: TransformList(json=x),\r\n 'ref': lambda x: ReferenceTransform(json=x),\r\n 'interpolated':\r\n lambda x: InterpolatedTransform(json=x)}\r\n try:\r\n return handle_load_tform[d.get('type', default_type)](d)\r\n except KeyError as e:\r\n raise RenderError('Unknown Transform Type {}'.format(e))\r\n\r\n\r\ndef load_leaf_json(d):\r\n \"\"\"function to get the proper deserialization function for leaf transforms\r\n\r\n Parameters\r\n ----------\r\n d : dict\r\n json compatible representation of leaf transform to deserialize\r\n\r\n Returns\r\n -------\r\n renderapi.transform.Transform\r\n deserialized transformation\r\n\r\n Raises\r\n ------\r\n RenderError\r\n if d['type'] != leaf or is omitted\r\n\r\n \"\"\"\r\n handle_load_leaf = {\r\n AffineModel.className: lambda x: AffineModel(json=x),\r\n Polynomial2DTransform.className:\r\n lambda x: Polynomial2DTransform(json=x),\r\n TranslationModel.className: lambda x: TranslationModel(json=x),\r\n RigidModel.className: lambda x: RigidModel(json=x),\r\n SimilarityModel.className: lambda x: SimilarityModel(json=x),\r\n NonLinearTransform.className: lambda x: NonLinearTransform(json=x),\r\n LensCorrection.className: lambda x: LensCorrection(json=x),\r\n NonLinearCoordinateTransform.className:\r\n lambda x: NonLinearCoordinateTransform(json=x)}\r\n\r\n tform_type = d.get('type', 'leaf')\r\n if tform_type != 'leaf':\r\n raise RenderError(\r\n 'Unexpected or unknown Transform Type {}'.format(tform_type))\r\n tform_class = d['className']\r\n try:\r\n return handle_load_leaf[tform_class](d)\r\n except KeyError as e:\r\n logger.info('Leaf transform class {} not defined in '\r\n 'transform module, using generic'.format(e))\r\n return Transform(json=d)\r\n\r\n\r\nclass InterpolatedTransform:\r\n \"\"\"Transform spec defined by linear interpolation of\r\n two other transform specs\r\n\r\n Attributes\r\n ----------\r\n a : :class:`Transform` or :class:`TransformList` or :class:`InterpolatedTransform`\r\n transform at minimum weight\r\n b : :class:`Transform` or :class:`TransformList` or :class:`InterpolatedTransform`\r\n transform at maximum weight\r\n lambda_ : float\r\n value in interval [0.,1.] which defines evaluation of the\r\n linear interpolation between a (at 0) and b (at 1)\r\n \"\"\"\r\n\r\n def __init__(self, a=None, b=None, lambda_=None, json=None):\r\n \"\"\"Initialize InterpolatedTransform\r\n\r\n Parameters\r\n ----------\r\n a : :class:`Transform` or :class:`TransformList`\r\n or :class:`InterpolatedTransform`\r\n transform at minimum weight\r\n b : :class:`Transform` or :class:`TransformList`\r\n or :class:`InterpolatedTransform`\r\n transform at maximum weight\r\n lambda_ : float\r\n value in interval [0.,1.] 
which defines evaluation of the\r\n linear interpolation between a (at 0) and b (at 1)\r\n json : dict\r\n json compatible representation of this transform to\r\n initialize via :method:`self.from_dict`\r\n (will supersede a, b, and lambda_ if not None)\r\n\r\n \"\"\"\r\n if json is not None:\r\n self.from_dict(json)\r\n else:\r\n self.a = a\r\n self.b = b\r\n self.lambda_ = lambda_\r\n\r\n def to_dict(self):\r\n \"\"\"serialization routine\r\n\r\n Returns\r\n -------\r\n dict\r\n json compatible representation\r\n \"\"\"\r\n return dict(self)\r\n\r\n def from_dict(self, d):\r\n \"\"\"deserialization routine\r\n\r\n Parameters\r\n ----------\r\n d : dict\r\n json compatible representation\r\n \"\"\"\r\n self.a = load_transform_json(d['a'])\r\n self.b = load_transform_json(d['b'])\r\n self.lambda_ = d['lambda']\r\n\r\n def __iter__(self):\r\n return iter([('type', 'interpolated'),\r\n ('a', self.a.to_dict()),\r\n ('b', self.b.to_dict()),\r\n ('lambda', self.lambda_)])\r\n\r\n\r\nclass ReferenceTransform:\r\n \"\"\"Transform which is simply a reference to a transform stored elsewhere\r\n\r\n Attributes\r\n ----------\r\n refId : str\r\n transformId of the referenced transform\r\n\r\n \"\"\"\r\n\r\n def __init__(self, refId=None, json=None):\r\n \"\"\"Initialize ReferenceTransform\r\n\r\n Parameters\r\n ----------\r\n refId : str\r\n transformId of the referenced transform\r\n json : dict\r\n json compatible representation of this transform\r\n (will supersede refId if not None)\r\n\r\n \"\"\"\r\n if json is not None:\r\n self.from_dict(json)\r\n else:\r\n self.refId = refId\r\n\r\n def to_dict(self):\r\n \"\"\"serialization routine\r\n\r\n Returns\r\n -------\r\n dict\r\n json compatible representation of this transform\r\n \"\"\"\r\n d = {}\r\n d['type'] = 'ref'\r\n d['refId'] = self.refId\r\n return d\r\n\r\n def from_dict(self, d):\r\n \"\"\"deserialization routine\r\n\r\n Parameters\r\n ----------\r\n d : dict\r\n json compatible representation of this transform\r\n \"\"\"\r\n self.refId = d['refId']\r\n\r\n def __str__(self):\r\n return 'ReferenceTransform(%s)' % self.refId\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n\r\n def __iter__(self):\r\n return iter([('type', 'ref'), ('refId', self.refId)])\r\n\r\n\r\nclass Transform(object):\r\n \"\"\"Base transformation class\r\n\r\n Attributes\r\n ----------\r\n className : str\r\n mpicbg java classname of this transform\r\n dataString : str\r\n string reprsentation of this transform as speced by\r\n mpicbg java class library\r\n transformId : str, optional\r\n unique Id for this transform (optional)\r\n \"\"\"\r\n\r\n def __init__(self, className=None, dataString=None,\r\n transformId=None, labels=None, json=None):\r\n \"\"\"Initialize Transform\r\n\r\n Parameters\r\n ----------\r\n className : str\r\n mpicbg java classname of this transform\r\n dataString : str\r\n string reprsentation of this transform as speced\r\n by mpicbg java class library\r\n transformId : str, optional\r\n unique Id for this transform (optional)\r\n labels : list of str\r\n list of labels to give this transform\r\n json : dict\r\n json compatible representation of this transform\r\n (supersedes className, dataString, and transformId if not None)\r\n \"\"\"\r\n if json is not None:\r\n self.from_dict(json)\r\n else:\r\n self.className = className\r\n self.dataString = dataString\r\n self.transformId = transformId\r\n self.labels = labels\r\n\r\n def to_dict(self):\r\n \"\"\"serialization routine\r\n\r\n Returns\r\n -------\r\n dict\r\n json compatible 
representation of this transform\r\n        \"\"\"\r\n        d = {}\r\n        d['type'] = 'leaf'\r\n        d['className'] = self.className\r\n        d['dataString'] = self.dataString\r\n        if self.transformId is not None:\r\n            d['id'] = self.transformId\r\n        if self.labels is not None:\r\n            d['metaData'] = {'labels': self.labels}\r\n        return d\r\n\r\n    def from_dict(self, d):\r\n        \"\"\"deserialization routine\r\n\r\n        Parameters\r\n        ----------\r\n        d : dict\r\n            json compatible representation of this transform\r\n        \"\"\"\r\n        self.className = d['className']\r\n        self.transformId = d.get('id', None)\r\n        self._process_dataString(d['dataString'])\r\n        md = d.get('metaData', None)\r\n        if md is not None:\r\n            self.labels = md.get('labels', None)\r\n        else:\r\n            self.labels = None\r\n        \r\n    def _process_dataString(self, datastring):\r\n        \"\"\"method meant to set state of transform from datastring\r\n        generic implementation only saves datastring at self.dataString.\r\n        should be rewritten for all transform classes that want to\r\n        implement tform, fit, etc.\r\n\r\n        Parameters\r\n        ----------\r\n        dataString : str\r\n            string which can be used to initialize mpicbg transforms in java\r\n        \"\"\"\r\n        self.dataString = datastring\r\n\r\n    def __str__(self):\r\n        return 'className:%s\\ndataString:%s' % (\r\n            self.className, self.dataString)\r\n\r\n    def __repr__(self):\r\n        return self.__str__()\r\n\r\n    def __eq__(self, other):\r\n        return self.__str__() == other.__str__()\r\n\r\n    def __hash__(self):\r\n        return hash((self.__str__()))\r\n\r\n\r\nclass AffineModel(Transform):\r\n    \"\"\"Linear 2d Transformation\r\n    mpicbg classname: mpicbg.trakem2.transform.AffineModel2D\r\n    implements this simple math\r\n    x'=M00*x + M01*y + B0\r\n    y'=M10*x + M11*y + B1\r\n\r\n    Attributes\r\n    ----------\r\n    M00 : float\r\n        x'+=M00*x\r\n    M01 : float\r\n        x'+=M01*y\r\n    M10 : float\r\n        y'+=M10*x\r\n    M11 : float\r\n        y'+=M11*y\r\n    B0 : float\r\n        x'+=B0\r\n    B1 : float\r\n        y'+=B1\r\n    transformId : str, optional\r\n        unique transformId for this transform\r\n    labels : list of str\r\n        list of labels to give this transform\r\n    M : numpy.array\r\n        3x3 numpy array representing 2d Affine with homogeneous coordinates\r\n        populates with values from M00, M01, M10, M11, B0, B1 with load_M()\r\n\r\n    \"\"\"\r\n\r\n    className = 'mpicbg.trakem2.transform.AffineModel2D'\r\n\r\n    def __init__(self, M00=1.0, M01=0.0, M10=0.0, M11=1.0, B0=0.0, B1=0.0,\r\n                 transformId=None, labels=None, json=None):\r\n        \"\"\"Initialize AffineModel, defaulting to identity\r\n\r\n        Parameters\r\n        ----------\r\n        M00 : float\r\n            x'+=M00*x\r\n        M01 : float\r\n            x'+=M01*y\r\n        M10 : float\r\n            y'+=M10*x\r\n        M11 : float\r\n            y'+=M11*y\r\n        B0 : float\r\n            x'+=B0\r\n        B1 : float\r\n            y'+=B1\r\n        transformId : str\r\n            unique transformId for this transform (optional)\r\n        labels : list of str\r\n            list of labels to give this transform\r\n        json : dict\r\n            json compatible representation of this transform\r\n            (will supersede all other parameters if not None)\r\n\r\n        \"\"\"\r\n        if json is not None:\r\n            self.from_dict(json)\r\n        else:\r\n            self.M00 = M00\r\n            self.M01 = M01\r\n            self.M10 = M10\r\n            self.M11 = M11\r\n            self.B0 = B0\r\n            self.B1 = B1\r\n            self.className = 'mpicbg.trakem2.transform.AffineModel2D'\r\n            self.labels = labels\r\n            self.load_M()\r\n            self.transformId = transformId\r\n\r\n    @property\r\n    def dataString(self):\r\n        \"\"\"dataString for this transform\"\"\"\r\n        return \"%.10f %.10f %.10f %.10f %.10f %.10f\" % (\r\n            self.M[0, 0], self.M[1, 0], self.M[0, 1],\r\n            self.M[1, 1], self.M[0, 2], self.M[1, 2])\r\n\r\n    def _process_dataString(self, 
datastring):\r\n \"\"\"generate datastring and param attributes from datastring\"\"\"\r\n dsList = datastring.split()\r\n self.M00 = float(dsList[0])\r\n self.M10 = float(dsList[1])\r\n self.M01 = float(dsList[2])\r\n self.M11 = float(dsList[3])\r\n self.B0 = float(dsList[4])\r\n self.B1 = float(dsList[5])\r\n self.load_M()\r\n\r\n def load_M(self):\r\n \"\"\"method to take the attribute of self and fill in self.M\"\"\"\r\n self.M = np.identity(3, np.double)\r\n self.M[0, 0] = self.M00\r\n self.M[0, 1] = self.M01\r\n self.M[1, 0] = self.M10\r\n self.M[1, 1] = self.M11\r\n self.M[0, 2] = self.B0\r\n self.M[1, 2] = self.B1\r\n\r\n @staticmethod\r\n def fit(A, B):\r\n \"\"\"function to fit this transform given the corresponding sets of points A & B\r\n\r\n Parameters\r\n ----------\r\n A : numpy.array\r\n a Nx2 matrix of source points\r\n B : numpy.array\r\n a Nx2 matrix of destination points\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a 6x1 matrix with the best fit parameters\r\n ordered M00,M01,M10,M11,B0,B1\r\n \"\"\"\r\n if not all([A.shape[0] == B.shape[0], A.shape[1] == B.shape[1] == 2]):\r\n raise EstimationError(\r\n 'shape mismatch! A shape: {}, B shape {}'.format(\r\n A.shape, B.shape))\r\n\r\n N = A.shape[0] # total points\r\n\r\n M = np.zeros((2 * N, 6))\r\n Y = np.zeros((2 * N, 1))\r\n for i in range(N):\r\n M[2 * i, :] = [A[i, 0], A[i, 1], 0, 0, 1, 0]\r\n M[2 * i + 1, :] = [0, 0, A[i, 0], A[i, 1], 0, 1]\r\n Y[2 * i] = B[i, 0]\r\n Y[2 * i + 1] = B[i, 1]\r\n\r\n (Tvec, residuals, rank, s) = np.linalg.lstsq(M, Y)\r\n return Tvec\r\n\r\n def estimate(self, A, B, return_params=True, **kwargs):\r\n \"\"\"method for setting this transformation with the best fit\r\n given the corresponding points A,B\r\n\r\n Parameters\r\n ----------\r\n A : numpy.array\r\n a Nx2 matrix of source points\r\n B : numpy.array\r\n a Nx2 matrix of destination points\r\n return_params : boolean\r\n whether to return the parameter matrix\r\n **kwargs\r\n keyword arguments to pass to self.fit\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a 2x3 matrix of parameters for this matrix,\r\n laid out (x,y) x (x,y,offset)\r\n (or None if return_params=False)\r\n \"\"\"\r\n Tvec = self.fit(A, B, **kwargs)\r\n self.M00 = Tvec[0, 0]\r\n self.M10 = Tvec[2, 0]\r\n self.M01 = Tvec[1, 0]\r\n self.M11 = Tvec[3, 0]\r\n self.B0 = Tvec[4, 0]\r\n self.B1 = Tvec[5, 0]\r\n self.load_M()\r\n if return_params:\r\n return self.M\r\n\r\n def concatenate(self, model):\r\n \"\"\"concatenate a model to this model -- ported from trakEM2 below:\r\n ::\r\n\r\n final double a00 = m00 * model.m00 + m01 * model.m10;\r\n final double a01 = m00 * model.m01 + m01 * model.m11;\r\n final double a02 = m00 * model.m02 + m01 * model.m12 + m02;\r\n\r\n final double a10 = m10 * model.m00 + m11 * model.m10;\r\n final double a11 = m10 * model.m01 + m11 * model.m11;\r\n final double a12 = m10 * model.m02 + m11 * model.m12 + m12;\r\n\r\n Parameters\r\n ----------\r\n model : AffineModel\r\n model to concatenate to this one\r\n\r\n Returns\r\n -------\r\n AffineModel\r\n model after concatenating model with this model\r\n \"\"\"\r\n a00 = self.M[0, 0] * model.M[0, 0] + self.M[0, 1] * model.M[1, 0]\r\n a01 = self.M[0, 0] * model.M[0, 1] + self.M[0, 1] * model.M[1, 1]\r\n a02 = (self.M[0, 0] * model.M[0, 2] + self.M[0, 1] * model.M[1, 2] +\r\n self.M[0, 2])\r\n\r\n a10 = self.M[1, 0] * model.M[0, 0] + self.M[1, 1] * model.M[1, 0]\r\n a11 = self.M[1, 0] * model.M[0, 1] + self.M[1, 1] * model.M[1, 1]\r\n a12 = (self.M[1, 0] * model.M[0, 2] + self.M[1, 1] * 
model.M[1, 2] +\r\n self.M[1, 2])\r\n\r\n newmodel = AffineModel(a00, a01, a10, a11, a02, a12)\r\n return newmodel\r\n\r\n def invert(self):\r\n \"\"\"return an inverted version of this transformation\r\n\r\n Returns\r\n -------\r\n AffineModel\r\n an inverted version of this transformation\r\n \"\"\"\r\n inv_M = np.linalg.inv(self.M)\r\n Ai = AffineModel(inv_M[0, 0], inv_M[0, 1], inv_M[1, 0],\r\n inv_M[1, 1], inv_M[0, 2], inv_M[1, 2])\r\n return Ai\r\n\r\n @staticmethod\r\n def convert_to_point_vector(points):\r\n \"\"\"method to help reshape x,y points to x,y,1 vectors\r\n\r\n Parameters\r\n ----------\r\n points : numpy.array\r\n a Nx2 array of x,y points\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a Nx3 array of x,y,1 points used for transformations\r\n \"\"\"\r\n Np = points.shape[0]\r\n onevec = np.ones((Np, 1), np.double)\r\n\r\n if points.shape[1] != 2:\r\n raise ConversionError('Points must be of shape (:, 2) '\r\n '-- got {}'.format(points.shape))\r\n Nd = 2\r\n points = np.concatenate((points, onevec), axis=1)\r\n return points, Nd\r\n\r\n @staticmethod\r\n def convert_points_vector_to_array(points, Nd=2):\r\n \"\"\"method for convertion x,y,K points to x,y vectors\r\n\r\n Parameters\r\n ----------\r\n points : numpy.array\r\n a Nx3 vector of points after transformation\r\n Nd : int\r\n the number of dimensions to cutoff (should be 2)\r\n\r\n Returns\r\n -------\r\n numpy.array: a Nx2 array of x,y points\r\n \"\"\"\r\n points = points[:, 0:Nd] / np.tile(points[:, 2], (Nd, 1)).T\r\n return points\r\n\r\n def tform(self, points):\r\n \"\"\"transform a set of points through this transformation\r\n\r\n Parameters\r\n ----------\r\n points : numpy.array\r\n a Nx2 array of x,y points\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a Nx2 array of x,y points after transformation\r\n \"\"\"\r\n points, Nd = self.convert_to_point_vector(points)\r\n pt = np.dot(self.M, points.T).T\r\n return self.convert_points_vector_to_array(pt, Nd)\r\n\r\n def inverse_tform(self, points):\r\n \"\"\"transform a set of points through the inverse of this transformation\r\n\r\n Parameters\r\n ----------\r\n points : numpy.array\r\n a Nx2 array of x,y points\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a Nx2 array of x,y points after inverse transformation\r\n \"\"\"\r\n points, Nd = self.convert_to_point_vector(points)\r\n pt = np.dot(np.linalg.inv(self.M), points.T).T\r\n return self.convert_points_vector_to_array(pt, Nd)\r\n\r\n @property\r\n def scale(self):\r\n \"\"\"tuple of scale for x, y\"\"\"\r\n return tuple([np.sqrt(sum([i ** 2 for i in self.M[:, j]]))\r\n for j in range(self.M.shape[1])])[:2]\r\n\r\n @property\r\n def shear(self):\r\n \"\"\"counter-clockwise shear angle\"\"\"\r\n return np.arctan2(-self.M[0, 1], self.M[1, 1]) - self.rotation\r\n\r\n @property\r\n def translation(self):\r\n \"\"\"tuple of translation in x, y\"\"\"\r\n return tuple(self.M[:2, 2])\r\n\r\n @property\r\n def rotation(self):\r\n \"\"\"counter-clockwise rotation\"\"\"\r\n return np.arctan2(self.M[1, 0], self.M[0, 0])\r\n\r\n def __str__(self):\r\n return \"M=[[%f,%f],[%f,%f]] B=[%f,%f]\" % (\r\n self.M[0, 0], self.M[0, 1], self.M[1, 0],\r\n self.M[1, 1], self.M[0, 2], self.M[1, 2])\r\n\r\n\r\nclass TranslationModel(AffineModel):\r\n \"\"\"Translation fitting and estimation as an :class:`AffineModel`\r\n Linear 2d Transformation\r\n mpicbg classname: mpicbg.trakem2.transform.AffineModel2D\r\n implements this simple math\r\n x'=M00*x + M01*x + B0\r\n y'=M10*x + M11*y + B1\r\n\r\n Attributes\r\n ----------\r\n M00 : 
float\r\n x'+=M00*x\r\n M01 : float\r\n x'+=M01*y\r\n M10 : float\r\n y'+=M10*x\r\n M11 : float\r\n y'+=M11*y\r\n B0 : float\r\n x'+=B0\r\n B1 : float\r\n y'+=B1\r\n transformId : str, optional\r\n unique transformId for this transform\r\n labels : list of str\r\n list of labels to give this transform\r\n M : numpy.array\r\n 3x3 numpy array representing 2d Affine with homogeneous coordinates\r\n populates with values from M00, M01, M10, M11, B0, B1 with load_M()\r\n \"\"\"\r\n\r\n className = 'mpicbg.trakem2.transform.TranslationModel2D'\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(TranslationModel, self).__init__(*args, **kwargs)\r\n\r\n def _process_dataString(self, dataString):\r\n \"\"\"expected dataString is 'tx ty'\"\"\"\r\n tx, ty = map(float, dataString.split(' '))\r\n self.B0 = tx\r\n self.B1 = ty\r\n self.load_M()\r\n\r\n @staticmethod\r\n def fit(src, dst):\r\n \"\"\"function to fit Translation transform given\r\n the corresponding sets of points src & dst\r\n\r\n Parameters\r\n ----------\r\n src : numpy.array\r\n a Nx2 matrix of source points\r\n dst : numpy.array\r\n a Nx2 matrix of destination points\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a 3x3 homogeneous matrix with the best fit translation\r\n \"\"\"\r\n t = dst.mean(axis=0) - src.mean(axis=0)\r\n T = np.eye(3)\r\n T[:2, 2] = t\r\n return T\r\n\r\n def estimate(self, src, dst, return_params=True):\r\n \"\"\"method for setting this transformation with the best fit\r\n given the corresponding points src,dst\r\n\r\n Parameters\r\n ----------\r\n src : numpy.array\r\n a Nx2 matrix of source points\r\n dst : numpy.array\r\n a Nx2 matrix of destination points\r\n return_params : bool\r\n whether to return the parameter matrix\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a 2x3 matrix of parameters for this matrix,\r\n laid out (x,y) x (x,y,offset)\r\n (or None if return_params=False)\r\n \"\"\"\r\n self.M = self.fit(src, dst)\r\n if return_params:\r\n return self.M\r\n\r\n\r\nclass RigidModel(AffineModel):\r\n \"\"\"model for fitting Rigid only transformations\r\n (rotation+translation)\r\n or\r\n (determinant=1, orthonormal eigenvectors)\r\n implemented as an :class:`AffineModel`\r\n\r\n\r\n Attributes\r\n ----------\r\n M00 : float\r\n x'+=M00*x\r\n M01 : float\r\n x'+=M01*y\r\n M10 : float\r\n y'+=M10*x\r\n M11 : float\r\n y'+=M11*y\r\n B0 : float\r\n x'+=B0\r\n B1 : float\r\n y'+=B1\r\n transformId : str, optional\r\n unique transformId for this transform\r\n labels : list of str\r\n list of labels to give this transform\r\n M : numpy.array\r\n 3x3 numpy array representing 2d Affine with homogeneous coordinates\r\n populates with values from M00, M01, M10, M11, B0, B1 with load_M()\r\n\r\n \"\"\"\r\n className = 'mpicbg.trakem2.transform.RigidModel2D'\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(RigidModel, self).__init__(*args, **kwargs)\r\n\r\n def _process_dataString(self, dataString):\r\n \"\"\"expected datastring is 'theta tx ty'\"\"\"\r\n theta, tx, ty = map(float, dataString.split(' '))\r\n self.M00 = np.cos(theta)\r\n self.M01 = -np.sin(theta)\r\n self.M10 = np.sin(theta)\r\n self.M11 = np.cos(theta)\r\n self.B0 = tx\r\n self.B1 = ty\r\n self.load_M()\r\n\r\n @staticmethod\r\n def fit(src, dst, rigid=True, **kwargs):\r\n \"\"\"function to fit this transform given the corresponding\r\n sets of points src & dst\r\n Umeyama estimation of similarity transformation\r\n\r\n Parameters\r\n ----------\r\n src : numpy.array\r\n a Nx2 matrix of source points\r\n dst : numpy.array\r\n a Nx2 matrix of destination points\r\n rigid : bool\r\n whether to constrain this transform to be rigid\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a 3x3 homogeneous matrix with the best fit parameters\r\n \"\"\"\r\n # TODO shape assertion\r\n num, dim = src.shape\r\n src_cld = src - src.mean(axis=0)\r\n dst_cld = dst - dst.mean(axis=0)\r\n A = np.dot(dst_cld.T, src_cld) / num\r\n d = np.ones((dim, ), dtype=np.double)\r\n if np.linalg.det(A) < 0:\r\n d[dim - 1] = -1\r\n T = np.eye(dim + 1, dtype=np.double)\r\n\r\n rank = np.linalg.matrix_rank(A)\r\n if rank == 0:\r\n raise EstimationError('zero rank matrix A unacceptable -- '\r\n 'likely poorly conditioned')\r\n\r\n U, S, V = svd(A)\r\n\r\n if rank == dim - 1:\r\n if np.linalg.det(U) * np.linalg.det(V) > 0:\r\n T[:dim, :dim] = np.dot(U, V)\r\n else:\r\n s = d[dim - 1]\r\n d[dim - 1] = -1\r\n T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))\r\n d[dim - 1] = s\r\n else:\r\n T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V.T))\r\n\r\n fit_scale = (1.0 if rigid else\r\n 1.0 / src_cld.var(axis=0).sum() * np.dot(S, d))\r\n\r\n T[:dim, dim] = dst.mean(axis=0) - fit_scale * np.dot(\r\n T[:dim, :dim], src.mean(axis=0).T)\r\n T[:dim, :dim] *= fit_scale\r\n return T\r\n\r\n def estimate(self, A, B, return_params=True, **kwargs):\r\n \"\"\"method for setting this transformation with the\r\n best fit given the corresponding points src,dst\r\n\r\n Parameters\r\n ----------\r\n A : numpy.array\r\n a Nx2 matrix of source points\r\n B : numpy.array\r\n a Nx2 matrix of destination points\r\n return_params : bool\r\n whether to return the parameter matrix\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a 2x3 matrix of parameters for this matrix,\r\n laid out (x,y) x (x,y,offset)\r\n (or None if return_params=False)\r\n \"\"\"\r\n self.M = self.fit(A, B, **kwargs)\r\n if return_params:\r\n return self.M\r\n\r\n\r\nclass SimilarityModel(RigidModel):\r\n \"\"\"class for fitting Similarity transformations\r\n (translation+rotation+scaling)\r\n or\r\n (orthogonal eigen vectors with equal eigenvalues)\r\n\r\n implemented as an :class:`AffineModel`\r\n\r\n Attributes\r\n ----------\r\n M00 : float\r\n x'+=M00*x\r\n M01 : float\r\n x'+=M01*y\r\n M10 : float\r\n y'+=M10*x\r\n M11 : float\r\n y'+=M11*y\r\n B0 : float\r\n x'+=B0\r\n B1 : float\r\n y'+=B1\r\n transformId : str, optional\r\n unique transformId for this transform\r\n labels : list of str\r\n list of labels to give this transform\r\n M : numpy.array\r\n 3x3 numpy array representing 2d Affine with homogeneous coordinates\r\n populates with values from M00, M01, M10, M11, B0, B1 with load_M()\r\n\r\n \"\"\"\r\n className = 'mpicbg.trakem2.transform.SimilarityModel2D'\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(SimilarityModel, self).__init__(*args, **kwargs)\r\n\r\n def _process_dataString(self, dataString):\r\n \"\"\"expected datastring is 's theta tx ty'\"\"\"\r\n s, theta, tx, ty = map(float, dataString.split(' '))\r\n self.M00 = s * np.cos(theta)\r\n self.M01 = -s * np.sin(theta)\r\n self.M10 = s * np.sin(theta)\r\n self.M11 = s * np.cos(theta)\r\n self.B0 = tx\r\n self.B1 = ty\r\n self.load_M()\r\n\r\n @staticmethod\r\n def fit(src, dst, rigid=False, **kwargs):\r\n \"\"\"function to fit this transform given the corresponding\r\n sets of points src & dst\r\n Umeyama estimation of similarity transformation\r\n\r\n Parameters\r\n ----------\r\n src : numpy.array\r\n a Nx2 matrix of source points\r\n dst : numpy.array\r\n a Nx2 matrix of destination 
points\r\n rigid : bool\r\n whether to constrain this transform to be rigid\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a 6x1 matrix with the best fit parameters\r\n ordered M00,M01,M10,M11,B0,B1\r\n \"\"\"\r\n return RigidModel.fit(src, dst, rigid=rigid)\r\n\r\n\r\nclass Polynomial2DTransform(Transform):\r\n \"\"\"Polynomial2DTransform implemented as in skimage\r\n\r\n Attributes\r\n ----------\r\n params : numpy.array\r\n 2xK matrix of polynomial coefficents up to order K\r\n\r\n \"\"\"\r\n className = 'mpicbg.trakem2.transform.PolynomialTransform2D'\r\n\r\n def __init__(self, dataString=None, src=None, dst=None, order=2,\r\n force_polynomial=True, params=None, identity=False,\r\n labels=None,json=None, **kwargs):\r\n \"\"\"Initialize Polynomial2DTransform\r\n This provides 5 different ways to initialize the transform which are\r\n mutually exclusive and applied in the order specified here.\r\n 1)json2)dataString,3)identity,4)params,5)(src,dst)\r\n\r\n Parameters\r\n ----------\r\n json : dict\r\n dictionary representation of the Polynomial2DTransform\r\n generally used by TransformList\r\n dataString : str\r\n dataString representation of transform from mpicpg\r\n identity : bool\r\n whether to make this transform the identity\r\n params : numpy.array\r\n 2xK matrix of polynomial coefficents up to order K\r\n src : numpy.array\r\n Nx2 array of source points to use for fitting (used with dst)\r\n dst : numpy.array\r\n Nx2 array of destination points to use for fitting (used with src)\r\n order : int\r\n degree of polynomial to store\r\n force_polynomial : bool\r\n whether to force this representation to return a Polynomial\r\n regardless of degree (not implemented)\r\n\r\n\r\n \"\"\"\r\n if json is not None:\r\n self.from_dict(json)\r\n else:\r\n self.className = 'mpicbg.trakem2.transform.PolynomialTransform2D'\r\n if dataString is not None:\r\n self._process_dataString(dataString)\r\n elif identity:\r\n self.params = np.array([[0, 1, 0], [0, 0, 1]])\r\n elif params is not None:\r\n self.params = params\r\n elif src is not None and dst is not None:\r\n self.estimate(src, dst, order, return_params=False, **kwargs)\r\n\r\n if not force_polynomial and self.is_affine:\r\n raise NotImplementedError('Falling back to Affine model is '\r\n 'not supported {}')\r\n self.transformId = None\r\n self.labels = labels\r\n\r\n @property\r\n def is_affine(self):\r\n \"\"\"(boolean) TODO allow default to Affine\"\"\"\r\n return False\r\n # return self.order\r\n\r\n @property\r\n def order(self):\r\n \"\"\"(int) order of polynomial\"\"\"\r\n no_coeffs = len(self.params.ravel())\r\n return int((abs(np.sqrt(4 * no_coeffs + 1)) - 3) / 2)\r\n\r\n @property\r\n def dataString(self):\r\n \"\"\"dataString of polynomial\"\"\"\r\n return Polynomial2DTransform._dataStringfromParams(self.params)\r\n\r\n @staticmethod\r\n def fit(src, dst, order=2):\r\n \"\"\"function to fit this transform given the corresponding sets\r\n of points src & dst\r\n polynomial fit\r\n\r\n Parameters\r\n ----------\r\n src : numpy.array\r\n a Nx2 matrix of source points\r\n dst : numpy.array\r\n a Nx2 matrix of destination points\r\n order : bool\r\n order of polynomial to fit\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a [2,(order+1)*(order+2)/2] array with the best fit parameters\r\n \"\"\"\r\n xs = src[:, 0]\r\n ys = src[:, 1]\r\n xd = dst[:, 0]\r\n yd = dst[:, 1]\r\n rows = src.shape[0]\r\n no_coeff = (order + 1) * (order + 2)\r\n\r\n if len(src) != len(dst):\r\n raise EstimationError(\r\n 'source has {} points, but dest has 
{}!'.format(\r\n len(src), len(dst)))\r\n if no_coeff > len(src):\r\n raise EstimationError(\r\n 'order {} is too large to fit {} points!'.format(\r\n order, len(src)))\r\n\r\n A = np.zeros([rows * 2, no_coeff + 1])\r\n pidx = 0\r\n for j in range(order + 1):\r\n for i in range(j + 1):\r\n A[:rows, pidx] = xs ** (j - i) * ys ** i\r\n A[rows:, pidx + no_coeff // 2] = xs ** (j - i) * ys ** i\r\n pidx += 1\r\n\r\n A[:rows, -1] = xd\r\n A[rows:, -1] = yd\r\n\r\n # right singular vector corresponding to smallest singular value\r\n _, s, V = svd(A)\r\n Vsm = V[np.argmin(s), :] # never trust computers\r\n return (-Vsm[:-1] / Vsm[-1]).reshape((2, no_coeff // 2))\r\n\r\n def estimate(self, src, dst, order=2,\r\n test_coords=True, max_tries=100, return_params=True,\r\n **kwargs):\r\n \"\"\"method for setting this transformation with the\r\n best fit given the corresponding points src,dst\r\n\r\n Parameters\r\n ----------\r\n src : numpy.array\r\n a Nx2 matrix of source points\r\n dst : numpy.array\r\n a Nx2 matrix of destination points\r\n order : int\r\n order of polynomial to fit\r\n test_coords : bool\r\n whether to test model after fitting to\r\n make sure it is good (see fitgood)\r\n max_tries : int\r\n how many times to attempt to fit the model (see fitgood)\r\n return_params : bool\r\n whether to return the parameter matrix\r\n **kwargs\r\n dictionary of keyword arguments including those\r\n that can be passed to fitgood\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a (2,(order+1)*(order+2)/2) matrix of parameters for this matrix\r\n (or None if return_params=False)\r\n \"\"\"\r\n def fitgood(src, dst, params, atol=1e-3, rtol=0, **kwargs):\r\n \"\"\"check if model produces a 'good' result\r\n\r\n Parameters\r\n ----------\r\n src : numpy.array\r\n a Nx2 matrix of source points\r\n dst : numpy.array\r\n a Nx2 matrix of destination points\r\n params : numpy.array\r\n a Kx2 matrix of parameters\r\n atol : float\r\n absolute tolerance as in numpy.allclose for\r\n transformed sample points\r\n rtol : float\r\n relative tolerance as in numpy.allclose for\r\n transformed sample points\r\n\r\n Returns\r\n -------\r\n bool\r\n whether the goodness condition is met\r\n \"\"\"\r\n result = Polynomial2DTransform(params=params).tform(src)\r\n t = np.allclose(\r\n result, dst,\r\n atol=atol, rtol=rtol)\r\n return t\r\n\r\n estimated = False\r\n tries = 0\r\n while (tries < max_tries and not estimated):\r\n tries += 1\r\n try:\r\n params = Polynomial2DTransform.fit(src, dst, order=order)\r\n except (LinAlgError, ValueError) as e:\r\n logger.debug('Encountered error {}'.format(e))\r\n continue\r\n estimated = (fitgood(src, dst, params, **kwargs) if\r\n test_coords else True)\r\n\r\n if tries == max_tries and not estimated:\r\n raise EstimationError('Could not fit Polynomial '\r\n 'in {} attempts!'.format(tries))\r\n logger.debug('fit parameters in {} attempts'.format(tries))\r\n self.params = params\r\n if return_params:\r\n return self.params\r\n\r\n @staticmethod\r\n def _dataStringfromParams(params=None):\r\n \"\"\"method for producing a dataString from the parameters\"\"\"\r\n return ' '.join([str(i).replace('e-0', 'e-').replace('e+0', 'e+')\r\n for i in params.flatten()]).replace('e', 'E')\r\n\r\n def _process_dataString(self, datastring):\r\n \"\"\"generate datastring and param attributes from datastring\"\"\"\r\n dsList = datastring.split(' ')\r\n self.params = Polynomial2DTransform._format_raveled_params(dsList)\r\n\r\n @staticmethod\r\n def _format_raveled_params(raveled_params):\r\n \"\"\"method 
to reshape linear parameters into parameter matrix\r\n\r\n Parameters\r\n ----------\r\n raveled_params : numpy.array\r\n a K-long vector of parameters\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a (2,K/2) matrix of parameters, with\r\n first row for x and 2nd row for y\r\n \"\"\"\r\n halfway = int(len(raveled_params) / 2)\r\n return np.array(\r\n [[float(d) for d in raveled_params[:halfway]],\r\n [float(d) for d in raveled_params[halfway:]]])\r\n\r\n def tform(self, points):\r\n \"\"\"transform a set of points through this transformation\r\n\r\n Parameters\r\n ----------\r\n points : numpy.array\r\n a Nx2 array of x,y points\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a Nx2 array of x,y points after transformation\r\n \"\"\"\r\n dst = np.zeros(points.shape)\r\n x = points[:, 0]\r\n y = points[:, 1]\r\n\r\n o = int((-3 + np.sqrt(9 - 4 * (2 - len(self.params.ravel())))) / 2)\r\n pidx = 0\r\n for j in range(o + 1):\r\n for i in range(j + 1):\r\n dst[:, 0] += self.params[0, pidx] * x ** (j - i) * y ** i\r\n dst[:, 1] += self.params[1, pidx] * x ** (j - i) * y ** i\r\n pidx += 1\r\n return dst\r\n\r\n def coefficients(self, order=None):\r\n \"\"\"determine number of coefficient terms in transform for a given order\r\n\r\n Parameters\r\n ----------\r\n order : int, optional\r\n order of polynomial, defaults to self.order\r\n\r\n Returns\r\n -------\r\n int\r\n number of coefficient terms expected in transform\r\n\r\n \"\"\"\r\n if order is None:\r\n order = self.order\r\n return (order + 1) * (order + 2)\r\n\r\n def asorder(self, order):\r\n '''return a polynomial transform representation of this\r\n transformation with a higher order\r\n\r\n Parameters\r\n ----------\r\n order : int\r\n desired order (must be >= the current order)\r\n\r\n Returns\r\n -------\r\n :class:`Polynomial2DTransform`\r\n transform of the requested order\r\n\r\n Raises\r\n ------\r\n ConversionError\r\n if the target order is lower than the current order\r\n '''\r\n if self.order > order:\r\n raise ConversionError(\r\n 'transformation {} is order {} -- conversion to '\r\n 'order {} not supported'.format(\r\n self.dataString, self.order, order))\r\n new_params = np.zeros([2, self.coefficients(order) // 2])\r\n new_params[:self.params.shape[0], :self.params.shape[1]] = self.params\r\n return Polynomial2DTransform(params=new_params)\r\n\r\n @staticmethod\r\n def fromAffine(aff):\r\n \"\"\"return a polynomial transformation equivalent to a given Affine\r\n\r\n Parameters\r\n ----------\r\n aff : AffineModel\r\n transform to become equivalent to\r\n\r\n Returns\r\n -------\r\n Polynomial2DTransform\r\n Order 1 transform equal in effect to aff\r\n\r\n Raises\r\n ------\r\n ConversionError\r\n if input model is not AffineModel\r\n \"\"\"\r\n if not isinstance(aff, AffineModel):\r\n raise ConversionError('attempting to convert a nonaffine model!')\r\n return Polynomial2DTransform(order=1, params=np.array([\r\n [aff.M[0, 2], aff.M[0, 0], aff.M[0, 1]],\r\n [aff.M[1, 2], aff.M[1, 0], aff.M[1, 1]]]))\r\n\r\n\r\ndef estimate_dstpts(transformlist, src=None):\r\n \"\"\"estimate destination points for list of transforms. 
Recurses\r\n through lists.\r\n\r\n Parameters\r\n ----------\r\n transformlist : :obj:list of :obj:Transform\r\n transforms that have a tform method implemented\r\n src : numpy.array\r\n a Nx2 array of source points\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n Nx2 array of destination points\r\n \"\"\"\r\n dstpts = src\r\n for tform in transformlist:\r\n if isinstance(tform, list):\r\n dstpts = estimate_dstpts(tform, dstpts)\r\n else:\r\n dstpts = tform.tform(dstpts)\r\n return dstpts\r\n\r\n\r\nclass NonLinearCoordinateTransform(Transform):\r\n \"\"\"\r\n render-python class that implements the\r\n mpicbg.trakem2.transform.NonLinearCoordinateTransform class\r\n\r\n Parameters\r\n ----------\r\n dataString: str or None\r\n data string of transformation\r\n labels : list of str\r\n list of labels to give this transform\r\n json: dict or None\r\n json compatible dictionary representation of the transformation\r\n\r\n Returns\r\n -------\r\n :class:`NonLinearTransform`\r\n a transform instance\r\n\r\n\r\n \"\"\"\r\n\r\n className = 'mpicbg.trakem2.transform.NonLinearCoordinateTransform'\r\n\r\n def __init__(self, dataString=None, json=None, transformId=None,\r\n labels=None):\r\n if json is not None:\r\n self.from_dict(json)\r\n else:\r\n if dataString is not None:\r\n self._process_dataString(dataString)\r\n if labels is not None:\r\n self.labels = labels\r\n self.transformId = transformId\r\n self.className = 'mpicbg.trakem2.transform.NonLinearCoordinateTransform'\r\n\r\n def _process_dataString(self, dataString):\r\n\r\n fields = dataString.split(\" \")\r\n\r\n self.dimension = int(fields[0])\r\n self.length = int(fields[1])\r\n\r\n # cutoff whitespace if there\r\n fields = fields[0:2 + 4 * self.length + 2]\r\n # last 2 fields are width and height\r\n self.width = int(fields[-2])\r\n self.height = int(fields[-1])\r\n\r\n data = np.array(fields[2:-2], dtype='float32')\r\n try:\r\n self.beta = data[0:2 * self.length].reshape(self.length, 2)\r\n except ValueError as e:\r\n raise RenderError(\r\n 'Incorrect number of coefficients in '\r\n 'NonLinearCoordinateTransform. 
msg: {}'.format(e))\r\n if not (self.beta.shape[0] == self.length):\r\n raise RenderError(\"not correct number of coefficents\")\r\n\r\n # normMean and normVar follow\r\n self.normMean = data[self.length * 2:self.length * 3]\r\n self.normVar = data[self.length * 3:self.length * 4]\r\n if not (self.normMean.shape[0] == self.length):\r\n raise RenderError(\r\n \"incorrect number of normMean coefficents \"\r\n \"{} != length {}\".format(self.normMean.shape[0], self.length))\r\n if not (self.normVar.shape[0] == self.length):\r\n raise RenderError(\r\n \"incorrect number of normVar coefficents \"\r\n \"{} != {}\".format(self.normVar.shape[0], self.length))\r\n\r\n def kernelExpand(self, src):\r\n \"\"\"creates an expanded representation of the x,y\r\n src points in a polynomial form\r\n\r\n Parameters\r\n ----------\r\n points : numpy.array\r\n a Nx2 array of x,y points\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a (N x self.length) array of coefficents\r\n \"\"\"\r\n x = src[:, 0]\r\n y = src[:, 1]\r\n\r\n expanded = np.zeros([len(x), self.length])\r\n pidx = 0\r\n for i in range(1, self.dimension + 1):\r\n for j in range(i, -1, -1):\r\n expanded[:, pidx] = (\r\n np.power(x, j) * np.power(y, i - j))\r\n pidx += 1\r\n\r\n expanded[:, :-1] = ((expanded[:, :-1] - self.normMean[:-1]) /\r\n self.normVar[:-1])\r\n expanded[:, -1] = 100.0\r\n return expanded\r\n\r\n def tform(self, src):\r\n \"\"\"transform a set of points through this transformation\r\n\r\n Parameters\r\n ----------\r\n points : numpy.array\r\n a Nx2 array of x,y points\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n a Nx2 array of x,y points after transformation\r\n \"\"\"\r\n\r\n # final double[] featureVector = kernelExpand(position);\r\n # return multiply(beta, featureVector);\r\n nsrc = np.array(src, dtype=np.float64)\r\n featureVector = self.kernelExpand(nsrc)\r\n\r\n dst = np.zeros(src.shape)\r\n for i in range(0, featureVector.shape[1]):\r\n dst[:, 0] = dst[:, 0] + (featureVector[:, i] * self.beta[i, 0])\r\n dst[:, 1] = dst[:, 1] + (featureVector[:, i] * self.beta[i, 1])\r\n return np.array(dst, dtype=src.dtype)\r\n\r\n @property\r\n def dataString(self):\r\n shapestring = '{} {}'.format(self.dimension, self.length)\r\n betastring = ' '.join([str(i).replace('e-0', 'e-').replace('e+0', 'e+')\r\n for i in self.beta.ravel()]).replace('e', 'E')\r\n meanstring = ' '.join([str(i).replace('e-0', 'e-').replace('e+0', 'e+')\r\n for i in self.normMean]).replace('e', 'E')\r\n varstring = ' '.join([str(i).replace('e-0', 'e-').replace('e+0', 'e+')\r\n for i in self.normVar]).replace('e', 'E')\r\n dimstring = '{} {}'.format(self.height, self.width)\r\n return '{} {} {} {} {} '.format(\r\n shapestring, betastring, meanstring, varstring, dimstring)\r\n\r\n\r\nclass NonLinearTransform(NonLinearCoordinateTransform):\r\n className = 'mpicbg.trakem2.transform.nonLinearTransform'\r\n\r\n\r\nclass LensCorrection(NonLinearCoordinateTransform):\r\n \"\"\"\r\n a placeholder for the lenscorrection transform, same as NonLinearTransform\r\n for now\r\n \"\"\"\r\n className = 'lenscorrection.NonLinearTransform'\r\n\r\n\r\ndef estimate_transformsum(transformlist, src=None, order=2):\r\n \"\"\"pseudo-composition of transforms in list of transforms\r\n using source point transformation and a single estimation.\r\n Will produce an Affine Model if all input transforms are Affine,\r\n otherwise will produce a Polynomial of specified order\r\n\r\n Parameters\r\n ----------\r\n transformlist : :obj:`list` of :obj:`Transform`\r\n list of transform objects 
that implement tform\r\n src : numpy.array\r\n Nx2 array of source points for estimation\r\n order : int\r\n order of Polynomial output if transformlist\r\n inputs are non-Affine\r\n Returns\r\n -------\r\n :class:`AffineModel` or :class:`Polynomial2DTransform`\r\n best estimate of transformlist in a single transform of this order\r\n \"\"\"\r\n def flatten(l):\r\n \"\"\"generator-iterator to flatten deep lists of lists\"\"\"\r\n for i in l:\r\n if isinstance(i, Iterable):\r\n try:\r\n notstring = not isinstance(i, basestring)\r\n except NameError as e:\r\n notstring = not isinstance(i, str)\r\n if notstring:\r\n for sub in flatten(i):\r\n yield sub\r\n else:\r\n yield i\r\n\r\n dstpts = estimate_dstpts(transformlist, src)\r\n tforms = flatten(transformlist)\r\n if all([(tform.className == AffineModel.className)\r\n for tform in tforms]):\r\n am = AffineModel()\r\n am.estimate(A=src, B=dstpts, return_params=False)\r\n return am\r\n return Polynomial2DTransform(src=src, dst=dstpts, order=order)\r\n","sub_path":"renderapi/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":50912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"328959593","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom datetime import datetime\r\nimport pytz\r\ntz = pytz.timezone('Asia/Seoul')\r\n\r\ndef print_log(message, print_enable = True, log_enable = True, new_line = True):\r\n date_time = datetime.now(tz).strftime('%Y-%m-%d %H:%M:%S')\r\n\r\n # print to console\r\n if print_enable is True:\r\n if message == '\\n':\r\n print('')\r\n elif new_line is True:\r\n print(date_time, message)\r\n else:\r\n print(message, end='')\r\n \r\n # log to text file\r\n if log_enable is True:\r\n log_file_path = 'log.txt'\r\n logger = open(log_file_path, 'a')\r\n if message == '\\n':\r\n logger.write('\\n')\r\n elif new_line is True:\r\n logger.write(date_time + ': ' + message + '\\n')\r\n else:\r\n logger.write(message)\r\n logger.flush()\r\n logger.close()\r\n","sub_path":"printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"160966872","text":"from setuptools import setup, find_packages\n\nversion_tuple = __import__('pascut').VERSION\n\nif version_tuple[2] is not None:\n version = \"%d.%d.%s\" % version_tuple\nelse:\n version = \"%d.%d\" % version_tuple[:2]\n\ndependencies = ['plugpy >= 0.2.1', 'werkzeug >= 0.5']\n\nsetup(\n name = \"pascut\",\n version = version,\n url = 'http://bitbucket.org/mopemope/pascut/',\n author = 'yutaka.matsubara',\n author_email = 'yutaka.matsubara@gmail.com',\n maintainer = 'yutaka.matsubara',\n maintainer_email = 'yutaka.matsubara@gmail.com',\n license='MIT License',\n description = 'Automation Flash build tool',\n platforms = ['Any'],\n install_requires = dependencies,\n include_package_data = True,\n packages = find_packages(),\n package_data = {'pascut': ['plugins/*.py', 'plugins/js/*.js', ],},\n entry_points = {\n 'console_scripts': [\n 'pascut = pascut.main:main'\n ]\n },\n classifiers = [\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\"\n ]\n)\n","sub_path":"pypi_install_script/pascut-0.1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"296001378","text":"\nINDEX_ALPHABET = {\n 'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6,\n 'H': 7, 'I': 8, 'J': 9, 'K': 10, 'L': 11, 'M': 12, 'N': 13,\n 'O': 14, 'P': 15, 'Q': 16, 'R': 17, 'S': 18, 'T': 19, 'U': 20,\n 'V': 21, 'W': 22, 'X': 23, 'Y': 24, 'Z': 25\n}\n\nALPHABET = {\n 'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0,\n 'H': 0, 'I': 0, 'J': 0, 'K': 0, 'L': 0, 'M': 0, 'N': 0,\n 'O': 0, 'P': 0, 'Q': 0, 'R': 0, 'S': 0, 'T': 0, 'U': 0,\n 'V': 0, 'W': 0, 'X': 0, 'Y': 0, 'Z': 0\n\n}\n\nFREQUENCY_ALPHABET = {\n 'A': 0.082, 'B': 0.015, 'C': 0.028, 'D': 0.043, 'E': 0.127, 'F': 0.022, 'G': 0.02,\n 'H': 0.061, 'I': 0.07, 'J': 0.002, 'K': 0.008, 'L': 0.04, 'M': 0.024, 'N': 0.067,\n 'O': 0.075, 'P': 0.019, 'Q': 0.001, 'R': 0.06, 'S': 0.063, 'T': 0.091, 'U': 0.028,\n 'V': 0.01, 'W': 0.023, 'X': 0.01, 'Y': 0.02, 'Z': 0.01\n}\n\nPLAIN_TEXT = 'it took erno rubik one month to learn how to do a rubik cube. some ' \\\n 'people started thinking about how to complete and in years have got little further ' \\\n 'than one side. if you want to learn how to solve the rubik cube look no further you ' \\\n 'have come to the right place getting help with solving the rubik cube is not cheating. ' \\\n 'there are quintillion possibilities, but only one correct solution. hence without knowing ' \\\n 'how to solve a rubik cube it is nearly impossible. this six step guide will take you through ' \\\n 'everything you need to know when it comes to solving the rubik cube. it is really simple, ' \\\n 'you just have to follow the steps and you will be solving the rubik cube in less than two minutes'\n\nCIPHER_TEXT = 'KXTGOBGVNGRLDMKGNVOSNLHKQPESREJSWLOUQERMBZMGUTEJQQEHEFRPEKTRTXEVTYKRKA\\\nNXCFOMTYQATGCFOTLWTVCRDANPGERKHRXIGGTCKXTDEWWVTZEIVLAFOEGWIVEZHCOMWRPXT\\\nGLVCVNZONVSSGLMGXHWRLDMKUUSGPOGKEQJUJTYGVYGUYCZEUODGXOLHVTMGZTGNECWGV\\\nVXIFGYGPPOIKJWODVZPKTZEIWFICCLDIIKNFVGHWAKKRGLHVTIAJEHWMNLICNMOFPFUWITICKXIWS\\\nSWXOFLPQREUOITICLSFNYTAOEJINUENKXHGUKMROOIEILOOTFUSLNERTYBAKTWFEATZURESRCA\\\nMMHOJUMBDEKJMSKIOUXEHGLKHEOICNXACEPQYTZRFWKHWVVTCTZIEICOMNVGHTGKEQAWZEEK\\\nXCGMVUXOKOCXMNYTYGVUTIBEYBWIKKWRWACNCSAMGNIYGUAWWTZAMGXOXOCNSWLHVUXEHS\\\nRPHYGUNKPLTEJQPVANXVLEJUSKOCMBVKRLWSJVLAFTNQQIFUKGW'\n\n\nclass Vigenere:\n\n def __init__(self):\n self.alphabet = ALPHABET\n self.index_alphabet = INDEX_ALPHABET\n self.freq_alphabet = FREQUENCY_ALPHABET\n\n def convert_number_to_character(self, number):\n for ch in self.index_alphabet:\n if self.index_alphabet[ch] == number:\n return ch\n return\n\n @staticmethod\n def generate_key(text, key):\n key = list(key)\n if len(text) == len(key):\n return key\n for i in range(len(text) - len(key)):\n key.append(key[i % len(key)])\n return \"\".join(key)\n\n # count each of letters in c_txt\n def count_charater(self, cipher_text):\n alphabet = dict(self.alphabet)\n for c in cipher_text:\n if c in alphabet:\n alphabet[c] += 1\n\n return alphabet\n\n @staticmethod\n def print_alphabet(alphabet):\n for i, (x, y) in enumerate(alphabet.items()):\n print(x, '=', y, end=' : ')\n if (i+1) % 7 == 0:\n print()\n\n\n\n\nif __name__ == '__main__':\n vin = Vigenere()\n # vin.count_word(vin.c_txt)\n #vin.run(11)\n #vin.encode(PLAIN_TEXT, 'QUANGTHANH')\n #vin.find_key(10)\n vin.decode(CIPHER_TEXT, 'QUANGTHANH')\n","sub_path":"Vigenere.py","file_name":"Vigenere.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"649472365","text":"# Copyright 2018 The Google AI Language Team 
Authors and The HuggingFace Inc. team.\n# Copyright (c) Microsoft, Inc. 2020\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# This piece of code is modified based on https://github.com/huggingface/transformers\n\nimport copy\nimport torch\nfrom torch import nn\nfrom collections import Sequence\nfrom packaging import version\nimport numpy as np\nimport math\nimport os\nimport pdb\n\nimport json\nfrom .ops import *\nfrom .disentangled_attention import *\n\n__all__ = ['BertEncoder', 'BertEmbeddings', 'ACT2FN', 'BertLayerNorm']\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\ndef linear_act(x):\n return x\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish, \"tanh\": torch.nn.functional.tanh, \"linear\": linear_act, 'sigmoid': torch.sigmoid}\n\n\nclass BertLayerNorm(nn.Module):\n \"\"\"LayerNorm module in the TF style (epsilon inside the square root).\n \"\"\"\n\n def __init__(self, size, width_mult, eps=1e-12):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(size))\n self.weight.data /= width_mult # initialize LN weights like O(1/n)\n self.bias = nn.Parameter(torch.zeros(size))\n self.variance_epsilon = eps\n self.width_mult = width_mult # LUP\n\n def forward(self, x):\n input_type = x.dtype\n x = x.float()\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n x = x.to(input_type)\n y = self.weight * x * self.width_mult + self.bias * self.width_mult # InfLayerNorm\n return y\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.width_mult = config.hidden_size / config.base_size\n self.dense = LUPLinear(config.hidden_size, config.hidden_size, width_mult=self.width_mult)\n self.LayerNorm = BertLayerNorm(config.hidden_size, self.width_mult, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n def forward(self, hidden_states, input_states, mask=None):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states += input_states\n hidden_states = MaskedLayerNorm(self.LayerNorm, hidden_states)\n return hidden_states\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = DisentangledSelfAttention(config)\n self.output = BertSelfOutput(config)\n self.config = config\n\n def forward(self, hidden_states, attention_mask, return_att=False, query_states=None, relative_pos=None, rel_embeddings=None):\n self_output = self.self(hidden_states, attention_mask, return_att, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings)\n if return_att:\n self_output, att_matrix = self_output\n if query_states is None:\n query_states = hidden_states\n attention_output = self.output(self_output, query_states, attention_mask)\n\n if return_att:\n return (attention_output, att_matrix)\n else:\n return attention_output\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.width_mult = config.hidden_size / config.base_size\n self.dense = 
LUPLinear(config.hidden_size, config.intermediate_size, width_mult=self.width_mult)\n self.intermediate_act_fn = ACT2FN[config.hidden_act] \\\n if isinstance(config.hidden_act, str) else config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.width_mult = config.hidden_size / config.base_size\n self.dense = LUPLinear(config.intermediate_size, config.hidden_size, width_mult=self.width_mult)\n self.LayerNorm = BertLayerNorm(config.hidden_size, self.width_mult, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n def forward(self, hidden_states, input_states, mask=None):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states += input_states\n hidden_states = MaskedLayerNorm(self.LayerNorm, hidden_states)\n return hidden_states\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask, return_att=False, query_states=None, relative_pos=None, rel_embeddings=None):\n attention_output = self.attention(hidden_states, attention_mask, return_att=return_att, \\\n query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings)\n if return_att:\n attention_output, att_matrix = attention_output\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output, attention_mask)\n if return_att:\n return (layer_output, att_matrix)\n else:\n return layer_output\n\nclass BertEncoder(nn.Module):\n \"\"\" Modified BertEncoder with relative position bias support\n \"\"\"\n def __init__(self, config):\n super().__init__()\n layer = BertLayer(config)\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])\n self.relative_attention = getattr(config, 'relative_attention', False)\n if self.relative_attention:\n self.max_relative_positions = getattr(config, 'max_relative_positions', -1)\n if self.max_relative_positions <1:\n self.max_relative_positions = config.max_position_embeddings\n self.rel_embeddings = nn.Embedding(self.max_relative_positions*2, config.hidden_size)\n def get_rel_embedding(self):\n rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None\n return rel_embeddings\n\n def get_attention_mask(self, attention_mask):\n if attention_mask.dim()<=2:\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n attention_mask = extended_attention_mask*extended_attention_mask.squeeze(-2).unsqueeze(-1)\n attention_mask = attention_mask.byte()\n elif attention_mask.dim()==3:\n attention_mask = attention_mask.unsqueeze(1)\n\n return attention_mask\n\n def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):\n if self.relative_attention and relative_pos is None:\n q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)\n relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)\n return relative_pos\n\n def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, return_att=False, query_states = None, relative_pos=None):\n 
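# run the encoder stack below: each BertLayer consumes the previous layer's\n # output via next_kv, and query_states, when provided, is updated layer by layer\n 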
attention_mask = self.get_attention_mask(attention_mask)\n relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)\n\n all_encoder_layers = []\n att_matrixs = []\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[0]\n else:\n next_kv = hidden_states\n rel_embeddings = self.get_rel_embedding()\n for i, layer_module in enumerate(self.layer):\n output_states = layer_module(next_kv, attention_mask, return_att, query_states = query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings)\n if return_att:\n output_states, att_m = output_states\n\n if query_states is not None:\n query_states = output_states\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[i+1] if i+1 < len(self.layer) else None\n else:\n next_kv = output_states\n\n if output_all_encoded_layers:\n all_encoder_layers.append(output_states)\n if return_att:\n att_matrixs.append(att_m)\n if not output_all_encoded_layers:\n all_encoder_layers.append(output_states)\n if return_att:\n att_matrixs.append(att_m)\n if return_att:\n return (all_encoder_layers, att_matrixs)\n else:\n return all_encoder_layers\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n padding_idx = getattr(config, 'padding_idx', 0)\n self.width_mult = config.hidden_size / config.base_size\n self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)\n self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx = padding_idx)\n\n self.position_biased_input = getattr(config, 'position_biased_input', True)\n if not self.position_biased_input:\n self.position_embeddings = None\n else:\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)\n\n if config.type_vocab_size>0:\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)\n \n if self.embedding_size != config.hidden_size:\n self.embed_proj = LUPLinear(self.embedding_size, config.hidden_size, bias=False, width_mult=self.width_mult)\n self.LayerNorm = BertLayerNorm(config.hidden_size, self.width_mult, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.output_to_half = False\n self.config = config\n\n self._reset_parameters()\n \n def _reset_parameters(self):\n self.word_embeddings.weight.data.normal_(0, 1/self.width_mult)\n if self.position_embeddings is not None:\n self.position_embeddings.weight.data.normal_(0, 1/self.width_mult)\n if self.config.type_vocab_size>0:\n self.token_type_embeddings.weight.data.normal_(0, 1/self.width_mult)\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None, mask = None):\n seq_length = input_ids.size(1)\n if position_ids is None:\n position_ids = torch.arange(0, seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids) * self.width_mult\n if self.position_embeddings is not None:\n position_embeddings = self.position_embeddings(position_ids.long()) * self.width_mult\n else:\n position_embeddings = torch.zeros_like(words_embeddings)\n\n embeddings = words_embeddings\n if self.position_biased_input:\n embeddings += position_embeddings\n if self.config.type_vocab_size>0:\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n 
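# segment (token_type) embeddings are only added when type_vocab_size > 0\n 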
embeddings += token_type_embeddings\n\n if self.embedding_size != self.config.hidden_size:\n embeddings = self.embed_proj(embeddings)\n\n embeddings = MaskedLayerNorm(self.LayerNorm, embeddings, mask)\n embeddings = self.dropout(embeddings)\n return embeddings\n","sub_path":"DeBERTa/deberta/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":11322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"444853011","text":"#encoding=utf-8\nimport logging\nimport datetime\nimport time\nimport hashlib\nfrom mysqlConn import *\n\ndef verifyPwd(username, pwd):\n conn = connectDB()\n cur = conn.cursor()\n pwdhash = hashlib.md5(username + pwd).hexdigest()\n \n sql = \"\"\"SELECT * FROM EP_USER_ACCOUNT WHERE username = '%s' AND pwd='%s';\"\"\" % (username, pwdhash)\n count = cur.execute(sql)\n logging.info(\"record count:\" + str(count))\n result = cur.fetchall()\n cur.close()\n \n if count == 0:\n return False\n return True\n\ndef selectByUsername(username):\n conn = connectDB()\n cur = conn.cursor()\n sql = \"\"\"SELECT * FROM EP_USER_ACCOUNT WHERE username = '%s';\"\"\" % (username)\n count = cur.execute(sql)\n logging.info(\"record count:\" + str(count))\n result = cur.fetchall()\n cur.close() \n return len(result)\n\ndef insertUserRecord(username, pwd):\n conn = connectDB()\n cur = conn.cursor()\n \n pwdhash = hashlib.md5(username + pwd).hexdigest()\n \n sql = \"\"\"\n INSERT INTO `EP_USER_ACCOUNT` (\n `username`, `nick_name`, `pwd`, `privilege`, `email`, `mobile`, `user_head_img_path`, \n `create_time`, `update_time`, `RESERVE1`) \n VALUES (\n '%s', '', '%s', '', '', '', '', \n '%s', '', NULL);\"\"\" % (\n username, pwdhash,\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S %f'))\n count = cur.execute(sql)\n logging.info(\"record count:\" + str(count))\n conn.commit()\n return count\n pass\n\nif __name__ == \"__main__\":\n #conn=MySQLdb.connect(host='127.0.0.1', user='root',passwd='root',db='batch_she_test',port=3306)\n pass\n\n","sub_path":"src/db/user_account.py","file_name":"user_account.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"557723434","text":"# -*- coding:utf-8 -*-\n\n\ndef from_url(url):\n return Redis()\n\n\nclass InvalideValueException(Exception):\n pass\n\n\nclass FakePipeLine(object):\n\n def __init__(self, redis):\n self.redis = redis\n self.memdb = []\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def __enter__(self):\n return self\n\n def _format_name(self, name):\n return name\n\n def set(self, name, value):\n r = self.redis.set(name, value)\n if r not in self.memdb:\n self.memdb.append(r)\n\n def get(self, name):\n r = self.redis.get(name)\n self.memdb.append(r)\n\n def delete(self, name):\n name = self._format_name(name)\n self.redis.delete(name)\n try:\n self.memdb.remove(name)\n except ValueError:\n pass\n # self.redis.clear()\n\n def execute(self):\n r = self.memdb\n self.memdb = []\n return r\n\n\nclass FakeScript(object):\n\n def __init__(self, client, script):\n self.registered_client = client\n self.script = script\n self.sha = ''\n\n\nclass Redis(object):\n\n def __init__(self, *args, **kwargs):\n self.d = dict()\n self.ld = dict()\n self.hd = dict()\n self.zd = dict()\n\n def exists(self, name):\n if self.d.get(name) is not None:\n return True\n if self.ld.get(name) is not None:\n return True\n if self.hd.get(name) is not None:\n return True\n if self.zd.get(name) is not None:\n return True\n 
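# fall through: name was not found in any of the fake stores\n 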
return False\n\n def set(self, name, value, *args, **kwargs):\n if not self.d.get(name):\n self.d[name] = None\n self.d[name] = value\n\n def get(self, name, *args, **kwargs):\n return self.d.get(name, None)\n\n def mget(self, names):\n return [self.d.get(name) or None for name in names]\n\n def setnx(self, key, value):\n cached = self.d.get(key, None)\n if cached:\n return 0\n self.d[key] = value\n return 1\n\n def delete(self, name):\n self.d.pop(name, None)\n self.ld.pop(name, None)\n self.hd.pop(name, None)\n self.zd.pop(name, None)\n\n def pipeline(self):\n return FakePipeLine(self)\n\n def ping(self):\n return True\n\n def register_script(self, script):\n return FakeScript(self, script)\n\n def lock(self, *args, **kwargs):\n from mock import MagicMock\n return MagicMock()\n\n def expire(self, key, expire):\n pass\n\n def lpop(self, k, v):\n if not self.ld.get(k):\n return\n\n _list = self.ld.get(k)\n assert isinstance(_list, list)\n self.ld[k] = _list[1:]\n\n def lpush(self, k, v):\n if not (isinstance(v, int) or isinstance(v, str)):\n raise InvalideValueException(\n 'only int or str allowed in redis list'\n )\n if not self.ld.get(k):\n self.ld[k] = []\n self.ld[k].insert(0, v)\n\n def lrem(self, k, v, num=0):\n if not self.ld.get(k):\n return\n\n _list = self.ld.get(k)\n assert isinstance(_list, list)\n for l in _list:\n if l == v:\n self.ld[k].remove(v)\n\n def llen(self, k):\n if not self.ld.get(k):\n return 0\n return len(self.ld.get(k))\n\n def lindex(self, k, v):\n if not self.ld.get(k):\n return\n try:\n return self.ld.get(k).index(v)\n except IndexError:\n return\n\n def rpush(self, k, v):\n if not (isinstance(v, int) or isinstance(v, str)):\n raise InvalideValueException(\n 'only int or str allowed in redis list'\n )\n data = self.ld.get(k, [])\n data.append(v)\n self.ld[k] = data\n\n def lrange(self, k, start, stop):\n data = self.ld.get(k, [])\n if start < 0:\n start = 0\n if stop == -1:\n return data[start:]\n elif stop >= 0:\n stop += 1\n return data[start:stop]\n\n def ltrim(self, name, start, stop):\n data = self.ld.get(name, [])\n if start < 0:\n start = 0\n if stop == -1:\n self.ld[name] = data[start:]\n return data[start:]\n elif stop > 0:\n stop += 1\n self.ld[name] = data[start:stop]\n return data[start:stop]\n\n def hset(self, name, key, value):\n if not self.hd.get(name):\n self.hd[name] = {}\n self.hd[name][key] = value\n\n def hdel(self, name, key):\n if not self.hd.get(name):\n return\n dict = self.hd[name]\n del(dict[key])\n self.hd[name] = dict\n\n def hexists(self, name, key):\n if not self.hd.get(name):\n return False\n dict = self.hd[name]\n return True if dict.get(key) else False\n\n def hget(self, name, key):\n if not self.hd.get(name):\n return None\n return self.hd[name][key]\n\n def hkeys(self, name):\n if not self.hd.get(name):\n return None\n return self.hd[name].keys()\n\n def hlen(self, name):\n if not self.hd.get(name):\n return 0\n return len(self.hd[name].keys())\n\n def hgetall(self, name):\n if not self.hd.get(name):\n return None\n return self.hd[name]\n\n def zadd(self, name, key, rank):\n \"\"\"Add one member to a sorted set, or update its score if it already exists\"\"\"\n if not self.zd.get(name):\n self.zd[name] = []\n v = (rank, key)\n ls = self.zd[name]\n for i, (r, k) in enumerate(ls):\n if key == k:\n ls.pop(i)\n ls.append(v)\n self.zd[name] = ls\n\n def zcard(self, name):\n return len(self.zd.keys())\n\n def zrem(self, name, key):\n if not self.zd.get(name):\n return\n ls = self.zd.get(name)\n index = -1\n for i, t in 
enumerate(ls):\n if t[1] == key:\n index = i\n if index != -1:\n ls.pop(index)\n self.zd[name] = ls\n\n def zrange(self, name, start, end):\n if not self.zd.get(name):\n return []\n ls = self.zd.get(name)\n sorted_list = sorted(ls, key=lambda l: int(l[0]))\n return [key for _, key in sorted_list]\n\n def zrangebyscore(self, name, min, max):\n list = self.zd.get(name)\n if not list:\n return []\n sorted_list = sorted(list, key=lambda l: int(l[0]))\n if min == '-inf':\n min = sorted_list[0][0]\n if max == '+inf':\n max = sorted_list[-1][0]\n filtered = filter(lambda l: l[0] >= min and l[0] <= max, sorted_list)\n return [key for _, key in filtered]\n\n def clear(self):\n self.d = dict()\n self.ld = dict()\n self.hd = dict()\n self.zd = dict()\n","sub_path":"jedi/tests/stubs/redis/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"534863228","text":"import sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\n\n#company[x][y]: the value gained by investing y won in company x\ncompany = [[0 for money in range(n+1)] for com in range(m+1)]\n\n#dp[x][y]: the maximum value achievable with y won across the first x companies\ndp = [[0 for money in range(n+1)] for com in range(m+1)]\n\n#back[x][y]: the amount invested in company x when dp[x][y] was determined\nback = [[0 for money in range(n+1)] for com in range(m+1)]\n\nfor _ in range(n):\n temp = list(map(int, input().split()))\n money = temp[0]\n com = temp[1:]\n for i, c in enumerate(com):\n company[i+1][money] = c\n\nfor x in range(1, m+1):\n for y in range(1, n+1):\n for k in range(y+1):\n ret = dp[x-1][y-k] + company[x][k]\n\n if ret > dp[x][y]:\n dp[x][y] = ret\n back[x][y] = k\n\nprint(dp[m][n])\n\npaths = []\nwhile m:\n paths.append(back[m][n])\n n -= back[m][n]\n m -= 1\n\nfor path in paths[::-1]:\n print(path, end=' ')\n","sub_path":"Hangil/day24_2662_choi.py","file_name":"day24_2662_choi.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"374035413","text":"#!/usr/bin/python\nimport re\nfrom parser import parse\nimport utils\n\nfrom opcodes import opcodes, reverse_opcodes\n\nimport rewriter\n\n\nlabel_counter = [0]\n\n\ndef mksymbol():\n label_counter[0] += 1\n return '_' + str(label_counter[0] - 1)\n\n\n# Compile LLL to EVM\ndef compile_lll(ast):\n symb = mksymbol()\n # Literals\n if not isinstance(ast, utils.astnode):\n return [utils.numberize(ast)]\n\n\n subcodes = map(compile_lll, ast.args[1:])\n\n # Seq\n if ast.fun == 'seq':\n o = []\n for subcode in subcodes:\n o.extend(subcode)\n return o\n elif ast.fun == 'unless':\n assert len(ast.args) == 3\n return subcodes[0] + ['$endif'+symb, 'JUMPI'] + \\\n subcodes[1] + ['~endif'+symb]\n elif ast.fun == 'if':\n assert len(ast.args) == 4\n return subcodes[0] + ['NOT', '$else'+symb, 'JUMPI'] + \\\n subcodes[1] + ['$endif'+symb, 'JUMP', '~else'+symb] + \\\n subcodes[2] + ['~endif'+symb]\n elif ast.fun == 'until':\n return ['~beg'+symb] + subcodes[0] + ['$end'+symb, 'JUMPI'] + \\\n subcodes[1] + ['$beg'+symb, 'JUMP', '~end'+symb]\n elif ast.fun == 'lll':\n LEN = '$begincode'+symb+'.endcode'+symb\n STARTSYMB, STARTIND = '~begincode'+symb, '$begincode'+symb\n ENDSYMB, ENDIND = '~endcode'+symb, '$endcode'+symb\n return [LEN, 'DUP'] + subcodes[1] + [STARTIND, 'CODECOPY'] + \\\n [ENDIND, 'JUMP', STARTSYMB, '#CODE_BEGIN'] + subcodes[0] + \\\n ['#CODE_END', ENDSYMB]\n elif ast.fun == 'alloc':\n return subcodes[0] + ['MSIZE', 'SWAP', 'MSIZE'] + \\\n ['ADD', 1, 'SWAP', 
'SUB', 0, 'SWAP', 'MSTORE8']\n elif ast.fun == 'array_lit':\n x = ['MSIZE', 'DUP']\n for s in subcodes:\n x += s + ['SWAP', 'MSTORE', 'DUP', 32, 'ADD']\n return x[:-3] if len(subcodes) > 0 else ['MSIZE']\n else:\n o = []\n for subcode in subcodes[::-1]:\n o.extend(subcode)\n return o + [ast.fun]\n\n# Dereference labels\ndef dereference(c):\n label_length = utils.log256(len(c)*4)\n iq = [x for x in c]\n mq = []\n pos = 0\n labelmap = {}\n beginning_stack = [0]\n while len(iq):\n front = iq.pop(0)\n if not utils.is_numeric(front) and front[0] == '~':\n labelmap[front[1:]] = pos - beginning_stack[-1]\n elif front == '#CODE_BEGIN':\n beginning_stack.append(pos)\n elif front == '#CODE_END':\n beginning_stack.pop()\n else:\n mq.append(front)\n if utils.is_numeric(front):\n pos += 1 + max(1, utils.log256(front))\n elif front[:1] == '$':\n pos += label_length + 1\n else:\n pos += 1\n oq = []\n for m in mq:\n oqplus = []\n if utils.is_numeric(m):\n L = max(1, utils.log256(m))\n oqplus.append('PUSH' + str(L))\n oqplus.extend(utils.tobytearr(m, L))\n elif m[:1] == '$':\n vals = m[1:].split('.')\n if len(vals) == 1:\n oqplus.append('PUSH'+str(label_length))\n oqplus.extend(utils.tobytearr(labelmap[vals[0]], label_length))\n else:\n oqplus.append('PUSH'+str(label_length))\n value = labelmap[vals[1]] - labelmap[vals[0]]\n oqplus.extend(utils.tobytearr(value, label_length))\n else:\n oqplus.append(m)\n oq.extend(oqplus)\n return oq\n\n\ndef serialize(source):\n def numberize(arg):\n if utils.is_numeric(arg):\n return arg\n elif arg.upper() in reverse_opcodes:\n return reverse_opcodes[arg.upper()]\n elif arg[:4] == 'PUSH':\n return 95 + int(arg[4:])\n elif re.match('^[0-9]*$', arg):\n return int(arg)\n else:\n raise Exception(\"Cannot serialize: \" + str(arg), source)\n return ''.join(map(chr, map(numberize, source)))\n\n\ndef deserialize(source):\n o = []\n i, j = 0, -1\n while i < len(source):\n p = ord(source[i])\n if j >= 0:\n o.append(p)\n elif p >= 96 and p <= 127:\n o.append('PUSH' + str(p - 95))\n else:\n o.append(opcodes[p][0])\n if j < 0 and p >= 96 and p <= 127:\n j = p - 95\n j -= 1\n i += 1\n return map(utils.tokenify, o)\n\n\ndef assemble(source):\n return serialize(dereference(source))\n\n\ndef compile(source):\n return assemble(compile_lll(rewriter.rewrite_to_lll(parse(source))))\n\n\ndef biject(source, byte):\n c = dereference(compile_lll(rewriter.rewrite_to_lll(parse(source))))\n return c[int(byte)].metadata\n\n\ndef encode_datalist(vals):\n def enc(n):\n if utils.is_numeric(n):\n return ''.join(map(chr, utils.tobytearr(n, 32)))\n elif utils.is_string(n) and len(n) == 40:\n return '\\x00' * 12 + n.decode('hex')\n elif utils.is_string(n):\n return '\\x00' * (32 - len(n)) + n\n elif n is True:\n return '\\x00' * 31 + '\\x01'\n elif n is False or n is None:\n return '\\x00' * 32\n if isinstance(vals, (tuple, list)):\n return ''.join(map(enc, vals))\n elif vals == '':\n return ''\n else:\n # Assume you're getting in numbers or 0x...\n return ''.join(map(enc, map(utils.numberize, vals.split(' '))))\n\n\ndef decode_datalist(arr):\n if isinstance(arr, list):\n arr = ''.join(map(chr, arr))\n o = []\n for i in range(0, len(arr), 32):\n o.append(utils.frombytes(arr[i:i + 32]))\n return o\n","sub_path":"jaguar/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"576718102","text":"#The salt is the first 12 characters\nfrom crypt import 
crypt\nf=open('shadow(EJ2Y3)','r')\nlineas=f.readlines()\n\nnombre=str(input(\"Usuario: \"))\ncontraseña=str(input(\"Contraseña: \"))\n\n# Esta es la Sal linea.split(\":\")[1][:12]\nfor linea in lineas:\n\tif nombre==linea.split(\":\")[0]:\n\t\ta=crypt(contraseña,linea.split(\":\")[1][:12])\n\t\tif a==linea.split(\":\")[1]:\n\t\t\tprint(\"Usuario valido. \")\n\t\telse:\n\t\t\tprint(\"Usuario invalido. \")\n\n#probar prueba/asdasd","sub_path":"Marcas/Trabajo3/Ejercicio2.py","file_name":"Ejercicio2.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"540611398","text":"from rest_framework import generics\nfrom rest_framework import permissions\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework import status\n\nfrom users.serializers import UserFollow\nfrom users.models import User\nfrom users.models import FollowRelation\n\n\n@api_view(http_method_names=['GET'])\n@permission_classes(permission_classes=[permissions.AllowAny])\ndef IsFollow(request, subjectUser, objectUser):\n if request.method == 'GET':\n sUser = User.objects.get(pub_id=subjectUser)\n try:\n sUser.follows.get(follow_user__pub_id=objectUser)\n except FollowRelation.DoesNotExist:\n data = {\n 'result': False\n }\n else:\n data = {\n 'result': True\n }\n\n return Response(data, status.HTTP_200_OK)\n\n\n\n\nclass UserFollowsGetView(generics.ListAPIView):\n permission_classes = (\n permissions.AllowAny,\n )\n serializer_class = UserFollow\n\n def get_queryset(self):\n pub_id = self.request.query_params.get('id', None)\n if pub_id:\n return User.objects.all().filter(pub_id=pub_id)\n else:\n return User.objects.none()\n","sub_path":"users/views/api/follows.py","file_name":"follows.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"556496097","text":"from collections import deque\r\n\r\n\r\ndef polaca(entrada):\r\n entrada = entrada.split()\r\n pila = deque()\r\n operadores = \"+*/-\" \r\n for i in range(0, len(entrada)):\r\n if entrada[i] in operadores:\r\n elDeArriba = pila.pop()\r\n elPenultimo = pila.pop()\r\n operador = entrada[i]\r\n if operador == '+':\r\n pila.append(elPenultimo + elDeArriba)\r\n elif operador == '-':\r\n pila.append(elPenultimo - elDeArriba)\r\n elif operador == '*':\r\n pila.append(elPenultimo * elDeArriba)\r\n else: \r\n pila.append(elPenultimo / elDeArriba)\r\n else:\r\n pila.append(int(entrada[i]))\r\n return pila.pop()\r\n\r\n#-------------------------------------------------------------------------\r\n\r\n#Turns a list into a deque and returns\r\ndef toDeque(lista): \r\n d = deque()\r\n for obj in lista:\r\n d.append(obj)\r\n return d\r\n#Return a list with every solicitud paired with the corresponding fridges \r\ndef asignarSolicitudes(neveras,solicitudes):\r\n result = [] #list to return\r\n neverasPerSolicitud = [] #fridges given to each store\r\n count = 0\r\n while(solicitudes):\r\n sol = solicitudes.popleft() #first in first out\r\n while(count < sol[1] and neveras):\r\n if(neveras):\r\n neverasPerSolicitud.append(neveras.pop()) #first in last out\r\n count = count + 1\r\n else:\r\n break;\r\n result = result + [(sol[0], neverasPerSolicitud)]\r\n neverasPerSolicitud = []\r\n count = 0\r\n return result\r\n \r\n \r\nalmacen = [(1,\"haceb\"), (2,\"lg\"), (3,\"ibm\"), (4,\"haceb\"), \r\n(5,\"lg\"), (6,\"ibm\"),(7,\"haceb\"), 
(8,\"lg\"), (9,\"ibm\"),(8,\"lg\"), \r\n(9,\"ibm\")] #9 ibm was the last element added (stack)\r\n\r\nsolicitudes = [(\"eafit\", 10), (\"la14\", 2), (\"olimpica\", 4), \r\n(\"éxito\", 1)] #exito was the first element added (queue)\r\n\r\nsolicitudes.reverse() #Since is queue we need to change the order for the toDeque method (queue)\r\n\r\nalmacen = toDeque(almacen)\r\nsolicitudes = toDeque(solicitudes)\r\n\r\nresult = asignarSolicitudes(almacen, solicitudes)\r\nprint(\"\")\r\nfor sol in result:\r\n print(sol)\r\n\r\n\r\n ","sub_path":"talleres/taller08/taller8.py","file_name":"taller8.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"42561029","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nModule used to construct mock galaxy populations. \nEach mock factory only has knowledge of a simulation halocat \nand composite model object. \nCurrently only composite HOD models are supported. \n\n\"\"\"\n\nimport numpy as np\nfrom multiprocessing import cpu_count\nfrom copy import copy \nfrom astropy.extern import six\nfrom abc import ABCMeta, abstractmethod, abstractproperty\nfrom astropy.table import Table \n\nfrom .mock_factory_template import MockFactory\nfrom .mock_helpers import three_dim_pos_bundle, infer_mask_from_kwargs\n\nfrom .. import model_helpers, model_defaults\n\ntry:\n from ... import mock_observables\n HAS_MOCKOBS = True\nexcept ImportError:\n HAS_MOCKOBS = False\n\nfrom ...sim_manager import sim_defaults\nfrom ...utils.array_utils import randomly_downsample_data\nfrom ...utils.table_utils import SampleSelector\nfrom ...sim_manager import FakeSim\nfrom ...custom_exceptions import *\n\n\n__all__ = ['HodMockFactory']\n__author__ = ['Andrew Hearin']\n\n\nclass HodMockFactory(MockFactory):\n \"\"\" Class responsible for populating a simulation with a \n population of mock galaxies based on an HOD-style model. \n\n Can be thought of as a factory that takes a model \n and simulation halocat as input, \n and generates a mock galaxy population. \n The returned collection of galaxies possesses whatever \n attributes were requested by the model, such as xyz position, \n central/satellite designation, star-formation rate, etc. \n\n \"\"\"\n\n def __init__(self, populate=True, **kwargs):\n \"\"\"\n Parameters \n ----------\n halocat : object, keyword argument\n Object containing the halo catalog and other associated data. \n Produced by `~halotools.sim_manager.CachedHaloCatalog`\n\n model : object, keyword argument\n A model built by a sub-class of `~halotools.empirical_models.HodModelFactory`. \n\n additional_haloprops : string or list of strings, optional \n Each entry in this list must be a column key of ``halocat.halo_table``. \n For each entry of ``additional_haloprops``, each member of \n `mock.galaxy_table` will have a column key storing this property of its host halo. \n If ``additional_haloprops`` is set to the string value ``all``, \n the galaxy table will inherit every halo property in the catalog. Default is None. \n\n populate : boolean, optional \n If set to ``False``, the class will perform all pre-processing tasks \n but will not call the ``model`` to populate the ``galaxy_table`` \n with mock galaxies and their observable properties. Default is ``True``. \n\n apply_completeness_cut : bool, optional \n If True, only halos passing the mass completeness cut defined in \n `~halotools.empirical_models.model_defaults` will be used to populate the mock. \n Default is True. 
\n \"\"\"\n\n super(HodMockFactory, self).__init__(populate=populate, **kwargs)\n\n self.preprocess_halo_catalog()\n\n if populate is True:\n self.populate()\n\n def preprocess_halo_catalog(self, apply_completeness_cut = True, **kwargs):\n \"\"\" Method to pre-process a halo catalog upon instantiation of \n the mock object. This pre-processing includes identifying the \n catalog columns that will be used by the model to create the mock, \n building lookup tables associated with the halo profile, \n and possibly creating new halo properties. \n\n Parameters \n ----------\n logrmin : float, optional \n Minimum radius used to build the lookup table for the halo profile. \n Default is set in `~halotools.empirical_models.model_defaults`. \n\n logrmax : float, optional \n Maximum radius used to build the lookup table for the halo profile. \n Default is set in `~halotools.empirical_models.model_defaults`. \n\n Npts_radius_table : int, optional \n Number of control points used in the lookup table for the halo profile.\n Default is set in `~halotools.empirical_models.model_defaults`. \n\n apply_completeness_cut : bool, optional \n If True, only halos passing the mass completeness cut defined in \n `~halotools.empirical_models.model_defaults` will be used to populate the mock. \n Default is True. \n \"\"\"\n\n ################ Make cuts on halo catalog ################\n # Select host halos only, since this is an HOD-style model\n self.halo_table = SampleSelector.host_halo_selection(\n table = self.halo_table)\n\n # make a conservative mvir completeness cut \n # This cut can be controlled by changing sim_defaults.Num_ptcl_requirement\n if apply_completeness_cut is True:\n cutoff_mvir = sim_defaults.Num_ptcl_requirement*self.halocat.particle_mass\n mass_cut = (self.halo_table['halo_mvir'] > cutoff_mvir)\n self.halo_table = self.halo_table[mass_cut]\n\n ############################################################\n\n ### Create new columns of the halo catalog, if applicable\n try:\n d = self.model.new_haloprop_func_dict\n for new_haloprop_key, new_haloprop_func in d.iteritems():\n self.halo_table[new_haloprop_key] = new_haloprop_func(table = self.halo_table)\n self.additional_haloprops.append(new_haloprop_key)\n except AttributeError:\n pass\n\n self.model.build_lookup_tables(**kwargs)\n\n def populate(self, **kwargs):\n \"\"\" Method populating halos with mock galaxies. 
\n \"\"\"\n self.allocate_memory()\n\n # Loop over all gal_types in the model \n for gal_type in self.gal_types:\n\n # Retrieve the indices of our pre-allocated arrays \n # that store the info pertaining to gal_type galaxies\n gal_type_slice = self._gal_type_indices[gal_type]\n # gal_type_slice is a slice object\n\n # For the gal_type_slice indices of \n # the pre-allocated array self.gal_type, \n # set each string-type entry equal to the gal_type string\n self.galaxy_table['gal_type'][gal_type_slice] = (\n np.repeat(gal_type, self._total_abundance[gal_type],axis=0))\n\n # Store all other relevant host halo properties into their \n # appropriate pre-allocated array \n for halocatkey in self.additional_haloprops:\n self.galaxy_table[halocatkey][gal_type_slice] = np.repeat(\n self.halo_table[halocatkey], self._occupation[gal_type], axis=0)\n\n self.galaxy_table['x'] = self.galaxy_table['halo_x']\n self.galaxy_table['y'] = self.galaxy_table['halo_y']\n self.galaxy_table['z'] = self.galaxy_table['halo_z']\n self.galaxy_table['vx'] = self.galaxy_table['halo_vx']\n self.galaxy_table['vy'] = self.galaxy_table['halo_vy']\n self.galaxy_table['vz'] = self.galaxy_table['halo_vz']\n\n for method in self._remaining_methods_to_call:\n func = getattr(self.model, method)\n gal_type_slice = self._gal_type_indices[func.gal_type]\n func(table = self.galaxy_table[gal_type_slice])\n \n # Positions are now assigned to all populations. \n # Now enforce the periodic boundary conditions for all populations at once\n self.galaxy_table['x'] = model_helpers.enforce_periodicity_of_box(\n self.galaxy_table['x'], self.halocat.Lbox)\n self.galaxy_table['y'] = model_helpers.enforce_periodicity_of_box(\n self.galaxy_table['y'], self.halocat.Lbox)\n self.galaxy_table['z'] = model_helpers.enforce_periodicity_of_box(\n self.galaxy_table['z'], self.halocat.Lbox)\n\n if hasattr(self.model, 'galaxy_selection_func'):\n mask = self.model.galaxy_selection_func(self.galaxy_table)\n self.galaxy_table = self.galaxy_table[mask]\n\n def allocate_memory(self):\n \"\"\" Method allocates the memory for all the numpy arrays \n that will store the information about the mock. \n These arrays are bound directly to the mock object. \n\n The main bookkeeping devices generated by this method are \n ``_occupation`` and ``_gal_type_indices``. 
\n\n \"\"\"\n\n self.galaxy_table = Table() \n\n # We will keep track of the calling sequence with a list called _remaining_methods_to_call\n # Each time a function in this list is called, we will remove that function from the list\n # Mock generation will be complete when _remaining_methods_to_call is exhausted\n self._remaining_methods_to_call = copy(self.model._mock_generation_calling_sequence)\n\n # Call all composite model methods that should be called prior to mc_occupation \n # All such function calls must be applied to the table, since we do not yet know \n # how much memory we need for the mock galaxy_table\n galprops_assigned_to_halo_table = []\n for func_name in self.model._mock_generation_calling_sequence:\n if 'mc_occupation' in func_name:\n break\n else:\n func = getattr(self.model, func_name)\n func(table = self.halo_table)\n galprops_assigned_to_halo_table_by_func = func._galprop_dtypes_to_allocate.names\n galprops_assigned_to_halo_table.extend(galprops_assigned_to_halo_table_by_func)\n self._remaining_methods_to_call.remove(func_name)\n # Now update the list of additional_haloprops, if applicable\n # This is necessary because each of the above function calls created new \n # columns for the *halo_table*, not the *galaxy_table*. So we will need to use \n # np.repeat inside mock.populate() so that mock galaxies inherit these newly-created columns\n # Since there is already a loop over additional_haloprops inside mock.populate() that does this, \n # then all we need to do is append to this list\n galprops_assigned_to_halo_table = list(set(\n galprops_assigned_to_halo_table))\n self.additional_haloprops.extend(galprops_assigned_to_halo_table)\n self.additional_haloprops = list(set(self.additional_haloprops))\n\n self._occupation = {}\n self._total_abundance = {}\n self._gal_type_indices = {}\n\n first_galaxy_index = 0\n for gal_type in self.gal_types:\n occupation_func_name = 'mc_occupation_'+gal_type\n occupation_func = getattr(self.model, occupation_func_name)\n # Call the component model to get a Monte Carlo\n # realization of the abundance of gal_type galaxies\n self._occupation[gal_type] = occupation_func(table=self.halo_table)\n\n # Now use the above result to set up the indexing scheme\n self._total_abundance[gal_type] = (\n self._occupation[gal_type].sum()\n )\n last_galaxy_index = first_galaxy_index + self._total_abundance[gal_type]\n # Build a bookkeeping device to keep track of \n # which array elements pertain to which gal_type. 
\n self._gal_type_indices[gal_type] = slice(\n first_galaxy_index, last_galaxy_index)\n first_galaxy_index = last_galaxy_index\n # Remove the mc_occupation function from the list of methods to call\n self._remaining_methods_to_call.remove(occupation_func_name)\n galprops_assigned_to_halo_table_by_func = occupation_func._galprop_dtypes_to_allocate.names\n self.additional_haloprops.extend(galprops_assigned_to_halo_table_by_func)\n \n self.Ngals = np.sum(self._total_abundance.values())\n\n # Allocate memory for all additional halo properties, \n # including profile parameters of the halos such as 'conc_NFWmodel'\n for halocatkey in self.additional_haloprops:\n self.galaxy_table[halocatkey] = np.zeros(self.Ngals, \n dtype = self.halo_table[halocatkey].dtype)\n\n # Separately allocate memory for the galaxy profile parameters\n for galcatkey in self.model.prof_param_keys:\n self.galaxy_table[galcatkey] = 0.\n\n self.galaxy_table['gal_type'] = np.zeros(self.Ngals, dtype=object)\n\n dt = self.model._galprop_dtypes_to_allocate\n for key in dt.names:\n self.galaxy_table[key] = np.zeros(self.Ngals, dtype = dt[key].type)\n\n","sub_path":"halotools/empirical_models/factories/hod_mock_factory.py","file_name":"hod_mock_factory.py","file_ext":"py","file_size_in_byte":12566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"464517922","text":"import json\nimport binascii\nfrom pprint import pprint\nfrom web3 import Web3, HTTPProvider \n\nweb3 = Web3(HTTPProvider('http://localhost:8545'))\n\n\nwith open('config.json') as f:\n conf = json.load(f)\n\npub1 = conf[\"PUB1\"]\npriv1 = \"0x\" + conf[\"PRIV1\"]\n\n\ncontract = web3.eth.contract(\n abi=conf[\"PM_ABI\"],\n bytecode=conf[\"PM_BYTECODE\"])\n\nacct = web3.eth.account.privateKeyToAccount(priv1)\ntxCount = web3.eth.getTransactionCount(acct.address)\n\n\nconstruct_txn = contract.constructor().buildTransaction({\n 'from': acct.address,\n 'nonce': web3.eth.getTransactionCount(acct.address),\n 'gas': 1728712,\n 'gasPrice': web3.toWei('21', 'gwei')})\n\nsigned = acct.signTransaction(construct_txn)\nweb3.eth.sendRawTransaction(signed.rawTransaction)\n\n\n\n","sub_path":"deploy_pm.py","file_name":"deploy_pm.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"23776256","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Deals',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('title', models.CharField(max_length=50)),\n ('description', models.CharField(max_length=1000)),\n ('link', models.CharField(max_length=1000, default='#')),\n ('data_created', models.DateTimeField(default=django.utils.timezone.now, editable=False)),\n ('image', models.ImageField(default='/img/top_deals_default.png', upload_to='')),\n ('order', models.PositiveIntegerField(db_index=True, default=0, editable=False)),\n ('created_user', models.ForeignKey(editable=False, related_name='post_created', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Deals',\n 'ordering': ['order'],\n },\n ),\n migrations.CreateModel(\n name='SponsorsAndAds',\n fields=[\n ('id', models.AutoField(primary_key=True, 
serialize=False)),\n ('title', models.CharField(max_length=50)),\n ('image', models.ImageField(default='/img/ads_default.png', upload_to='')),\n ],\n ),\n migrations.CreateModel(\n name='TopNews',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('title', models.CharField(max_length=50)),\n ('description', models.CharField(max_length=1000)),\n ('link', models.CharField(max_length=1000, default='#')),\n ('image', models.ImageField(default='/img/news_default.png', upload_to='')),\n ('order', models.PositiveIntegerField(db_index=True, default=0, editable=False)),\n ],\n options={\n 'verbose_name': 'TopNews',\n 'ordering': ['order'],\n },\n ),\n ]\n","sub_path":"news_app/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"16580235","text":"import sys\n\nfrom bokeh.plotting import Figure\nfrom bokeh.layouts import row, column, widgetbox, gridplot\n\nfrom bokeh.models.widgets import PreText, Div\nfrom bokeh.models import PrintfTickFormatter\nfrom dashboard.bokeh.helper import write_info\n\n\nfrom bokeh.io import curdoc\nfrom bokeh.io import output_notebook, show, output_file\n\nfrom bokeh.models import ColumnDataSource, HoverTool, TapTool, Range1d, OpenURL\nfrom bokeh.models import LinearColorMapper, ColorBar\nfrom bokeh.models.widgets import Select, Slider\nfrom dashboard.bokeh.helper import get_url_args, write_description, \\\n get_scalar_metrics\n\nimport numpy as np\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n# =============================================\n# THIS comes from INTERFACE\n#\nargs = get_url_args(curdoc)\n\ntry:\n selected_process_id = args['process_id']\n selected_arm = args['arm']\n selected_spectrograph = args['spectrograph']\nexcept:\n sys.exit('Invalid args')\n\n# ============================================\n# THIS READ yaml files\n#\n\ncam = selected_arm+str(selected_spectrograph)\ntry:\n lm = get_scalar_metrics(selected_process_id, cam)\n metrics, tests = lm['results']['metrics'], lm['results']['tests']\nexcept:\n sys.exit('Could not load metrics')\n\nskyresid = metrics['skyresid']\n\n# ============================================\n# THIS: Given the set up in the block above, \n# we have the bokeh plots\n\nskr_tooltip = \"\"\"\n
<div>\n    <div>\n        <span>Wavelength: </span>\n        <span>@wl Å</span>\n    </div>\n    <div>\n        <span>y: </span>\n        <span>@med_resid</span>\n    </div>\n</div>\n\"\"\"\n\nwavg_tooltip = \"\"\"\n<div>\n    <div>\n        <span>Wavelength: </span>\n        <span>@wl Å</span>\n    </div>\n    <div>\n        <span>y: </span>\n        <span>@wavg_resid</span>\n    </div>\n</div>
\n\"\"\"\n\nskr_hover=HoverTool(tooltips=skr_tooltip, mode='vline')\nwavg_hover=HoverTool(tooltips=wavg_tooltip, mode='vline')\n\n\nskyres_source = ColumnDataSource(\n data={'wl': skyresid['WAVELENGTH'],\n 'med_resid' : skyresid['MED_RESID_WAVE'],\n 'wavg_resid': skyresid['WAVG_RES_WAVE']\n })\n\np1 = Figure(title= 'MED_RESID_WAVE', \n x_axis_label='Angstrom',\n plot_width = 720, plot_height = 240,\n tools=[skr_hover,\"pan,box_zoom,reset,crosshair, lasso_select\" ])\n\np1.line('wl', 'med_resid', source=skyres_source)\n\np2 = Figure(title= 'WAVG_RESID_WAVE', \n x_axis_label='Angstrom',\n plot_width = 720, plot_height = 240,\n tools=[wavg_hover,\"pan,box_zoom,reset,crosshair, lasso_select\" ])\n\np2.line('wl', 'wavg_resid', source=skyres_source)\n\n\n'''p1.circle('wl', 'med_resid', source=skyres_source, alpha = 0, size=1,\n hover_alpha=1,\n hover_fill_color='orange', hover_line_color='red') '''\n\n'''p2.circle('wl', 'wavg_resid', source=skyres_source, alpha=0, size=1,\n hover_alpha=1,\n hover_fill_color='orange', hover_line_color='red')''' \n\n\n\np1.x_range = p2.x_range\n\ninfo, nlines = write_info('skyresid', tests['skyresid'])\n\ntxt = PreText(text=info, height=nlines*20, width=p2.plot_width)\ninfo_col=Div(text=write_description('skyresid'), width=p2.plot_width)\np2txt = column(widgetbox(info_col), p1, p2)\n\n#layout=column(p1,p2)\ncurdoc().add_root(p2txt)\ncurdoc().title = \"SKYRESID\"\n","sub_path":"backend/framework/qlf/dashboard/bokeh/qaskyresid/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"137965772","text":"from mongoengine import *\nfrom db import course_db\nfrom db import stream_db\nfrom db import staff_db\nimport json\n\n\ndef create_course_db():\n course_db.add_courses()\n return \"Course database created.\"\n\n\ndef create_stream_db():\n stream_db.create_streams()\n return \"Stream database created.\"\n\n\ndef create_staff_db():\n staff_db.create_staff()\n return \"Staff database created.\"\n\n\ndef get_info(table, keywords, query):\n keywords = [x.upper() for x in keywords]\n query = [x.lower() for x in query]\n if table == 'course':\n return get_course_info(keywords, query)\n elif table == 'stream':\n return get_stream_info(keywords, query)\n elif table == 'staff':\n return get_staff_info(keywords, query)\n return None\n\n\ndef get_course_info(keywords, query):\n connect(host='mongodb://benny:comp9900@ds125912.mlab.com:25912/comp9900')\n table = course_db.Course.objects\n final_result = []\n\n for keyword in keywords:\n result = {}\n for doc in table:\n if doc._id == keyword:\n if len(query) == 0:\n result = json.loads(doc.to_json())\n else:\n info = json.loads(doc.to_json())\n for key in query:\n result[key] = info[key]\n if '_id' in result:\n del result['_id']\n final_result.append(result)\n return final_result\n\n\ndef get_stream_info(keywords, original_query):\n found = False\n connect(host='mongodb://benny:comp9900@ds125912.mlab.com:25912/comp9900')\n table = stream_db.Stream.objects\n final_result = []\n original_query = [x.upper() for x in original_query]\n\n for keyword in keywords:\n result = []\n query = [x for x in original_query]\n for doc in table:\n if doc._id == keyword:\n found = True\n if len(query) == 0:\n result = json.loads(doc.to_json())['areas']\n else:\n areas = json.loads(doc.to_json())['areas']\n while query:\n course = query.pop(0)\n for i in range(len(areas)):\n area = areas[i]['electives']\n for j in range(len(area)):\n 
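# drop the first elective matching the completed course from this area and decrement its remaining count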
if course == area[j]:\n areas[i]['electives'].pop(j)\n areas[i]['number'] = areas[i]['number'] - 1\n break\n\n for k in range(len(areas)):\n if areas[k]['number'] > 0:\n result.append(areas[k])\n if result:\n final_result.append(result)\n else:\n if found:\n final_result.append([True])\n else:\n final_result.append(result)\n return final_result\n\n\ndef get_staff_info(keywords, query):\n connect(host='mongodb://benny:comp9900@ds125912.mlab.com:25912/comp9900')\n table = staff_db.Staff.objects\n final_result = []\n\n for keyword in keywords:\n result = {}\n keyword = staff_db.change_name_format(keyword)\n for doc in table:\n if doc._id == keyword:\n if len(query) == 0:\n result = json.loads(doc.to_json())\n else:\n info = json.loads(doc.to_json())\n for key in query:\n result[key] = info[key]\n if '_id' in result:\n del result['_id']\n final_result.append(result)\n return final_result\n","sub_path":"db/retrieve_info.py","file_name":"retrieve_info.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"359107288","text":"import numpy as np \nimport matplotlib.pyplot as plt \nimport matplotlib.animation as animation\nimport matplotlib.lines as mlines\nimport sys\n\nfig = plt.figure()\naxe = fig.add_subplot(3,1,1)\naxe1 = fig.add_subplot(3,1,2)\naxe2 = fig.add_subplot(3,1,3)\n\n\nparent_r = 1\naxe.set_aspect('equal')\naxe.set_xlim([-parent_r - 0.1, parent_r + 0.1])\naxe.set_ylim([-parent_r - 0.1, parent_r + 0.1])\n# axe1.set_aspect('equal')\n# axe1.set_xlim([-parent_r, parent_r])\naxe1.set_ylim([-parent_r - 0.2, parent_r + 0.1])\npatches = []\n\ncircle = plt.Circle((0,0), radius=parent_r, fc='white', ec='black', linewidth=3)\n\nr = parent_r\nbase_angle = 45\ntheta = np.radians(base_angle)\nx = r * np.cos(theta)\ny = r * np.sin(theta)\ncircle2, = axe.plot([x], [y], 'ro')\npatches.append(axe.add_patch(circle))\n\n\n\nsamples = 360\nratemod = 1\ninterval = 1e-3 #* (ratemod % samples if ratemod % samples != 0 else 1)\n\nfreq = np.deg2rad((1)/interval)\nsample_rate = 1\n#sample_rate = 180\nperiods = 8\ntime = -x + np.linspace(0, 2 * periods * np.pi * freq, num = int(samples/ratemod) * periods)/freq\ndata = np.cos(time)\naxe1.plot(time, data, zorder=0)\ncircle3, = axe1.plot([x], [y], 'ro')\nj = 0\nprev_y = 0\ndef update(i = 0):\n global axe, axe1, time, circle, circle2, parent_r, ratemod, sample, j, prev_y\n ret = []\n\n offset = np.radians(ratemod * i)\n r = parent_r\n base_angle = 45\n theta = np.radians(base_angle) + offset\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n circle2.set_data(x, y)\n temp = circle2,\n ret.append(temp)\n\n\n\n circle3.set_data((time[j]), y)\n if (j % sample_rate == 0):\n if (np.sqrt(np.square(y - data[j])) < 0.2):\n tempr, = axe1.plot([time[j]], [data[j]], 'ro')\n prev_y = y\n temp2 = tempr,\n ret.append(temp2)\n circle2.set_data(x, y)\n temp = circle2,\n ret.append(temp)\n\n\n\n\n j = (j + 1)%(time.shape[0])\n\n return ret#[temp, temp2]\n\n\nanim = animation.FuncAnimation(fig, update, frames=samples, interval=interval * 1e3, blit = False, cache_frame_data = False, repeat = True)\n#anim2 = animation.FuncAnimation(fig, update2, frames=360, interval=120, blit = False, cache_frame_data = False, repeat = True)\nplt.show()\nprint(\"\")\n","sub_path":"code version 2/problem5-3.py","file_name":"problem5-3.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"116261610","text":"# 
Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom six.moves import xrange\n\nimport tensorflow as tf\n\nimport resnet_model\nimport vgg_preprocessing\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\nfrom tensorflow.contrib.tpu.python.tpu import tpu_estimator\nfrom tensorflow.contrib.tpu.python.tpu import tpu_optimizer\n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_string(\n 'master', default_value='local',\n docstring='Location of the master.')\n\ntf.flags.DEFINE_string(\n 'data_dir', default_value='',\n docstring='The directory where the ImageNet input data is stored.')\n\ntf.flags.DEFINE_string(\n 'model_dir', default_value='',\n docstring='The directory where the model will be stored.')\n\ntf.flags.DEFINE_integer(\n 'resnet_size', default_value=50, docstring='The size of the ResNet model to use.')\n\ntf.flags.DEFINE_integer(\n 'train_steps', default_value=200000,\n docstring='The number of steps to use for training.')\n\ntf.flags.DEFINE_boolean(\n 'enable_eval', default_value=True,\n docstring='Flag to enable/disable evaluation.')\n\ntf.flags.DEFINE_integer(\n 'steps_per_eval', default_value=5000,\n docstring='The number of training steps to run between evaluations.')\n\ntf.flags.DEFINE_integer(\n 'train_batch_size', default_value=1024, docstring='Batch size for training.')\n\ntf.flags.DEFINE_integer(\n 'eval_batch_size', default_value=1024, docstring='Batch size for evaluation.')\n\ntf.flags.DEFINE_integer(\"num_shards\", 8, \"Number of shards (TPU chips).\")\n\ntf.flags.DEFINE_integer(\"iterations_per_loop\", 100,\n \"Number of iterations per TPU training loop.\")\n\n\n_LABEL_CLASSES = 1001\n_NUM_CHANNELS = 3\n\n_MOMENTUM = 0.9\n_WEIGHT_DECAY = 1e-4\n\nimage_preprocessing_fn = vgg_preprocessing.preprocess_image\n\n\nclass ImageNetInput(object):\n\n def __init__(self, is_training):\n self.is_training = is_training\n\n def dataset_parser(self, value):\n \"\"\"Parse an Imagenet record from value.\"\"\"\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.parse_single_example(value, keys_to_features)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]), _NUM_CHANNELS)\n image = tf.image.convert_image_dtype(image, 
dtype=tf.float32)\n\n # TODO(shivaniagrawal): height and width of image from model\n image = image_preprocessing_fn(\n image=image,\n output_height=224,\n output_width=224,\n is_training=self.is_training)\n\n label = tf.cast(\n tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32)\n\n return image, tf.one_hot(label, _LABEL_CLASSES)\n\n def __call__(self, params):\n \"\"\"Input function which provides a single batch for train or eval.\"\"\"\n batch_size = params['batch_size']\n\n # Shuffle the filenames to ensure better randomization\n file_pattern = os.path.join(\n FLAGS.data_dir, 'train-*' if self.is_training else 'validation-*')\n dataset = tf.contrib.data.Dataset.list_files(file_pattern)\n if self.is_training:\n dataset = dataset.shuffle(buffer_size=1024)\n\n if self.is_training:\n dataset = dataset.repeat()\n\n def prefetch_dataset(filename):\n dataset = tf.contrib.data.TFRecordDataset(filename, buffer_size=268435456)\n dataset = dataset.prefetch(batch_size)\n return dataset\n\n dataset = dataset.interleave(prefetch_dataset, cycle_length=2)\n\n dataset = dataset.map(\n self.dataset_parser,\n num_parallel_calls=16,\n output_buffer_size=batch_size)\n dataset = dataset.batch(batch_size)\n images, labels = dataset.make_one_shot_iterator().get_next()\n\n images.set_shape(images.get_shape().merge_with(\n tf.TensorShape([batch_size, None, None, None])))\n labels.set_shape(\n labels.get_shape().merge_with(tf.TensorShape([batch_size, None])))\n return images, labels\n\n\ndef metric_fn(labels, logits):\n \"\"\"Evaluation metric Fn.\"\"\"\n predictions = tf.argmax(logits, axis=1)\n accuracy = tf.metrics.accuracy(tf.argmax(labels, axis=1), predictions)\n return {'accuracy': accuracy}\n\n\ndef piecewise_constant(x, boundaries, values):\n \"\"\"Simulates the behavior of tf.train.piecewise_constant with tf.where.\"\"\"\n piecewise_value = values[0]\n\n for i in xrange(len(boundaries)):\n piecewise_value = tf.where(\n x < boundaries[i], piecewise_value, values[i + 1])\n\n return piecewise_value\n\n\ndef resnet_model_fn(features, labels, mode, params):\n \"\"\"Our model_fn for ResNet to be used with our Estimator.\"\"\"\n network = resnet_model.resnet_v2(\n resnet_size=FLAGS.resnet_size, num_classes=_LABEL_CLASSES)\n\n logits = network(\n inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'classes': tf.argmax(logits, axis=1),\n 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')\n }\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate loss, which includes softmax cross entropy and L2 regularization.\n cross_entropy = tf.losses.softmax_cross_entropy(\n logits=logits, onehot_labels=labels)\n\n # Create a tensor named cross_entropy for logging purposes.\n # tf.identity(cross_entropy, name='cross_entropy')\n # tf.summary.scalar('cross_entropy', cross_entropy)\n\n # Add weight decay to the loss. We perform weight decay on all trainable\n # variables, which includes batch norm beta and gamma variables.\n loss = cross_entropy + _WEIGHT_DECAY * tf.add_n(\n [tf.nn.l2_loss(v) for v in tf.trainable_variables()])\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n # Scale the learning rate linearly with the batch size. 
When the batch size is\n # 256, the learning rate should be 0.1.\n _INITIAL_LEARNING_RATE = 0.1 * FLAGS.train_batch_size / 256\n\n batches_per_epoch = 1281167 / FLAGS.train_batch_size\n global_step = tf.train.get_or_create_global_step()\n\n # Perform a gradual warmup of the learning rate, as in the paper \"Training\n # ImageNet in 1 Hour.\" Afterward, decay the learning rate by 0.1 at 30, 60,\n # 120, and 150 epochs.\n boundaries = [int(batches_per_epoch * epoch) for epoch in [\n 1, 2, 3, 4, 5, 30, 60, 120, 150]]\n values = [_INITIAL_LEARNING_RATE * decay for decay in [\n 1.0 / 6, 2.0 / 6, 3.0 / 6, 4.0 / 6, 5.0 / 6, 1, 0.1, 0.01, 1e-3, 1e-4]]\n learning_rate = piecewise_constant(global_step, boundaries, values)\n\n # Create a tensor named learning_rate for logging purposes.\n # tf.identity(learning_rate, name='learning_rate')\n # tf.summary.scalar('learning_rate', learning_rate)\n\n optimizer = tf.train.MomentumOptimizer(\n learning_rate=learning_rate,\n momentum=_MOMENTUM)\n optimizer = tpu_optimizer.CrossShardOptimizer(optimizer)\n\n # Batch norm requires update_ops to be added as a train_op dependency.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss, global_step)\n else:\n train_op = None\n\n eval_metrics = None\n if mode == tf.estimator.ModeKeys.EVAL:\n eval_metrics = (metric_fn, [labels, logits])\n\n return tpu_estimator.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op,\n eval_metrics=eval_metrics)\n\n\ndef main(unused_argv):\n config = tpu_config.RunConfig(\n master=FLAGS.master,\n evaluation_master=FLAGS.master,\n model_dir=FLAGS.model_dir,\n tpu_config=tpu_config.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_shards))\n resnet_classifier = tpu_estimator.TPUEstimator(\n model_fn=resnet_model_fn,\n config=config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size)\n\n if FLAGS.enable_eval:\n for cycle in range(FLAGS.train_steps // FLAGS.steps_per_eval):\n tf.logging.info('Starting a training cycle.')\n resnet_classifier.train(\n input_fn=ImageNetInput(True), steps=FLAGS.steps_per_eval)\n\n _EVAL_STEPS = 50000 // FLAGS.eval_batch_size\n tf.logging.info('Starting to evaluate.')\n eval_results = resnet_classifier.evaluate(\n input_fn=ImageNetInput(False), steps=_EVAL_STEPS)\n tf.logging.info('Eval results: %s' % eval_results)\n\n else:\n tf.logging.info('Starting training.')\n resnet_classifier.train(\n input_fn=ImageNetInput(True), steps=FLAGS.train_steps)\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n\n","sub_path":"cloud_tpu/models/resnet_garden/resnet_main.py","file_name":"resnet_main.py","file_ext":"py","file_size_in_byte":9917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"625292628","text":"import csv\nimport json\nimport openpyxl\nimport typing\n\n\nFile = typing.TextIO\n\n\ndef merge_students_data(csv_file: File, xlsx_workbook, json_file: File) \\\n -> typing.NoReturn:\n workbook = xlsx_workbook\n students_csv = csv.DictReader(csv_file)\n sheet = workbook[workbook.sheetnames[0]]\n\n further_json = {}\n\n for number_of_row in range(len(sheet['A'])):\n further_json[sheet[f'A{number_of_row + 1}'].value] = {\n 'marks': [mark.value for mark in sheet[str(number_of_row + 1)] if\n isinstance(mark.value, int) and not isinstance(mark.value, bool)]\n } # if value is int\n\n for student in students_csv:\n name = 
f'{student[\"first_name\"]} {student[\"last_name\"]}'\n\n further_json[name]['age'] = int(student['age'])\n\n json.dump(further_json, json_file)\n","sub_path":"main/dz3/zadacza2.py","file_name":"zadacza2.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"227756094","text":"import pandas as pd\n\n\nimport numpy as np\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\n# from xgboost.sklearn import XGBRegressor\nfrom sklearn import preprocessing\nimport xgboost as xgb\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.metrics import mean_absolute_error\n\ndef msle(preds, dtrain):\n labels = dtrain.get_label()\n # return a pair metric_name, result. The metric name must not contain a\n # colon (:) or a space since preds are margin(before logistic\n # transformation, cutoff at 0)\n return 'my-error', mean_squared_log_error(labels,preds)\n\n# Read the data\ndata = pd.read_csv(\"../Train/train.csv\")\ntest_data = pd.read_csv(\"../test.csv\")\nprint(data.head())\nprint(test_data.head())\nprint(data.columns)\n\nprint(data.loan_enq.unique())\ndata['loan_enq'].fillna('N',inplace=True)\ntest_data['loan_enq'].fillna('N',inplace=True)\nprint(data.dtypes)\n\n# Encode the categorical variables\nle = preprocessing.LabelEncoder()\nle.fit(data['account_type'])\nprint(le.classes_)\ndata['account_type'] = le.transform(data['account_type'])\ntest_data['account_type'] = le.transform(test_data['account_type'])\n\nle1 = preprocessing.LabelEncoder()\nle1.fit(data['gender'])\nprint(le1.classes_)\ndata['gender'] = le1.transform(data['gender'])\ntest_data['gender'] = le1.transform(test_data['gender'])\n\nle2 = preprocessing.LabelEncoder()\nle2.fit(data['loan_enq'])\nprint(le2.classes_)\ndata['loan_enq'] = le2.transform(data['loan_enq'])\ntest_data['loan_enq'] = le2.transform(test_data['loan_enq'])\n\nprint(data.dtypes)\n#Build an xgboost model on raw data\ny = data['cc_cons']\n# X = data.iloc[,2:43]\nX = data.iloc[:, 1:43]\nprint(X.columns)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\ndtrain = xgb.DMatrix(X_train, label=y_train)\ndtest = xgb.DMatrix(X_test, label=y_test)\n\nY = test_data.iloc[:,1:43]\ndfinal_test = xgb.DMatrix(Y)\n# params = {'colsample_bylevel': 0.7, 'colsample_bytree': 0.7, 'learning_rate': 0.1,\n# 'max_depth': 10, 'min_child_weight': 1, 'n_estimators': 20,\n# 'objective': 'reg:linear', 'scale_pos_weight': 1, 'subsample': 1.0}\n\n\n\n#\n# best_xgb_model = XGBRegressor(n_estimators=10, learning_rate=0.08, gamma=0, subsample=0.75,\n# colsample_bytree=1, max_depth=4,objective=\"reg:squarederror\",verbosity = 0)\n\n# best_xgb_model.fit(X_train,y_train)\n\n########################################\n\n# \"Learn\" the mean from the training data\nmean_train = np.mean(y_train)\n# Get predictions on the test set\nbaseline_predictions = np.ones(y_test.shape) * mean_train\n# Compute MAE\nmae_baseline = mean_absolute_error(y_test, baseline_predictions)\nprint(\"Baseline MSLE is {:.2f}\".format(mae_baseline))\n\n#########################################\n\n# param = {'max_depth': 2, 'eta': 1, 'silent': 1}\n# watchlist = [(dtest, 'eval'), (dtrain, 'train')]\n# num_round = 2\n# bst = xgb.train(param, dtrain, num_round, watchlist,\n# feval=\"msle\")\n\nparams = {\n # Parameters that we are going to tune.\n 'max_depth':6,\n 'min_child_weight': 1,\n 'eta':.3,\n 'subsample': 1,\n 'colsample_bytree': 1,\n # 
Other parameters\n 'objective':'reg:squarederror'\n}\nnum_boost_round = 100\n\nmodel = xgb.train(\n params,\n dtrain,\n num_boost_round=num_boost_round,\n evals=[(dtest, \"Test\")],\n early_stopping_rounds=10,\n feval=msle\n)\nprint(\"Best MSLE: {:.2f} with {} rounds\".format(\n model.best_score,\n model.best_iteration+1))\n\n# cv_results = xgb.cv(\n# params,\n# dtrain,\n# num_boost_round=num_boost_round,\n# seed=42,\n# nfold=5,\n# metrics={'mae'},\n# early_stopping_rounds=10\n# )\n#\n# print(cv_results)\n#\n# print(cv_results['test-mae-mean'].min())\n#\n\npredictions = model.predict(dtest)\nresult= mean_squared_log_error(predictions,y_test)\nprint(result)\n\n\n# Predict on final data\nfinal_predictions = model.predict(dfinal_test)\nprint(final_predictions)\nfinal_submission_data = pd.DataFrame({'id':test_data.id,'cc_cons':final_predictions})\nprint(final_submission_data.head())\nfinal_submission_data.to_csv(\"../Submissions/test_submission.csv\",index=False)","sub_path":"AMExpert/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348690342","text":"\"\"\"\nCheck the given number is prime or not using JAVA regex.\n\nInput:\nFirst line consists of T test cases. Only line of every test case consists of an integer N.\n\nOutput:\nPrint \"1\" if the given number statement is prime else 0.\n\nConstraints:\n1<=T<=100\n1<=N<=1000\n\nExample:\nInput:\n2\n3\n4\n\nOutput:\n1\n0\n\"\"\"\n\n\ndef prime_number_validation(n):\n count = 0\n for i in range(2, n // 2 + 1):\n if (n % i) == 0:\n count = 1\n break\n if count == 1:\n return \"0\"\n else:\n return \"1\"\n\n\nif __name__ == '__main__':\n t = int(input())\n for i in range(t):\n n = int(input())\n print(prime_number_validation(n))\n","sub_path":"practice/school/prime_number_validation.py","file_name":"prime_number_validation.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"149208214","text":"def is_prime():\r\n try:\r\n cont=0\r\n for i in range(1,num+1):\r\n if num%i==0:\r\n cont+=1\r\n if cont==2:\r\n print(\"1\")\r\n else:\r\n print(\"0\")\r\n except:\r\n print(\"-1\")\r\nis_prime()\r\n\r\nwhile True:\r\n\tnum=int(input(\"Digite un numero: \"))\r\nif num<=0:\r\n print(\"Ingrese un numero distinto\")\r\n break\r\nelse:\r\n\tcont+=1\r\n\tprint(\"El total de primos encontrados fue :\") ,cont ","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422347522","text":"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# MIT License for more details.\n\n\"\"\"This is SearchSpace for backbones.\"\"\"\nfrom vega.search_space.fine_grained_space.fine_grained_space import FineGrainedSpace\nfrom vega.core.common.class_factory import ClassFactory, ClassType\nfrom vega.search_space.fine_grained_space.blocks import InitialBlock\nfrom vega.search_space.fine_grained_space.conditions import Append, Sequential, Map, Tuple\nfrom vega.search_space.fine_grained_space.operators import op\nfrom vega.search_space.utils import get_search_space\n\n\n@ClassFactory.register(ClassType.SEARCH_SPACE)\nclass ResNetDet(FineGrainedSpace):\n \"\"\"Create ResNet_Det SearchSpace.\n\n As the backbone of the faster-RCNN inspection network, the fully connected layer is removed,\n the freeze part of the layer is supported, and the output of multiple layers is supported as input to the neck.\n \"\"\"\n\n _block_setting = {18: ('BasicBlock', [2, 2, 2, 2]),\n 34: ('BasicBlock', [3, 4, 6, 3]),\n 50: ('BottleneckBlock', [3, 4, 6, 3]),\n 101: ('BottleneckBlock', [3, 4, 23, 3])}\n\n def constructor(self, depth, block=None, items=None, num_class=10, num_reps=4, frozen_stages=-1):\n \"\"\"Create layers.\n\n :param num_reps: number of layers\n :type num_reqs: int\n :param items: channel and stride of every layer\n :type items: dict\n :param num_class: number of class\n :type num_class: int\n \"\"\"\n if depth in self._block_setting.keys():\n block = dict()\n block['type'] = self._block_setting[depth][0]\n block_cls = get_search_space(self._block_setting[depth][0])\n num_reps = self._block_setting[depth][1]\n else:\n block_cls = get_search_space(block.get('type'))\n self.init_block = InitialBlock(init_plane=64)\n blocks = []\n in_planes = 64\n for i, num_blocks in enumerate(num_reps):\n seq = Sequential()\n out_planes = 64 * 2 ** i\n stride = 1 if i == 0 else 2\n seq.add(block_cls(inchannel=in_planes, outchannel=out_planes, groups=1, stride=stride, base_width=64))\n in_planes = out_planes * block_cls.expansion\n for idx in range(1, num_blocks):\n seq.add(block_cls(inchannel=in_planes, outchannel=out_planes, groups=1, stride=1, base_width=64))\n if i == frozen_stages:\n seq.freeze(True)\n blocks.append(seq)\n self.blocks = Append(*tuple(blocks))\n\n\n@ClassFactory.register(ClassType.SEARCH_SPACE)\nclass RPNHead(FineGrainedSpace):\n \"\"\"RpnHead.\"\"\"\n\n def constructor(self, in_channels=256, feat_channels=256, num_classes=2):\n \"\"\"Create rpn Search Space.\"\"\"\n anchor_scales = [8, 16, 32]\n anchor_ratios = [0.5, 1.0, 2.0]\n num_anchors = len(anchor_ratios) * len(anchor_scales)\n if feat_channels > 0:\n conv = op.Conv2d(in_channels=in_channels, out_channels=feat_channels, kernel_size=3, padding=1)\n relu = op.ReLU(inplace=True)\n rpn_cls_conv = op.Conv2d(in_channels=feat_channels, out_channels=num_anchors * num_classes, kernel_size=1)\n rpn_reg_conv = op.Conv2d(in_channels=feat_channels, out_channels=num_anchors * 4, kernel_size=1)\n rpn_cls = Sequential(conv, relu, rpn_cls_conv)\n rpn_reg = Sequential(conv, relu, rpn_reg_conv)\n else:\n rpn_cls = op.Conv2d(in_channels=in_channels, out_channels=num_anchors * num_classes, kernel_size=1)\n rpn_reg = op.Conv2d(in_channels=in_channels, out_channels=num_anchors * 4, kernel_size=1)\n self.rpn = Tuple(Map(rpn_cls), 
Map(rpn_reg))\n","sub_path":"built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/search_space/fine_grained_space/blocks/backbones.py","file_name":"backbones.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"287362516","text":"#!usr/bin/env python\nimport sys\n\ndef main():\n\n with open(sys.argv[1], \"w\") as col1, open(sys.argv[2], \"w\") as col2:\n for line in sys.stdin:\n col = line.rstrip('\\n').split('\\t')\n col1.write(col[0] + '\\n')\n col2.write(col[1] + '\\n')\n\nif __name__ == '__main__':\n main() ","sub_path":"exercises/ex01/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"235954110","text":"\"\"\"Get pandas dataframes for a given data and month.\n\n*get_dataframes(csvfile, spec=SPEC)* is a lower-level function to get\ndataframes from *csvfile* connection under *spec* parsing instruction.\n\n*Vintage* class addresses dataset by year and month:\n\n Vintage(year, month).save()\n Vintage(year, month).validate()\n\nThese calls should give similar results:\n\n csv_path = PathHelper.locate_csv(year, month)\n csvfile = open_csv(csv_path)\n\n Vintage(year, month).dfs()\n\n*Collection* manipulates all datasets, released at various dates:\n\n Collection.save_all()\n Collection.save_latest()\n Collection.approve_latest()\n Collection.approve_all()\n\"\"\"\nimport pandas as pd\n\nfrom config import DateHelper, PathHelper\nfrom csv2df.specification import SPEC\nfrom csv2df.reader import Reader, open_csv\nfrom csv2df.parser import extract_tables\nfrom csv2df.emitter import Emitter\nfrom csv2df.validator import Validator\n\n\n__all__ = ['get_dataframes', 'Vintage', 'Collection']\n\n\ndef get_dataframes(csvfile, spec=SPEC):\n \"\"\"Extract dataframes from *csvfile* using *spec* parsing instructions.\n\n Args:\n csvfile (file connection or StringIO) - CSV file for parsing\n spec (spec.Specification) - pasing instructions, defaults to spec.SPEC\n\n Returns:\n Three pandas dataframes at annual, qtr and monthly frequencies.\n \"\"\"\n tables = [t for csv_segment, pdef in Reader(csvfile, spec).items()\n for t in extract_tables(csv_segment, pdef)]\n emitter = Emitter(tables)\n dfa = emitter.get_dataframe(freq='a')\n dfq = emitter.get_dataframe(freq='q')\n dfm = emitter.get_dataframe(freq='m')\n return dfa, dfq, dfm\n\n\nclass Vintage:\n \"\"\"Represents dataset release for a given year and month.\"\"\"\n\n def __init__(self, year, month):\n self.year, self.month = year, month\n csv_path = PathHelper.locate_csv(year, month)\n with open_csv(csv_path) as csvfile:\n self.dfa, self.dfq, self.dfm = get_dataframes(csvfile)\n\n def dfs(self):\n \"\"\"Shorthand for obtaining three dataframes.\"\"\"\n return self.dfa, self.dfq, self.dfm\n\n def save(self):\n folder_path = PathHelper.get_processed_folder(self.year, self.month)\n self.dfa.to_csv(folder_path / 'dfa.csv')\n self.dfq.to_csv(folder_path / 'dfq.csv')\n self.dfm.to_csv(folder_path / 'dfm.csv')\n print(\"Saved dataframes to\", folder_path)\n return True\n\n def validate(self):\n checker = Validator(self.dfa, self.dfq, self.dfm)\n checker.run()\n print(\"Test values parsed OK for\", self)\n return True\n\n def __repr__(self):\n return \"Vintage({}, {})\".format(self.year, self.month)\n\n\nclass Collection:\n \"\"\"Methods to manipulate entire set of data releases.\"\"\"\n\n all_dates = 
DateHelper.get_supported_dates()\n\n @staticmethod\n def save_latest():\n year, month = DateHelper.get_latest_date()\n latest_vintage = Vintage(year, month)\n latest_vintage.save()\n\n @staticmethod\n def approve_latest():\n \"\"\"Quick check for algorithm on latest available data.\"\"\"\n year, month = DateHelper.get_latest_date()\n latest_vintage = Vintage(year, month)\n latest_vintage.validate()\n\n @staticmethod\n def save_all():\n for (year, month) in Collection.all_dates:\n Vintage(year, month).save()\n\n @staticmethod\n def approve_all():\n \"\"\"Checks all dates, runs for about 1-2 min of a fast computer.\n May fail if dataset not complete, eg word2csv written only part\n of CSV file.\n \"\"\"\n for year, month in Collection.all_dates:\n print(\"Checking\", year, month)\n vintage = Vintage(year, month)\n vintage.validate()\n\n\nif __name__ == \"__main__\":\n # Collection calls\n # Collection.approve_latest()\n # Collection.approve_all()\n Collection.save_latest()\n # Collection.save_all()\n\n # sample Vintage call\n year, month = 2017, 5\n vint = Vintage(year, month)\n vint.validate()\n dfa, dfq, dfm = vint.dfs()\n\n from io import StringIO\n s = dfa.to_csv()\n dx = pd.read_csv(StringIO(s))\n","sub_path":"src/csv2df/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"651703053","text":"'''\r\nCreated on Apr 16, 2020\r\n\r\n@author: shan.jiang\r\n'''\r\nimport requests,re,shutil,database,time,random\r\n\r\n\r\n\r\n#Navigating start page to find expansion list\r\nstarting_URL = \"https://hearthstone.gamepedia.com/Expansion\"\r\nstarting_HTML = str(requests.get(starting_URL).content)\r\ntable_match = re.compile(r\"Year(.*?)\",re.S|re.I).findall(starting_HTML)\r\ntable_HTML = table_match[0]\r\nexpansion_matches = re.compile(r\"(.*?)data page\",re.S|re.I).findall(card_HTML)\r\n if len(block_match)>0:\r\n block_HTML = block_match[0]\r\n \r\n #Type\r\n type_match = re.compile(r\"Type:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(type_match)>0:\r\n card_type = re.sub(\"<.*?>\",\"\",type_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n #Card Class\r\n class_match = re.compile(r\"Class:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(class_match)>0:\r\n card_class = re.sub(\"<.*?>\",\"\",class_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n #Race\r\n race_match = re.compile(r\"Subtype:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(race_match)>0:\r\n race = re.sub(\"<.*?>\",\"\",race_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n #Cardset\r\n set_match = re.compile(r\"Set:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(set_match)>0:\r\n cardset = re.sub(\"<.*?>\",\"\",set_match[0]).replace(\"\\\\n\",\"\").replace(\"\\\\'\",\"'\").strip()\r\n \r\n #Rarity\r\n rarity_match = re.compile(r\"Rarity:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(rarity_match)>0:\r\n rarity = re.sub(\"<.*?>\",\"\",rarity_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n #Cost\r\n cost_match = re.compile(r\"Cost:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(cost_match)>0:\r\n cost = re.sub(\"<.*?>\",\"\",cost_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n #Attack\r\n attack_match = re.compile(r\"Attack:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(attack_match)>0:\r\n attack = re.sub(\"<.*?>\",\"\",attack_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n 
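# Health, Durability, Abilities and Tags below reuse the same scrape pattern: regex-match the infobox row, strip residual HTML tags with re.sub, then trim whitespace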
#Health\r\n health_match = re.compile(r\"Health:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(health_match)>0:\r\n health = re.sub(\"<.*?>\",\"\",health_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n #Durability\r\n durability_match = re.compile(r\"Durability:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(durability_match)>0:\r\n durability = re.sub(\"<.*?>\",\"\",durability_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n #Abilities\r\n abilities_match = re.compile(r\"Abilities:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(card_HTML)\r\n if len(abilities_match)>0:\r\n abilities = re.sub(\"<.*?>\",\"\",abilities_match[0]).replace(\"\\\\n\",\"\").replace(\"\\\\'\",\"'\").strip().split(\",\")\r\n\r\n \r\n #Tags\r\n tags_match = re.compile(r\"Tags:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(card_HTML)\r\n if len(tags_match)>0:\r\n tags = re.sub(\"<.*?>\",\"\",tags_match[0]).replace(\"\\\\n\",\"\").strip().replace(\"\\\\'\",\"'\").split(\",\")\r\n\r\n \r\n #Artist\r\n artist_match = re.compile(r\"Artist:(.*?)<\\/td><\\/tr>\",re.S|re.I).findall(block_HTML)\r\n if len(artist_match)>0:\r\n artist = re.sub(\"<.*?>\",\"\",artist_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n #Card Text\r\n card_text_match = re.compile(r\"
<p>
(.*?)<\\/p>\",re.S|re.I).findall(block_HTML)\r\n if len(card_text_match)>0:\r\n card_text = re.sub(\"<.*?>\",\"\",card_text_match[0]).replace(\"\\\\n\",\"\").strip()\r\n \r\n #Back Text\r\n back_text_match = re.compile(r\"(.*?)<\\/i>\",re.S|re.I).findall(block_HTML)\r\n if len(back_text_match)>0:\r\n back_text = re.sub(\"<.*?>\",\"\",back_text_match[0]).replace(\"\\\\n\",\"\").replace(\"\\\\'\",\"'\").strip()\r\n \r\n #Regular and Gold Images\r\n image_url_match = re.compile(r\"(https:\\/\\/gamepedia.cursecdn.com\\/hearthstone_gamepedia\\/thumb.*?)\\\" decoding=\\\"async\\\"\",re.S|re.I).findall(block_HTML)\r\n if len(image_url_match)>0:\r\n reg_image_url = re.sub(\"<.*?>\",\"\",image_url_match[0]).strip()\r\n if len(image_url_match)>1:\r\n gold_image_url = re.sub(\"<.*?>\",\"\",image_url_match[1]).strip()\r\n \r\n \r\n #Craft and Disenchant Cost\r\n craft_match = re.compile(r\"Disenchanting.*?(\\d{1,4}).*?>(\\d{1,4})\",re.S|re.I).findall(card_HTML)\r\n if len(craft_match)>0:\r\n craft_cost=craft_match[0][0]\r\n disenchant_cost=craft_match[0][1]\r\n \r\n #Lore\r\n lore_match = re.compile(r\"id=\\\"Lore\\\">.*?

(.*?)<\\/p>\",re.S|re.I).findall(block_HTML)\r\n if len(lore_match)>0:\r\n lore = re.sub(\"<.*?>\",\"\",lore_match[0]).replace(\"\\\\n\",\"\").replace(\"\\\\'\",\"'\").strip()\r\n \r\n #Full Image\r\n full_image_url_match = re.compile(r\"\\\"\\\"\\s+src=\\\"(https:\\/\\/gamepedia.cursecdn.com\\/hearthstone_gamepedia.*?)\\\"0:\r\n full_image_url = re.sub(\"<.*?>\",\"\",full_image_url_match[0]).strip()\r\n \r\n try:\r\n database.insert_card(card_name=card_name,\\\r\n card_type=card_type,\\\r\n card_class=card_class,\\\r\n race=race,\\\r\n cardset=cardset,\\\r\n rarity=rarity,\\\r\n cost=cost,\\\r\n attack=attack,\\\r\n health=health,\\\r\n durability=durability,\\\r\n craft_cost=craft_cost,\\\r\n disenchant_cost=disenchant_cost,\\\r\n artist=artist,\\\r\n card_text=card_text,\\\r\n back_text=back_text,\r\n lore=lore) \r\n \r\n for ability in abilities:\r\n database.insert_ability(card_name,ability)\r\n \r\n for tag in tags:\r\n database.insert_tag(card_name,tag)\r\n \r\n except:\r\n print(\"Failed inserting card: \"+card_name)\r\n print(\"Failed inserting card: \"+card_name,file=error_log)\r\n \r\n #Download Images: \r\n try:\r\n response = requests.get(reg_image_url, stream=True)\r\n reg_image_file = open('images/card_images/'+card_name.replace(\":\",\"\")+'.png', 'wb')\r\n response.raw.decode_content = True\r\n shutil.copyfileobj(response.raw, reg_image_file)\r\n except:\r\n print(\"Download regular image failed for: \"+card_name)\r\n print(\"Download regular image failed for: \"+card_name,file=error_log)\r\n \r\n try: \r\n response = requests.get(gold_image_url, stream=True)\r\n gold_image_file = open('images/card_images/'+card_name.replace(\":\",\"\")+'_gold.png', 'wb')\r\n response.raw.decode_content = True\r\n shutil.copyfileobj(response.raw, gold_image_file)\r\n except:\r\n print(\"Download gold image failed for: \"+card_name)\r\n print(\"Download gold image failed for: \"+card_name,file=error_log)\r\n \r\n try:\r\n response = requests.get(full_image_url, stream=True)\r\n full_image_file = open('images/card_images/'+card_name.replace(\":\",\"\")+'_full.png', 'wb')\r\n response.raw.decode_content = True\r\n shutil.copyfileobj(response.raw, full_image_file)\r\n except:\r\n print(\"Download full image failed for: \"+card_name)\r\n print(\"Download full image failed for: \"+card_name,file=error_log)\r\n \r\n error_log.close()\r\n time.sleep(random.uniform(1,5))\r\n \r\n \r\n ","sub_path":"gamepedia_crawler.py","file_name":"gamepedia_crawler.py","file_ext":"py","file_size_in_byte":10107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"137239953","text":"from pytrinamic.evalboards import TMCLEval\nfrom pytrinamic.ic import TMC2240\nfrom pytrinamic.features import MotorControlModule\nfrom pytrinamic.helpers import BitField\n\n\nclass TMC2240_eval(TMCLEval):\n \"\"\"\n This class represents a TMC2240 Evaluation board.\n\n Communication is done over the TMCL commands writeMC and readMC. An\n implementation without TMCL may still use this class if these two functions\n are provided properly. See __init__ for details on the function\n requirements.\n \"\"\"\n def __init__(self, connection, module_id=1):\n \"\"\"\n Parameters:\n connection:\n Type: class\n A class that provides the necessary functions for communicating\n with a TMC2240. 
The required functions are\n connection.writeMC(registerAddress, value, moduleID)\n connection.readMC(registerAddress, moduleID, signed)\n for writing/reading to registers of the TMC2240.\n module_id:\n Type: int, optional, default value: 1\n The TMCL module ID of the TMC2240. This ID is used as a\n parameter for the writeMC and readMC functions.\n \"\"\"\n TMCLEval.__init__(self, connection, module_id)\n self.motors = [self._MotorTypeA(self, 0)]\n self.ics = [TMC2240()]\n\n # Use the motion controller functions for register access\n\n def write_register(self, register_address, value):\n return self._connection.read_drv(register_address, value, self._module_id)\n\n def read_register(self, register_address, signed=False):\n return self._connection.read_drv(register_address, self._module_id, signed)\n\n def write_register_field(self, field, value):\n return self.write_register(field[0], BitField.field_set(self.read_register(field[0]),\n field[1], field[2], value))\n\n def read_register_field(self, field):\n return BitField.field_get(self.read_register(field[0]), field[1], field[2])\n\n # Motion control functions\n\n def rotate(self, motor, value):\n self._connection.rotate(motor, value)\n \n def stop(self, motor):\n self._connection.stop(motor)\n \n def move_to(self, motor, position, velocity=None):\n if velocity and velocity != 0:\n # Set maximum positioning velocity\n self.motors[motor].set_axis_parameter(self.motors[motor].AP.MaxVelocity, velocity)\n self._connection.move_to(motor, position, self._module_id)\n\n class _MotorTypeA(MotorControlModule):\n def __init__(self, eval_board, axis):\n MotorControlModule.__init__(self, eval_board, axis, self.AP)\n\n class AP:\n TargetPosition = 0\n ActualPosition = 1\n TargetVelocity = 2\n ActualVelocity = 3\n MaxVelocity = 4\n MaxAcceleration = 5\n MaxCurrent = 6\n StandbyCurrent = 7\n PositionReachedFlag = 8\n THIGH = 26\n HighSpeedChopperMode = 28\n HighSpeedFullstepMode = 29\n MeasuredSpeed = 30\n internal_Rsense = 34\n GlobalCurrentScaler = 35\n MicrostepResolution = 140\n ChopperBlankTime = 162\n ConstantTOffMode = 163\n DisableFastDecayComparator = 164\n ChopperHysteresisEnd = 165\n ChopperHysteresisStart = 166\n TOff = 167\n SEIMIN = 168\n SECDS = 169\n smartEnergyHysteresis = 170\n SECUS = 171\n smartEnergyHysteresisStart = 172\n SG2FilterEnable = 173\n SG2Threshold = 174\n smartEnergyActualCurrent = 180\n smartEnergyStallVelocity = 181\n smartEnergyThresholdSpeed = 182\n SG4FilterEnable = 183\n SGAngleOffset = 184\n ChopperSynchronization = 185\n PWMThresholdSpeed = 186\n PWMGrad = 187\n PWMAmplitude = 188\n PWMFrequency = 191\n PWMAutoscale = 192\n PWMScaleSum = 193\n MSCNT = 194\n MEAS_SD_EN = 195\n DIS_REG_STST = 196\n FreewheelingMode = 204\n LoadValue = 206\n EncoderPosition = 209\n EncoderResolution = 210\n CurrentScalingSelector = 211\n CurrentRange = 212\n ADCTemperature = 213\n ADCIN = 214\n ADCSupply = 215\n ADCOvervoltageLimit = 216\n ADCOvertemperatureWarningLimit = 217\n Temperature = 218\n AIN = 219\n VSupply = 220\n OvervoltageLimit = 221\n OvertemperatureWarningLimit = 222\n nSLEEP = 223\n","sub_path":"pytrinamic/evalboards/TMC2240_eval.py","file_name":"TMC2240_eval.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"441333886","text":"import numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport mpl_toolkits.mplot3d.axes3d\r\n\r\ndef sample_nsphere(n):\r\n angles = np.random.normal(0, 1, n)\r\n angles_norm = 
np.linalg.norm(angles, 2)\r\n nsphere_sample = angles / angles_norm\r\n return nsphere_sample\r\n\r\n\r\ndef generate_random_basis(dim=3, allow_vertical=False, seed=42):\r\n np.random.seed(seed)\r\n\r\n # Generate random basis for hyperplane in d-dim space. Time complexity O(d^2).\r\n # Algorithm Monte-Carlo Graham Smith Orthogonalization\r\n m_v = []\r\n up_vec = np.zeros(dim)\r\n up_vec[dim - 1] = 1\r\n for i in range(0, dim):\r\n ei = sample_nsphere(dim)\r\n\r\n while True:\r\n is_vertical = math.isclose(0.8, np.abs(np.dot(ei, up_vec))) # Avoid close-to-vertical hyperplane\r\n if allow_vertical or not is_vertical:\r\n is_orthogonalizable = True # Graham Smith Orthogonalization\r\n for j in range(0, i):\r\n proj = np.dot(ei, m_v[j])*m_v[j]\r\n diff = ei - proj\r\n ei = diff\r\n if math.isclose(0, np.linalg.norm(ei, ord=2)):\r\n is_orthogonalizable = False\r\n break\r\n norm = ei / np.linalg.norm(ei, ord=2)\r\n ei = norm\r\n if is_orthogonalizable is True:\r\n break\r\n ei = sample_nsphere(dim)\r\n m_v.append(ei)\r\n\r\n return np.array(m_v).transpose()\r\n\r\nif __name__==\"__main__\":\r\n hyperplane_d_1 = np.random.uniform(-5, 5, (30, 3))\r\n hyperplane_d_1[0:, 2] = 0\r\n\r\n m_v = generate_random_basis(3, seed=3288)\r\n m_e = np.linalg.inv(m_v)\r\n\r\n transformed = np.matmul(hyperplane_d_1, m_e.transpose())\r\n #transformed = transformed + np.array([3, 5, 0])\r\n\r\n p0 = m_v[0]\r\n p1 = m_v[1]\r\n p2 = m_v[2]\r\n\r\n origin = [0, 0, 0]\r\n X, Y, Z = zip(origin, origin, origin)\r\n U, V, W = zip(p0*10, p1*10, p2*10)\r\n\r\n normal = p2\r\n d = 0\r\n xx, yy = np.meshgrid(range(-7, 7), range(-7, 7))\r\n z = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2]\r\n\r\n plt3d = plt.figure().gca(projection='3d')\r\n plt3d.plot_surface(xx, yy, z, alpha=0.2)\r\n plt3d.plot(transformed[0:, 0], transformed[0:, 1], transformed[0:, 2], 'go')\r\n plt3d.plot(hyperplane_d_1[0:, 0], hyperplane_d_1[0:, 1], hyperplane_d_1[0:, 2], 'mo')\r\n plt3d.quiver(X, Y, Z, U, V, W, color=\"red\")\r\n plt3d.set_xlim([-5, 5])\r\n plt3d.set_ylim([-5, 5])\r\n plt3d.set_zlim([-5, 5])\r\n plt.show()","sub_path":"mlp_tests/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"307674092","text":"\"\"\"\r\n\r\nModel: Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors (SARIMAX)\r\nMethod: Maximum Likelihood Estimation (MLE) via Kalman filter\r\n\r\nDataset: Monthly sunspots\r\nTask: Dynamic Forecasting of Univariate Time Series (Univariate Regression)\r\n\r\n Author: Ioannis Kourouklides, www.kourouklides.com\r\n License:\r\n https://github.com/kourouklides/artificial_neural_networks/blob/master/LICENSE\r\n\r\n\"\"\"\r\n# %%\r\n# IMPORTS\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n# standard library imports\r\nimport argparse\r\nfrom math import sqrt\r\nimport os\r\nimport random as rn\r\nfrom timeit import default_timer as timer\r\n\r\n# third-party imports\r\nimport numpy as np\r\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\r\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\r\n\r\n# %%\r\n\r\n\r\ndef sarimax_sunspots(args):\r\n \"\"\"\r\n Main function\r\n \"\"\"\r\n # %%\r\n # IMPORTS\r\n\r\n # code repository sub-package imports\r\n from artificial_neural_networks.utils.download_monthly_sunspots import \\\r\n download_monthly_sunspots\r\n 
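# NOTE (editor): these imports sit inside the function because __main__ changes the working directory (os.chdir('../../../../')) before calling sarimax_sunspots; deferring them is presumably what keeps the package path valid.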
from artificial_neural_networks.utils.generic_utils import affine_transformation\r\n from artificial_neural_networks.utils.vis_utils import regression_figs\r\n\r\n # %%\r\n\r\n if args.verbose > 0:\r\n print(args)\r\n\r\n # For reproducibility\r\n if args.reproducible:\r\n os.environ['PYTHONHASHSEED'] = '0'\r\n np.random.seed(args.seed)\r\n rn.seed(args.seed)\r\n\r\n # %%\r\n # Load the Monthly sunspots dataset\r\n\r\n sunspots_path = download_monthly_sunspots()\r\n sunspots = np.genfromtxt(\r\n fname=sunspots_path, dtype=np.float32, delimiter=\",\", skip_header=1, usecols=1)\r\n\r\n # %%\r\n # Train-Test split\r\n\r\n L_series = len(sunspots)\r\n\r\n split_ratio = 2 / 3 # between zero and one\r\n n_split = int(L_series * split_ratio)\r\n\r\n train_y = sunspots[:n_split]\r\n test_y = sunspots[n_split:]\r\n\r\n # %%\r\n # PREPROCESSING STEP\r\n\r\n scaling_factor = args.scaling_factor\r\n translation = args.translation\r\n\r\n n_train = train_y.shape[0] # number of training examples/samples\r\n\r\n # Apply preprocessing\r\n train_y_ = affine_transformation(train_y, scaling_factor, translation)\r\n test_y_ = affine_transformation(test_y, scaling_factor, translation)\r\n\r\n # %%\r\n # Model hyperparameters\r\n\r\n optimizer = 'lbfgs'\r\n # optimizer = 'powell'\r\n\r\n maxiter = 1\r\n # maxiter = 50\r\n\r\n s = args.seasonal_periods\r\n order = (args.autoregressive, args.integrated, args.moving_average)\r\n seasonal_order = (0, 1, 0, s)\r\n trend = 'ct'\r\n\r\n # %%\r\n # TRAINING PHASE\r\n\r\n if args.use_custom_params:\r\n custom_params = np.zeros(6)\r\n custom_params[0] = 0.6446147426983434 # 0.4446147426983434\r\n custom_params[1] = -0.00067190913463951184 # -0.00047190913463951184\r\n custom_params[2] = 0.0 # 0.0\r\n custom_params[3] = 0.9518981714555636 # 0.9418981714555636\r\n custom_params[4] = -0.38742006217597214 # -0.38742006217597214\r\n custom_params[5] = 460.2075087762523 # 460.2075087762523\r\n\r\n if args.verbose > 0:\r\n print('All parameters:')\r\n print(custom_params)\r\n\r\n fitted_params = custom_params\r\n else:\r\n train_outliers = np.zeros(n_train)\r\n\r\n train_model = SARIMAX(train_y_, order=order, seasonal_order=seasonal_order,\r\n exog=train_outliers, trend=trend)\r\n\r\n fitted_params = None\r\n\r\n if args.time_training:\r\n start = timer()\r\n\r\n for i in range(1):\r\n model_fit = train_model.fit(start_params=fitted_params, method=optimizer,\r\n maxiter=maxiter)\r\n fitted_params = model_fit.params\r\n\r\n if args.verbose > 0:\r\n print('All parameters:')\r\n print(fitted_params)\r\n\r\n if args.time_training:\r\n end = timer()\r\n duration = end - start\r\n print('Total time for training (in seconds):')\r\n print(duration)\r\n\r\n if args.verbose > 0:\r\n print(model_fit.summary())\r\n\r\n def model_predict(y):\r\n \"\"\"\r\n Predict using the SARIMAX Model (Dynamic Forecasting)\r\n \"\"\"\r\n n_y = y.shape[0]\r\n\r\n y_pred = np.zeros(n_y)\r\n\r\n pred_start = s\r\n pred_end = n_y - 1\r\n pred_outliers = np.zeros(n_y)\r\n pred_model = SARIMAX(y, order=order, seasonal_order=seasonal_order,\r\n exog=pred_outliers, trend=trend)\r\n y_pred[pred_start:pred_end + 1] = pred_model.filter(fitted_params).get_prediction(\r\n start=pred_start, end=pred_end, exog=pred_outliers, dynamic=True).predicted_mean\r\n\r\n return y_pred\r\n\r\n # %%\r\n # TESTING PHASE\r\n\r\n # Predict preprocessed values\r\n train_y_pred_ = model_predict(train_y_)\r\n test_y_pred_ = model_predict(test_y_)\r\n\r\n # Remove preprocessing\r\n train_y_pred = affine_transformation(train_y_pred_, 
scaling_factor, translation, inverse=True)\r\n test_y_pred = affine_transformation(test_y_pred_, scaling_factor, translation, inverse=True)\r\n\r\n train_y_pred[:s] = np.zeros(s)\r\n test_y_pred[:s] = np.zeros(s)\r\n\r\n train_rmse = sqrt(mean_squared_error(train_y, train_y_pred))\r\n train_mae = mean_absolute_error(train_y, train_y_pred)\r\n train_r2 = r2_score(train_y, train_y_pred)\r\n\r\n test_rmse = sqrt(mean_squared_error(test_y, test_y_pred))\r\n test_mae = mean_absolute_error(test_y, test_y_pred)\r\n test_r2 = r2_score(test_y, test_y_pred)\r\n\r\n if args.verbose > 0:\r\n print('Train RMSE: %.4f ' % (train_rmse))\r\n print('Train MAE: %.4f ' % (train_mae))\r\n print('Train (1 - R_squared): %.4f ' % (1.0 - train_r2))\r\n print('Train R_squared: %.4f ' % (train_r2))\r\n print('')\r\n print('Test RMSE: %.4f ' % (test_rmse))\r\n print('Test MAE: %.4f ' % (test_mae))\r\n print('Test (1 - R_squared): %.4f ' % (1.0 - test_r2))\r\n print('Test R_squared: %.4f ' % (test_r2))\r\n\r\n # %%\r\n # Data Visualization\r\n\r\n if args.plot:\r\n regression_figs(train_y=train_y, train_y_pred=train_y_pred,\r\n test_y=test_y, test_y_pred=test_y_pred)\r\n\r\n # %%\r\n\r\n model = {}\r\n model['params'] = fitted_params\r\n model['hyperparams'] = {}\r\n model['hyperparams']['order'] = order\r\n model['hyperparams']['seasonal_order'] = seasonal_order\r\n model['hyperparams']['trend'] = trend\r\n\r\n return model\r\n\r\n\r\n# %%\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # %%\r\n # IMPORTS\r\n\r\n os.chdir('../../../../')\r\n\r\n # %%\r\n # SETTINGS\r\n\r\n parser = argparse.ArgumentParser()\r\n\r\n # General settings\r\n parser.add_argument('--verbose', type=int, default=1)\r\n parser.add_argument('--reproducible', type=bool, default=False)\r\n parser.add_argument('--seed', type=int, default=0)\r\n parser.add_argument('--time_training', type=bool, default=True)\r\n parser.add_argument('--plot', type=bool, default=True)\r\n parser.add_argument('--use_custom_params', type=bool, default=False)\r\n\r\n # Settings for preprocessing and hyperparameters\r\n parser.add_argument('--scaling_factor', type=float, default=2)\r\n parser.add_argument('--translation', type=float, default=-100)\r\n parser.add_argument('--autoregressive', type=int, default=1)\r\n parser.add_argument('--integrated', type=int, default=0)\r\n parser.add_argument('--moving_average', type=int, default=1)\r\n parser.add_argument('--seasonal_periods', type=int, default=126)\r\n\r\n args = parser.parse_args()\r\n\r\n # %%\r\n # MODEL\r\n\r\n model_sarimax_sunspots = sarimax_sunspots(args)\r\n","sub_path":"artificial_neural_networks/applications/sequential_data/dynamic_time_series_forecasting/sarimax_sunspots.py","file_name":"sarimax_sunspots.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"511526151","text":"import os\nimport json\nfrom flask import Flask, render_template, redirect, request, url_for\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\nfrom bson.json_util import loads\n\napp = Flask(__name__)\n\napp.config[\"MONGO_DBNAME\"] = 'cook_book'\napp.config[\"MONGO_URI\"] = 'mongodb+srv://root:Jl011187@cluster0-u6mnz.mongodb.net/cook_book?retryWrites=true&w=majority'\n\nmongo = PyMongo(app)\n\n\n@app.route('/', methods=[\"GET\"])\n@app.route('/get_recipes', methods=[\"GET\"])\ndef get_recipes():\n recipes = mongo.db.recipes.find()\n # for recipe in recipes:\n # print(recipe)\n return render_template(\"recipes.html\", 
recipes=recipes)\n\n\n@app.route('/recipes_search')\ndef recipes_search():\n return render_template(\"search.html\", cuisines=mongo.db.cuisines.find())\n\n\n@app.route('/add_recipe', methods=[\"GET\"])\ndef add_recipe():\n return render_template(\"addrecipe.html\",\n cuisines=mongo.db.cuisines.find(),\n tools=mongo.db.tools.find())\n\n\n@app.route('/cuisine_match', methods=[\"POST\"])\ndef cuisine_match():\n the_cuisine = request.form['cuisine_name']\n search_recipes = mongo.db.recipes.find({'cuisine': the_cuisine})\n print(the_cuisine)\n print(search_recipes)\n return render_template(\"recipes.html\", recipes=search_recipes)\n\n\n@app.route('/insert_recipe', methods=['POST'])\ndef insert_recipe():\n recipes = mongo.db.recipes\n # recipes.insert_one(request.form.to_dict())\n\n data = request.form.to_dict()\n data['recipe_name'] = data['recipe_name']\n data.update({'ingredients': request.form.getlist('ingredients[]')})\n data.update({'steps': request.form.getlist('steps[]')})\n data.update({'tools': request.form.getlist('tools[]')})\n # Remove the property ingredients[] and steps[] from the dictionary data\n del data['ingredients[]']\n del data['steps[]']\n del data['tools[]']\n recipes.insert_one(data)\n return redirect(url_for('get_recipes'))\n\n\n@app.route('/edit_recipe/', methods=[\"GET\"])\ndef edit_recipe(recipe_id):\n the_recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(recipe_id)})\n all_cuisines = mongo.db.cuisines.find()\n all_tools = mongo.db.tools.find()\n return render_template('editrecipe.html', recipe=the_recipe,\n cuisines=all_cuisines, tools=all_tools)\n\n\n@app.route('/update_recipe/', methods=[\"POST\"])\ndef update_recipe(recipe_id):\n recipes = mongo.db.recipes\n recipes.update({'_id': ObjectId(recipe_id)},\n {\n 'recipe_name': request.form.get('recipe_name'),\n 'cuisine': request.form.get('cuisine'),\n 'season': request.form.get('season'),\n 'prep_time': request.form.get('prep_time'),\n 'cook_time': request.form.get('cook_time'),\n 'total_time': request.form.get('total_time'),\n 'main_ingredient': request.form.get('main_ingredient'),\n 'ingredients': request.form.getlist('ingredients[]'),\n 'steps': request.form.getlist('steps[]'),\n 'tools': request.form.getlist('tools[]')\n })\n return redirect(url_for('get_recipes'))\n\n\n@app.route('/delete_recipe/')\ndef delete_recipe(recipe_id):\n mongo.db.recipes.remove({'_id': ObjectId(recipe_id)})\n return redirect(url_for('get_recipes'))\n\n\n@app.route('/get_cuisines')\ndef get_cuisines():\n return render_template('cuisines.html',\n cuisines=mongo.db.cuisines.find())\n\n\n@app.route('/edit_cuisine/')\ndef edit_cuisine(cuisine_id):\n return render_template('editcuisine.html',\n cuisine=mongo.db.cuisines.find_one(\n {'_id': ObjectId(cuisine_id)}))\n\n\n@app.route('/update_cuisine/', methods=['POST'])\ndef update_cuisine(cuisine_id):\n cuisines = mongo.db.cuisines\n cuisines.update({'_id': ObjectId(cuisine_id)},\n {\n 'name': request.form.get('name'),\n })\n return redirect(url_for('get_cuisines'))\n\n\n@app.route('/delete_cuisine/')\ndef delete_cuisine(cuisine_id):\n mongo.db.cuisines.remove({'_id': ObjectId(cuisine_id)})\n return redirect(url_for('get_cuisines'))\n\n\n@app.route('/insert_cuisine', methods=['POST'])\ndef insert_cuisine():\n cuisine_doc = {'name': request.form.get('cuisine_type')}\n mongo.db.cuisines.insert_one(cuisine_doc)\n return redirect(url_for('get_cuisines'))\n\n\n@app.route('/add_cuisine')\ndef add_cuisine():\n return render_template('addcuisine.html')\n\n\n@app.route('/get_tools')\ndef 
get_tools():\n return render_template('tools.html',\n tools=mongo.db.tools.find())\n\n\n@app.route('/edit_tool/')\ndef edit_tool(tool_id):\n return render_template('edittool.html',\n tool=mongo.db.tools.find_one(\n {'_id': ObjectId(tool_id)}))\n\n\n@app.route('/update_tool/', methods=['POST'])\ndef update_tool(tool_id):\n tools = mongo.db.tools\n tools.update({'_id': ObjectId(tool_id)},\n {\n 'tool_name': request.form.get('tool_name'),\n 'tool_description': request.form.get('tool_description'),\n 'tool_cost': request.form.get('tool_cost'),\n 'tool_model': request.form.get('tool_model'),\n 'tool_color': request.form.get('tool_color'),\n 'tool_warranty': request.form.get('tool_warranty'),\n\n })\n return redirect(url_for('get_tools'))\n\n\n@app.route('/delete_tool/')\ndef delete_tool(tool_id):\n mongo.db.tools.remove({'_id': ObjectId(tool_id)})\n return redirect(url_for('get_tools'))\n\n\n@app.route('/insert_tool', methods=['POST'])\ndef insert_tool():\n tools = mongo.db.tools\n tools.insert_one(request.form.to_dict())\n return redirect(url_for('get_tools'))\n\n\n@app.route('/add_tool')\ndef add_tool():\n return render_template('addtool.html')\n\n\n@app.route('/buy_tool')\ndef buy_tool():\n return render_template('buytool.html',\n tools=mongo.db.tools.find())\n\n\n@app.route('/one_tool', methods=[\"POST\"])\ndef one_tool():\n the_tool = request.form['tool_name']\n print(the_tool)\n search_tools = mongo.db.tools.find({'tool_name': the_tool})\n\n return render_template(\"buytool.html\", tools=search_tools)\n\n\nif __name__ == '__main__':\n app.run(host=os.environ.get('IP'),\n port=int(os.environ.get('PORT')),\n debug=False)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"287110472","text":"from py_wake.examples.data.hornsrev1 import V80, Hornsrev1Site, wt16_x, wt16_y\nfrom py_wake.deficit_models.noj import NOJ, NOJDeficit\nimport os\nimport psutil\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport gc\nfrom py_wake.wind_farm_models.engineering_models import All2AllIterative\nimport memory_profiler\nfrom py_wake.tests import npt\nimport pytest\n\n\ndef get_memory_usage():\n pid = os.getpid()\n python_process = psutil.Process(pid)\n return python_process.memory_info()[0] / 1024**2\n\n\ndef test_memory_usage():\n if os.name == 'posix':\n pytest.skip('Memory usage seems not to work on linux')\n gc.collect()\n initial_mem_usage = get_memory_usage()\n wt = V80()\n site = Hornsrev1Site()\n x, y = site.initial_position.T\n\n for wfm, mem_min, mem_max in [(NOJ(site, wt), 65, 90),\n (All2AllIterative(site, wt, wake_deficitModel=NOJDeficit()), 525, 540)]:\n mem_usage, _ = memory_profiler.memory_usage(\n (wfm, (x, y), {'wd': np.arange(0, 360, 4)}), interval=0.001, max_usage=True, retval=True)\n\n mem_usage -= initial_mem_usage\n print(initial_mem_usage, mem_usage)\n assert mem_min < mem_usage < mem_max, (initial_mem_usage, mem_usage)\n\n return\n\n\ndef test_memory_leak():\n\n N = 10\n\n wt = V80()\n site = Hornsrev1Site()\n\n wfm_lst = [NOJ(site, wt), All2AllIterative(site, wt, wake_deficitModel=NOJDeficit())]\n memory_usage = np.zeros((len(wfm_lst), N))\n for i, wfm in enumerate(wfm_lst):\n memory_usage[i, 0] = get_memory_usage()\n for j in range(1, N):\n wfm(wt16_x, wt16_y, ws=10, wd=np.arange(0, 360, 30))\n gc.collect()\n memory_usage[i, j] = get_memory_usage()\n npt.assert_array_less(memory_usage - memory_usage[:, :1], 1) # at most 1mb more than 
initial usage\n if 0:\n for i, wfm in enumerate(wfm_lst):\n plt.plot(memory_usage[i], label=str(wfm.__class__.__name__))\n plt.legend()\n plt.show()\n","sub_path":"py_wake/tests/test_wind_farm_models/test_memory_usage.py","file_name":"test_memory_usage.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"193354315","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.shortcuts import render, redirect\nfrom kollab.forms import UserForm, UserProfileForm\nfrom kollab.models import Tag, UserProfile, Membership, Project\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.validators import validate_email\nimport re\n\n\ndef index(request):\n return render(request, 'kollab/index.html')\n\n\ndef login_page(request):\n context = {}\n context['click'] = \"=false\"\n return render(request, 'kollab/login.html', context)\n \ndef login_authenticate(request):\n context = {}\n if request.method == 'POST':\n email = request.POST.get('email', None)\n password = request.POST.get('password', None)\n \n try:\n user = User.objects.get(email=email)\n except ObjectDoesNotExist:\n context['loginerror'] = \"=true\"\n return render(request, 'kollab/login.html', context)\n print('error!')\n \n logged_user = authenticate(username=user.username, password=password)\n print(email + \" \" + password) \n \n if logged_user is not None:\n login(request, logged_user)\n print('got to login') \n prof = UserProfile.objects.get(user=logged_user)\n # temporary redirect to build profile, should probibly be collabprate / logged_user profile\n return HttpResponseRedirect(reverse('profile', kwargs={'user_name_slug': prof.slug}))\n else:\n context['loginerror'] = \"=true\"\n print('login failed')\n return render(request, 'kollab/login.html', context)\n \n\ndef login_register(request):\n context = {}\n if request.method == 'POST':\n username = request.POST.get('username', None)\n email = request.POST.get('user-email', None)\n password1 = request.POST.get('user-pass', None)\n password2 = request.POST.get('user-repeatpass', None)\n \n isValid = True;\n error = []\n if User.objects.filter(username=username).exists():\n error.append(\"Username \" + username + \" already exists.\")\n isValid = False\n \n if User.objects.filter(email=email).exists():\n error.append(\"Email \" + email + \" already exists.\")\n isValid = False\n \n if password1 != password2:\n error.append( \"Passwords were not identical.\")\n isValid = False\n \n \n \n if isValid:\n user = User.objects.create_user(username=username, password=password1, email=email)\n user.save()\n user = authenticate(request, username=username, password=password1)\n print(\"user: \"+ user.username)\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse('buildprofile'))\n\n if not isValid: \n context['error'] = error\n context['click'] = \"=true\"\n return render(request, 'kollab/login.html', context)\n \n \ndef logoff(request):\n logout(request)\n return HttpResponseRedirect(reverse('index'))\n\n\ndef secondstep(request):\n\n if request.method == 'POST':\n firstName = request['firstName']\n lastName = 
request['lastName']\n\n@login_required \ndef buildprofile(request):\n return render(request, 'kollab/buildprofile.html')\n\n@login_required\ndef buildprofile_data(request):\n if request.method == 'POST':\n tags = request.POST.getlist('tags', '')\n print(tags)\n \n pic = request.POST.get('profile-pic','')\n print(pic)\n \n loc_user = request.user #User.objects.get(username=\"test5\")\n \n prof, created = UserProfile.objects.get_or_create(user=loc_user) \n prof.picture = request.FILES.get('profile-pic', '')\n prof.selfinfo = request.POST.get('selfinfo','')\n prof.firstname = request.POST.get('firstName','')\n prof.lastname = request.POST.get('lastName','')\n \n prof.save()\n \n # reset tags\n prof.tags.clear()\n \n for i in range(0, len(tags)):\n if Tag.objects.filter(name=tags[i].lower()).exists():\n tag = Tag.objects.filter(name=tags[i].lower()).first()\n print(tag)\n else:\n tag = Tag.objects.create(name=tags[i])\n print(tag)\n tag.save()\n \n prof.tags.add(tag)\n \n prof.save()\n return HttpResponseRedirect(reverse('profile', kwargs={'user_name_slug': prof.slug}))\n \n return HttpResponse(\"big ass error\")\n \n\n# def step2():\n# if post\n# profile = UserProfile()\n# profile.user = logged in\n\n\n# def signup(request):\n# registered = False\n#\n# if request.method == 'POST':\n# user_form = UserForm(data=request.POST)\n# profile_form = UserProfileForm(data=request.POST)\n#\n# if user_form.is_valid() and profile_form.is_valid():\n# user = user_form.save()\n#\n# user.set_password(user.password)\n# user.save()\n#\n# profile1 = profile_form.save(commit=False)\n# profile1.user = user\n#\n# if 'picture' in request.FILES:\n# profile.picture = request.FILES['picture']\n#\n# profile1.save()\n#\n# registered = True\n#\n# else:\n# print(user_form.errors, profile_form.errors)\n#\n# else:\n# user_form = UserForm()\n# profile_form = UserProfileForm()\n#\n# # username = form.cleaned_data.get('username')\n# # raw_password = form.cleaned_data.get('password')\n# # email = form.cleaned_data.get('email')\n# # user = authenticate(username=username, password=raw_password)\n# # login(request, user)\n# # return redirect('home')\n#\n# return render(request, 'kollab/signup.html', {'user_form': user_form,\n# 'profile_form': profile_form,\n# 'registered': registered})\n \n \ndef profile(request, user_name_slug):\n context = {}\n print(user_name_slug)\n #user = User.objects.get(username=user_name_slug)\n #print(user.email)\n try:\n userprof = UserProfile.objects.get(slug=user_name_slug)\n except ObjectDoesNotExist:\n return HttpResponse(\"Does not exist...temp error page\")\n \n context['firstName'] = userprof.user.username\n context['secondName'] = \"\"\n context['location'] = \"Exampleton\"\n context['latlon'] = [userprof.lat, userprof.lon]\n context['picture'] = userprof.picture\n context['selfinfo'] = userprof.selfinfo\n context['tags'] = userprof.tags.all()\n context['collaborations'] = Membership.objects.filter(userProfile = UserProfile.objects.get(slug=user_name_slug))\n \n return render(request, 'kollab/profile.html', context)\n \ndef project(request, project_name_slug):\n context = {}\n try:\n projprof = Project.objects.get(slug=project_name_slug)\n except ObjectDoesNotExist:\n return HttpResponse(\"Does not exist...temp error page\")\n \n context['name'] = projprof.name\n context['short'] = projprof.short\n context['long'] = projprof.long\n context['picture'] = projprof.picture\n context['members'] = projprof.members\n \n tags = projprof.tags.all()\n tagset = set()\n for t in tags.all():\n 
tagset.add(t.name)\n \n context['tags'] = tagset\n \n return render(request, 'kollab/project.html', context)\n \ndef collaborators(request):\n return render(request, 'kollab/collaborators.html')\n \n\ndef searchtags(request):\n context = {}\n if request.method == 'POST':\n raw_query = request.POST.get('search_query', None)\n search_option = request.POST.get('search_option', None)\n search_query = clean(raw_query.lower())\n query_tags, context['error_message'] = removeUseless(search_query)\n \n if search_option != \"Projects\":\n context['results'] = get_user_results(query_tags)\n context['type'] = 'users'\n else:\n context['results'] = get_project_results(query_tags)\n context['type'] = 'projects'\n \n \n print(search_query)\n \n print(query_tags, context['error_message'])\n \n \n return render(request, 'kollab/collaborators.html', context)\n else:\n print('not a post!')\n context['error_message'] = \"Sorry, but the system has failed to search\"\n return render(request, 'kollab/collaborators.html', context)\n \ndef embedded_search(request, tag_slug, search_type):\n context = {}\n \n query_tags = []\n \n query_tags.append(tag_slug.lower())\n \n if search_type != \"project\":\n context['results'] = get_user_results(query_tags)\n context['type'] = 'users'\n else:\n context['results'] = get_project_results(query_tags)\n context['type'] = 'projects'\n \n \n print(query_tags, context['results'])\n \n \n return render(request, 'kollab/collaborators.html', context)\n \n\n# remove everything that is not a letter or regular space, return array\ndef clean(raw_query):\n return re.sub('[^a-zA-Z ]', \"\", raw_query).split(\" \")\n \n#remove tags that dont exist\ndef removeUseless(search_query):\n error = \"\"\n for i in range(0, len(search_query)):\n if not Tag.objects.filter(name__contains=search_query[i]).exists():\n error += search_query[i] + \" \"\n search_query[i] = \"Not Valid\"\n else:\n print(search_query[i])\n \n return search_query, error;\n \ndef get_user_results(query_tags):\n results = UserProfile.objects.none()\n for i in range(0, len(query_tags)):\n if 'Not Valid' not in query_tags[i]:\n query = UserProfile.objects.filter(tags__name__contains=query_tags[i])\n results = results | query\n \n print(results.distinct())\n return results.distinct();\n \ndef get_project_results(query_tags):\n results = Project.objects.none()\n for i in range(0, len(query_tags)):\n if 'Not Valid' not in query_tags[i]:\n query = Project.objects.filter(tags__name__contains=query_tags[i])\n results = results | query\n \n print(results.distinct())\n return results.distinct();","sub_path":"kollab/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"219887694","text":"#! 
/usr/bin/env python3\n\n__author__ = 'Nina Verstraete, Jacques TOEN & Nicolas JEANNE'\n__copyright__ = 'GNU General Public License'\n__version__ = '1.0.0'\n__email__ = 'nicolas.jeanne@ntymail.com'\n\nimport argparse\nimport sys\nimport os\nimport logging\nimport urllib\nimport subprocess\nfrom datetime import datetime\nimport parse_uniprot\nimport midi_operations\n\n\n# Check range for tempo argument, must be between 60 and 150.\n# @param x: [str] value of tempo argument in BPM.\n# @return: [int] the tempo.\ndef restricted_tempo(x):\n x = int(x)\n if x < 60 or x > 150:\n raise argparse.ArgumentTypeError('{} not in range 60 to 150.'.format(x))\n return x\n\nif __name__ == '__main__':\n descr = '''\n proteios_sounds.py v.{}\n\n Created by {}.\n Contact: {}\n {}\n\n Create a MIDI file from a protein entry of the UniProt database (https://www.uniprot.org/).\n '''.format(__version__, __author__, __email__, __copyright__)\n\n # Parse arguments\n parser = argparse.ArgumentParser(description=descr, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-o', '--out', required=True, help='path to the results directory.')\n parser.add_argument('-s', '--score', required=False, action='store_true', help='use musescore software to create the score corresponding to the MIDI file.')\n parser.add_argument('-p', '--play', required=False, action='store_true', help='play the music with Timidity, just for tests.')\n parser.add_argument('-t', '--tempo', required=False, type=restricted_tempo, help='set the tempo in BPM. Value between 60 and 150.')\n parser.add_argument('-i', '--instruments', required=False, nargs=3, help='set channel 0, 1 and 2 instruments, restricted to 3 values between 0 and 127 separated by spaces. Default is 0: Acoustic Grand, 42: Cello and 65: Alto Sax. See: http://www.pjb.com.au/muscript/gm.html#patch for details.')\n parser.add_argument('-d', '--debug', required=False, action='store_true', help='debug mode, create a log file which details each entry of the MIDI file.')\n parser.add_argument('uniprot_accession_number', help='the protein accession number in the UniProt database. Example: Human Interleukin-8 > P10145')\n args = parser.parse_args()\n\n\n # check if instruments are between 0 and 127\n if args.instruments:\n for i in range(len(args.instruments)):\n instru = int(args.instruments[i])\n if instru < 0 or instru > 127:\n raise argparse.ArgumentTypeError('{} should be 3 integers between 0 and 127.'.format(args.instruments))\n else:\n args.instruments[i] = instru\n instrus = args.instruments\n else:\n instrus = [0, 42, 65]\n\n # tempo\n if args.tempo:\n tempo = int(args.tempo)\n else:\n tempo = 100 # In BPM\n\n ### midi notes on major mode correspondance with AA sorted by decreasing molecular weight\n # keys are set as DO (48, 60, 72) degrees I, SOL (53, 55) degrees V, FA (67, 65) degrees IV, RE (50, 62) degrees II,\n # MI (52, 64) degrees III, LA (57, 69) degrees VI and SI (59, 71) degrees VII. 
Finally, we add 7 alterations #\n # following the ascending quint (54, 66, 49, 61, 56, 68, 51)\n initial_midi_keys = [48, 60, 72, 53, 55, 67, 65, 50, 62, 52, 64, 57, 69, 59, 71, 54, 66, 49, 61, 56, 68, 51]\n midi_keys = {}\n\n ### Physico-chemical properties of AA\n AA_PHY_CHI = {'A': {'hybrophobic', 'small'},\n 'R': {'polar', 'pos_charged'},\n 'N': {'polar', 'small'},\n 'D': {'polar', 'small', 'neg_charged'},\n 'C': {'hydrophobic', 'polar', 'small'},\n 'E': {'polar', 'neg_charged'},\n 'Q': {'polar'},\n 'G': {'hydrophobic', 'small'},\n 'H': {'hydrophobic', 'polar', 'pos_charged', 'aromatic'},\n 'I': {'hydrophobic', 'aliphatic'},\n 'L': {'hydrophobic', 'aliphatic'},\n 'K': {'hydrophobic', 'polar', 'pos_charged'},\n 'M': {'hydrophobic'},\n 'F': {'hydrophobic', 'aromatic'},\n 'P': {'small'},\n 'S': {'polar', 'small'},\n 'T': {'hydrophobic', 'polar', 'small'},\n 'W': {'hydrophobic', 'polar', 'aromatic'},\n 'Y': {'hydrophobic', 'polar', 'aromatic'},\n 'V': {'hydrophobic', 'small', 'aliphatic'}}\n\n # create the output directory\n out_dir = os.path.abspath(args.out)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n # create the log file\n if args.debug:\n logPath = os.path.join(out_dir, '{}_{}bpm_{}.log'.format(args.uniprot_accession_number, tempo, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')))\n logging.basicConfig(filename=logPath, level=logging.DEBUG, format='%(asctime)s\\t%(levelname)s:\\t%(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n logger = logging.getLogger(__name__)\n logger.info(' '.join(sys.argv))\n\n\n if args.debug:\n logger.info('tempo: {} BPM'.format(tempo))\n\n # parsing of uniprot entry\n protein = parse_uniprot.parse_entry(args.uniprot_accession_number)\n\n sequence = protein['seq']\n sequence_length = len(sequence)\n protein['seq'] = {}\n if args.debug:\n logger.info('AA sequence ({} AA): {}'.format(sequence_length, sequence))\n for i in range(sequence_length):\n protein['seq'][i] = sequence[i]\n # frequence of AA in the sequence\n set_AA = set(''.join(sequence))\n proportion_AA = {}\n for aa in set_AA:\n proportion_AA[aa] = sequence.count(aa) / sequence_length\n # sort by decreasing frequency\n proportion_AA = sorted(proportion_AA.items(), key=lambda kv: kv[1], reverse=True)\n\n for idx in range(0, len(proportion_AA)):\n midi_keys[proportion_AA[idx][0]] = initial_midi_keys[idx]\n\n print(midi_keys)\n\n # create the MIDI file\n midi_file_path = midi_operations.create_midi(args.uniprot_accession_number, protein, midi_keys, tempo, instrus, out_dir, AA_PHY_CHI, logger, args.debug)\n print('Done!\\nMIDI file for {} {} ({}) created in: {}'.format(protein['entry_name'], protein['organism'], args.uniprot_accession_number, midi_file_path))\n\n\n # play the file with timidity if asked\n if args.play:\n cmd = 'timidity {}'.format(midi_file_path)\n subprocess.run(cmd, shell=True)\n","sub_path":"proteios_sounds.py","file_name":"proteios_sounds.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"468781687","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, ValidationError\nimport odoo.addons.decimal_precision as dp\n\n\nclass stockPackOperation(models.Model):\n _inherit = \"stock.pack.operation\"\n\n @api.multi\n @api.constrains('qty_done')\n def check_done_qty(self):\n for pack in self:\n if pack.picking_id.picking_type_id.code == 'outgoing':\n if pack.qty_done > pack.ordered_qty:\n raise ValidationError(\n _('Validation 
Error! Delivered Quantity cannot exceed Ordered Quantity'))","sub_path":"warehouse_changes/models/stock_pack_operation.py","file_name":"stock_pack_operation.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154271539","text":"import json\nimport nltk\nimport inflect\nimport tqdm\nimport time\nimport requests \nfrom bs4 import BeautifulSoup as bs\nimport gensim\nimport pandas as pd\nimport urllib.request\nimport numpy as np\nimport collections\n\nengine = inflect.engine()\n \ndef singularize(word):\n ingr = engine.singular_noun(word)\n return word if (not ingr) else ingr\n\ndef clean_ing_word(word) : return singularize(word).lower()\n\ndef clean_whole_ing(ing) : return \" \".join([clean_ing_word(word) for word in ing.split(\" \")])\n\ndef string_to_float(x) :\n try :\n x = x.split('/')\n if len(x) > 1 :\n return (float(x[0])/float(x[1]))\n else :\n return float(x[0])\n except :\n raise ValueError(\"not possible to cast \", x, \"to float\")\n \ndef string_to_frac(x) :\n try :\n if '/' in x:\n return string_to_float(x)\n else :\n return(float(x[0]) / float(x[1:]))\n except :\n raise ValueError(\"not possible to cast \", x, \"to float\")\n \ndef fmt_unit(x) :\n return singularize(x.lower())\n\nunits = [\"bushel\", \"cup\", \"dash\", \"drop\", \"fl.\", \"oz\", \"g\", \"cc\", \"gram\", \"gallon\", \"glass\",\n \"kg\", \"liter\", \"ml\", \"ounce\", \"c.\", \"pinch\", \"pint\", \"pound\", \"lb\", \"quart\",\n \"scoop\", \"shot\", \"tablespoon\", \"teaspoon\", \"tsp\", \"tbsp\"]\n\ndef extract_quantity(tags, recipe_index = None, ingredient_index=None) : \n try :\n ingr = det_ingr[recipe_index]['ingredients'][ingredient_index]['text']\n ingr_first_word = ingr.split(\" \")[0]\n\n if ((len(tags) >= 2) and (tags[0][1] == 'CD')) :\n\n #nb (nb+ unit) ing\n if ((tags[1][0] == '(') and (len(tags)>6)):\n idx_par = tags.index((')', ')')) \n quant_in = tags[2:idx_par-1]\n quant=0\n\n #nb nb \n if len(quant_in) == 2 :\n quant = (string_to_float(quant_in[0][0]) + string_to_frac(quant_in[1][0])) / 2\n elif len(quant_in) == 1:\n quant = string_to_float(quant_in[0][0])\n\n else :\n return None\n\n unit = fmt_unit(tags[idx_par-1][0]) \n return (quant, unit, ingr)\n\n\n #nb+ [unit] ing\n else :\n tag1_nb = tags[1][1] == 'CD'\n tag1_to = tags[1][0] == 'to'\n tag1_unit = fmt_unit(tags[1][0]) in units\n tag1_starts_minus = tags[1][0][0] == '-'\n\n #nb unit ing\n if (tag1_unit) :\n return (string_to_float(tags[0][0]), fmt_unit(tags[1][0]), ingr)\n\n #nb nb ...\n elif tag1_nb : \n first_nb = string_to_float(tags[0][0]) + string_to_frac(tags[1][0])\n\n #nb nb unit ing\n if fmt_unit(tags[2][0]) in units :\n return (first_nb, fmt_unit(tags[2][0]), ingr)\n\n #nb nb to nb ...\n elif tags[2][0] == 'to':\n\n #nb nb to nb unit ing\n if fmt_unit(tags[4][0]) in units :\n return ((first_nb + string_to_float(tags[3][0])) / 2, fmt_unit(tags[4][0]), ingr)\n\n #nb nb to nb nb ...\n elif tags[4][1] == 'CD' :\n\n second_nb = string_to_float(tags[3][0]) + string_to_frac(tags[4][0])\n average_qt = (first_nb + second_nb) / 2\n\n #nb nb to nb nb unit ing\n if fmt_unit(tags[5][0]) in units :\n return (average_qt, fmt_unit(tags[5][0]), ingr)\n\n #nb nb to nb nb ing\n else :\n return (average_qt, \"\", ingr)\n\n #nb nb to nb ing\n else :\n return ((first_nb + string_to_float(tags[4][0])) / 2, \"\", ingr)\n\n #nb nb ing\n else :\n return (first_nb, \"\", ingr) \n\n\n\n #nb -nb ...\n elif tag1_starts_minus :\n first_nb = (string_to_float(tags[0][0]) 
+ string_to_float(tags[1][0][1:]))/2\n\n #nb -nb unit ing\n if fmt_unit(tags[2][0]) in units :\n return (first_nb, fmt_unit(tags[2][0]), ingr)\n\n #nb -nb ing\n else :\n return (first_nb, \"\", ingr)\n\n #nb to nb ...\n elif (tag1_to):\n first_nb = string_to_float(tags[0][0])\n\n #nb to nb nb ...\n if (tags[3][1] == 'CD') :\n second_nb = string_to_float(tags[2][0]) + string_to_frac([3][0])\n avg_qt = (first_nb + second_nb) / 2\n\n #nb to nb nb unit ing\n if fmt_unit(tags[4][0]) in units :\n return (avg_qt, fmt_unit(tags[4][0]), ingr)\n\n #nb to nb nb ing\n else :\n return (avg_qt, \"\", ingr)\n\n #nb to nb unit ing\n elif (fmt_unit(tags[3][0]) in units) :\n second_nb = string_to_float(tags[2][0])\n avg_qt = (first_nb + second_nb) / 2\n return (avg_qt, fmt_unit(tags[3][0]), ingr)\n\n #nb to nb ing\n else :\n second_nb = string_to_float(tags[2][0])\n avg_qt = (first_nb + second_nb) / 2\n return (avg_qt, \"\", ingr)\n #nb ing \n else :\n return (string_to_float(tags[0][0]), \"\", ingr)\n except :\n return None\n \ndef extract_quantity_from_raw(raw_ingr) : \n if True:\n tags = nltk.pos_tag(nltk.word_tokenize(raw_ingr))\n tags_words = [t[0] for t in tags]\n\n if ((len(tags) >= 2) and (tags[0][1] == 'CD')) :\n\n #nb (nb+ unit) ing\n if ((tags[1][0] == '(') and (len(tags)>6)):\n idx_par = tags.index((')', ')')) \n quant_in = tags[2:idx_par-1]\n quant=0\n\n #nb nb \n if len(quant_in) == 2 :\n quant = (string_to_float(quant_in[0][0]) + string_to_frac(quant_in[1][0])) / 2\n elif len(quant_in) == 1:\n quant = string_to_float(quant_in[0][0])\n\n else :\n return None\n\n unit = fmt_unit(tags[idx_par-1][0]) \n ingr = \" \".join(tags_words[idx_par+1])\n return (quant, unit, ingr)\n\n\n #nb+ [unit] ing\n else :\n tag1_nb = tags[1][1] == 'CD'\n tag1_to = tags[1][0] == 'to'\n tag1_unit = fmt_unit(tags[1][0]) in units\n tag1_starts_minus = tags[1][0][0] == '-'\n\n #nb unit ing\n if (tag1_unit) :\n ingr = \" \".join(tags_words[2:])\n return (string_to_float(tags[0][0]), fmt_unit(tags[1][0]), ingr)\n\n #nb nb ...\n elif tag1_nb : \n first_nb = string_to_float(tags[0][0]) + string_to_frac(tags[1][0])\n\n #nb nb unit ing\n if fmt_unit(tags[2][0]) in units :\n ingr = \" \".join(tags_words[3:])\n return (first_nb, fmt_unit(tags[2][0]), ingr)\n\n #nb nb to nb ...\n elif tags[2][0] == 'to':\n\n #nb nb to nb unit ing\n if fmt_unit(tags[4][0]) in units :\n ingr = \" \".join(tags_words[5:])\n return ((first_nb + string_to_float(tags[3][0])) / 2, fmt_unit(tags[4][0]), ingr)\n\n #nb nb to nb nb ...\n elif tags[4][1] == 'CD' :\n\n second_nb = string_to_float(tags[3][0]) + string_to_frac(tags[4][0])\n average_qt = (first_nb + second_nb) / 2\n\n #nb nb to nb nb unit ing\n if fmt_unit(tags[5][0]) in units :\n ingr = \" \".join(tags_words[6:])\n return (average_qt, fmt_unit(tags[5][0]), ingr)\n\n #nb nb to nb nb ing\n else :\n ingr = \" \".join(tags_words[5:])\n return (average_qt, \"\", ingr)\n\n #nb nb to nb ing\n else :\n ingr = \" \".join(tags_words[4:])\n return ((first_nb + string_to_float(tags[4][0])) / 2, \"\", ingr)\n\n #nb nb ing\n else :\n ingr = \" \".join(tags_words[2:])\n return (first_nb, \"\", ingr) \n\n\n\n #nb -nb ...\n elif tag1_starts_minus :\n first_nb = (string_to_float(tags[0][0]) + string_to_float(tags[1][0][1:]))/2\n\n #nb -nb unit ing\n if fmt_unit(tags[2][0]) in units :\n ingr = \" \".join(tags_words[3:])\n return (first_nb, fmt_unit(tags[2][0]), ingr)\n\n #nb -nb ing\n else :\n ingr = \" \".join(tags_words[2:])\n return (first_nb, \"\", ingr)\n\n #nb to nb ...\n elif (tag1_to):\n first_nb = 
string_to_float(tags[0][0])\n\n #nb to nb nb ...\n if (tags[3][1] == 'CD') :\n second_nb = string_to_float(tags[2][0]) + string_to_frac([3][0])\n avg_qt = (first_nb + second_nb) / 2\n\n #nb to nb nb unit ing\n if fmt_unit(tags[4][0]) in units :\n ingr = \" \".join(tags_words[5:])\n return (avg_qt, fmt_unit(tags[4][0]), ingr)\n\n #nb to nb nb ing\n else :\n ingr = \" \".join(tags_words[4:])\n return (avg_qt, \"\", ingr)\n\n #nb to nb unit ing\n elif (fmt_unit(tags[3][0]) in units) :\n second_nb = string_to_float(tags[2][0])\n avg_qt = (first_nb + second_nb) / 2\n ingr = \" \".join(tags_words[4:])\n return (avg_qt, fmt_unit(tags[3][0]), ingr)\n\n #nb to nb ing\n else :\n second_nb = string_to_float(tags[2][0])\n avg_qt = (first_nb + second_nb) / 2\n ingr = \" \".join(tags_words[3:])\n return (avg_qt, \"\", ingr)\n #nb ing \n else :\n ingr = \" \".join(tags_words[1:])\n return (string_to_float(tags[0][0]), \"\", ingr)\n\ndef init_recipes_valid(recipes, det_ingr):\n usda_no_quant_indices = []\n usda_no_quant = 0\n for index in range(len(recipes)) :\n try :\n det_ingr[index]['valid'].index(False)\n except :\n usda_no_quant += 1\n usda_no_quant_indices.append(index)\n recipes = [recipes[index] for index in usda_no_quant_indices] \n det_ingr = [det_ingr[index] for index in usda_no_quant_indices]\n \n return recipes, det_ingr\n\ndef ingredients_count(recipes, det_ingr):\n # Generate ingredients count\n ingredients_counter = collections.Counter()\n \n for e, recipe in tqdm.tqdm_notebook(enumerate(recipes)) :\n ingredients_counter.update([c['text'] for c in det_ingr[e]['ingredients']])\n \n # Filter the ingredients to keep the most important ones (appear more than 50 times)\n common_ing_counts = []\n thresh=50\n\n for c in ingredients_counter.most_common() :\n if c[1] >= thresh :\n common_ing_counts.append(c)\n else :\n break\n \n rep_with_ing =[]\n\n # Rewriting recipes with ingredients\n for e, r in tqdm.tqdm_notebook(enumerate(recipes)) :\n ingredients = []\n for ing_index in range(len(r['ingredients'])) :\n ingredients.append(det_ingr[e]['ingredients'][ing_index]['text'])\n\n rep_with_ing.append(ingredients)\n\n json.dump(rep_with_ing, open(\"../generated/1m_recipes.json\", 'w'))\n \n \ndef main():\n # Load data\n recipes = json.load(open(\"../data/1M/recipe1M_layers/layer1.json\"))\n det_ingr = json.load(open(\"../data/1M/det_ingr.json\"))\n unit_quantities_dict = json.load(open(\"../generated/1m_unit_quantities.json\"))\n \n # Filter recipes with only valid ingredients\n recipes, det_ingr = init_recipes_valid(recipes, det_ingr)\n \n # Generate ingredients count and rewrite recipes with ingredients\n ingredients_count(recipes, det_ingr)\n \n #Extracting quantities for recipes\n measurable_indices = []\n all_extracted = []\n unit_ing = collections.Counter()\n measurables = 0\n nb_to_try = len(recipes)\n sizes = ['large', 'medium', 'small']\n\n for e, r in tqdm.tqdm_notebook(enumerate(recipes[:nb_to_try])) :\n\n extracted = []\n contains_immeasurable = False\n\n for i, ingredient in enumerate(r['ingredients']) :\n\n #remove sizes \n ingredient['text'] = \" \".join([c for c in ingredient['text'].split(\" \") if (not c.lower().strip() in sizes)])\n\n #tag the ingredient definition\n tags = nltk.pos_tag(nltk.word_tokenize(ingredient['text']))\n\n raw_ing_to_detect = det_ingr[e]['ingredients'][i]['text']\n ing_to_detect = \" \".join([singularize(c) for c in raw_ing_to_detect.split(\" \")])\n\n a = extract_quantity(tags, e, i)\n\n if a is not None and a[1] != \"\":\n extracted.append(a)\n\n else :\n\n 
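#NOTE (editor): landing here means extract_quantity returned no unit (or nothing at all); the checks below try to salvage countable ingredients (e.g. '2 eggs') and salt before flagging the recipe as immeasurable.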
#detect units ingredients\n if ((a is not None) \\\n and (a[1] == \"\") \\\n and (len(ingredient['text'].split(\" \")) > 2) \\\n and (len(ing_to_detect.split(\" \")) > 0) \\\n and (singularize(ingredient['text'].split(\" \")[1]).strip() == ing_to_detect.split(\" \")[0].strip())) :\n unit_ing.update([ing_to_detect])\n extracted.append(a)\n\n else :\n\n #detect salt (usually nio quantities)\n ing_is_salt = ((('salt', 'NN') in tags) or (('salt', 'NNP') in tags) or (('Salt', 'NN') in tags) or (('Salt', 'NNP') in tags)) \n\n if not ing_is_salt :\n contains_immeasurable = True\n break\n elif ing_is_salt : \n extracted.append((2.5, 'g', 'kosher salt')) \n \n # Recipes with both quantities and usda id for all ingredients\n usda_and_quant_recipes = []\n\n for r in tqdm.tqdm_notebook(all_extracted) :\n ingredients_entries = []\n all_actually_measurable = True\n\n for ing in r :\n if ing[1] == \"\" :\n cleaned_eq = clean_whole_ing(ing[2])\n\n if cleaned_eq in unit_quantities_dict.keys() :\n unit_quant = unit_quantities_dict[cleaned_eq]\n ingredients_entries.append((unit_quant[0], unit_quant[1], ing[2]))\n\n else :\n all_actually_measurable = False\n break\n else :\n ingredients_entries.append(ing)\n\n if all_actually_measurable :\n usda_and_quant_recipes.append(ingredients_entries)\n\n json.dump(usda_and_quant_recipes, open(\"../generated/1m_quant_recipes.json\", 'w'))\n \n \n\nif __name__ == \"__main__\":\n main()","sub_path":"python/one_m.py","file_name":"one_m.py","file_ext":"py","file_size_in_byte":16060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"130264008","text":"people = [[6,0],[5,0],[4,0],[3,2],[2,2],[1,4]]\nsortpeople=sorted(people,key=lambda x:x[0],reverse=True)\nimport collections\nindeg=collections.defaultdict(list)\nmemo=collections.defaultdict(int)\nstack=[]\nfor item in sortpeople:\n memo[item[0]]=0\n if item[1]==0:\n stack.append(item)\n else:\n indeg[item[0]].append(item)\nfor i in indeg:\n indeg[i].sort()\nprint(indeg,stack)\nans=[]\nwhile stack:\n temp=stack.pop()\n ans.append(temp)\n for i in memo:\n if i<=temp[0]:\n memo[i]+=1\n while indeg[i] and indeg[i][0][1]<=memo[i]:\n stack.append(indeg[i].pop(0))\nprint(ans)\n","sub_path":"406.py","file_name":"406.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"146166927","text":"from .base import Stim\nfrom moviepy.audio.io.AudioFileClip import AudioFileClip\nimport six\n\n\nclass AudioStim(Stim):\n\n ''' An audio clip. For now, only handles wav files. 
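Audio is loaded through moviepy's AudioFileClip; multi-channel data is averaged down to mono in __init__.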
'''\n\n def __init__(self, filename, onset=None, sampling_rate=44100):\n\n self.filename = filename\n self.sampling_rate = sampling_rate\n\n self._load_clip()\n\n # Small default buffer isn't ideal, but moviepy has persistent issues\n # with some files otherwise; see https://github.com/Zulko/moviepy/issues/246\n self.data = self.clip.to_soundarray(buffersize=1000)\n duration = self.clip.duration\n\n if self.data.ndim > 1:\n # Average channels to make data mono\n self.data = self.data.mean(axis=1)\n\n super(AudioStim, self).__init__(filename, onset=onset, duration=duration)\n\n def _load_clip(self):\n self.clip = AudioFileClip(self.filename, fps=self.sampling_rate)\n\n def __getstate__(self):\n d = self.__dict__.copy()\n d['clip'] = None\n return d\n\n def __setstate__(self, d):\n self.__dict__ = d\n self._load_clip()\n","sub_path":"pliers/stimuli/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"306169313","text":"import xmltodict\nfrom collections import defaultdict\n\nfrom customer.models import Shipper, Customer, CustomerAPI\nfrom location.models import Pincode, Address\n\ndef api_auth(request):\n if request.GET.get('username') or request.POST.get('username') :\n #customer_api = CustomerAPI.objects.get(username=request.POST['username'])\n if request.GET.get('username'):\n username = request.GET.get('username')\n password = request.GET.get('password')\n if request.POST.get('username'):\n username = request.POST.get('username')\n password = request.POST.get('password')\n try:\n customer_api = CustomerAPI.objects.get(username=username)\n if username == 'ecomexpress':\n return customer_api\n if customer_api.password == password:\n if customer_api.ipaddress != \"0\":\n ip_list = customer_api.ipaddress.split(\",\")\n request_ip = request.META.get('REMOTE_ADDR').strip()\n if request_ip in ip_list:\n return customer_api\n else:\n return False\n else:\n return customer_api\n except CustomerAPI.DoesNotExist:\n return False\n else:\n return False\n\n\ndef create_vendor(xml_input, customer):\n\n def handle_record(record):\n error = \"\"\n if not record[\"vendor_code\"]: \n error = \"vendor_code not provided.\"\n if not record[\"name\"]: \n error = error + \"\\nname not provided.\"\n if not record[\"address1\"]:\n error = error + \"\\naddress1 not provided.\"\n if not record[\"pincode\"]:\n error = error + \"\\npincode not provided.\"\n\n pincode = Pincode.objects.filter(pincode=record[\"pincode\"])\n if not pincode:\n error = error + \"\\n{0} pincode doesnot exist\".format(record['pincode'])\n\n if error:\n return (False, error)\n\n pincode = pincode[0]\n sub_customer = Shipper.objects.filter(customer=customer, alias_code=record[\"vendor_code\"])\n if not sub_customer :\n address = Address.objects.create(\n address1=record[\"address1\"], \n pincode=pincode, \n city=pincode.service_center.city, \n state=pincode.service_center.city.state\n )\n if \"address2\" in record:\n address.address2 = record[\"address2\"]\n if \"address3\" in record:\n address.address3 = record[\"address3\"]\n if \"address4\" in record:\n address.address2 = record[\"address4\"]\n if \"phone\" in record:\n address.phone = record[\"phone\"]\n address.save()\n subcustomer = Shipper.objects.create(\n customer=customer, \n alias_code=record[\"vendor_code\"], \n name = record[\"name\"], \n address=address\n )\n else:\n sub_customer = sub_customer[0]\n address = sub_customer.address\n if \"address1\" in 
record:\n                address.address1 = record[\"address1\"]\n            if \"address2\" in record:\n                address.address2 = record[\"address2\"]\n            if \"address3\" in record:\n                address.address3 = record[\"address3\"]\n            if \"address4\" in record:\n                address.address4 = record[\"address4\"]\n            if \"phone\" in record:\n                address.phone = record[\"phone\"]\n\n            address.pincode = pincode\n            address.city = pincode.service_center.city\n            address.state = pincode.service_center.city.state\n            address.save()\n\n            sub_customer.name = record[\"name\"]\n            sub_customer.save()\n        return (True, sub_customer)\n\n    error_list = defaultdict(list)\n    file_contents = xmltodict.parse(xml_input)\n    vendors = file_contents['VENDOR-OBJECTS']['VENDOR']\n    vendors_list = []\n    if not isinstance(vendors, list):\n        vendors_list.append(vendors)\n    else:\n        vendors_list = vendors\n\n    for record in vendors_list:\n        success, result = handle_record(record)\n        if not success:\n            error_list[record['name']].append(result)\n    return error_list\n\ndef json2xml(json_obj, line_padding=\"\"):\n    result_list = list()\n\n    json_obj_type = type(json_obj)\n\n    if json_obj_type is list:\n        for sub_elem in json_obj:\n            result_list.append(json2xml(sub_elem, line_padding))\n\n        return \"\\n\".join(result_list)\n\n    if json_obj_type is dict:\n        for tag_name in json_obj:\n            sub_obj = json_obj[tag_name]\n            result_list.append(\"%s<%s>\" % (line_padding, tag_name))\n            result_list.append(json2xml(sub_obj, \"\\t\" + line_padding))\n            result_list.append(\"%s</%s>\" % (line_padding, tag_name))\n\n        return \"\\n\".join(result_list)\n\n    return \"%s%s\" % (line_padding, json_obj)\n","sub_path":"ecomexpress/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"68790079","text":"# Problem\n# SWEA 5202 - [Python S/W problem solving, implementation day 3] greedy algorithm - cargo dock\n\n# My solution\nT = int(input())\nfor tc in range(T):\n    N = int(input())\n\n    time = []\n    Lorry = []\n\n    for i in range(N):\n        time.append(list(map(int, input().split())))\n\n    time.sort(key = lambda i:i[1])\n\n    Lorry.append(time[0])\n\n    for k in range(N):\n        if Lorry[-1][1] <= time[k][0]:\n            Lorry.append(time[k])\n\n    print(\"#%d %d\" %(tc+1, len(Lorry)))\n","sub_path":"SWEA/5202_화물 도크.py","file_name":"5202_화물 도크.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"271976956","text":"import random\nright={}\n\ndef check_word(y):\n    if y in worddict:\n        print(y)\n        print(worddict[y])\ndef test():\n    print(\"Enter the meaning of following word\")\n    right_num=0\n    wrong_num=len(worddict)\n    for i in range(0,3):\n        word,me=random.choice(list(worddict.items()))\n        print(word) \n        x=input()\n        if(x.strip()==me.strip()):\n            print('correct')\n            right_num=right_num+1\n            wrong_num=wrong_num-1\n            worddict.pop(word)\n            print(right_num)\n            print(wrong_num)\n            right[word]=me\n        else:\n            print('Wrong Meaning Entered, the correct one is- '+me) \ntry:\n    m=open('Meaning.txt','r+')\n    worddict={}\n    for i in m:\n        (word,meaning)=i.split('-')\n        worddict[word]=meaning\n    print(\"Enter F to find meaning or Enter T for Test\")\n    y=input()\n    if(y=='F'):\n        print(\"Enter your Word\")\n        check_word(input())\n    if(y=='T'):\n        test()\n        print(str(right),file=m)\n    print(worddict,file=m)\nexcept IOError as err:\n    
print(str(err))\n","sub_path":"Dictionary-git.py","file_name":"Dictionary-git.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"511070321","text":"\"\"\"\nModule for posting to Slack.\n\"\"\"\n\nimport sys\nimport requests\nimport src.util as util\n\n\nclass Slack(object):\n    \"\"\"\n    Slack helper class.\n    \"\"\"\n\n    def __init__(self, logger, token, channel):\n        # type: (logger, str, str) -> None\n        \"\"\"\n        Constructor for the Slack settings.\n        :param logger: logger\n        :param token: Slack API token\n        :param channel: name of the channel to post to\n        \"\"\"\n        self.logger = logger\n        self.token = token\n        self.channel = channel\n\n    def notify_with_figure(self, massage, abs_figure_path):\n        # type: (str, str) -> None\n        \"\"\"\n        Post a message and an image to Slack.\n        :param massage: body of the post\n        :param abs_figure_path: absolute path of the saved figure\n        \"\"\"\n        self.logger.info('notify_with_figure')\n\n        if util.is_exits(abs_figure_path):\n            # note: in Python, if and for blocks do not create a new scope\n            files = {'file': open(abs_figure_path, 'rb')}\n        else:\n            self.logger.info('figure is not found: {}'.format(abs_figure_path))\n            sys.exit(1)\n\n        param = {\n            'token': self.token,\n            'channels': self.channel,\n            'filename': \"filename\",\n            'initial_comment': massage,\n            'title': \"todays btc MACD\"\n        }\n\n        requests.post(url=\"https://slack.com/api/files.upload\", params=param, files=files)\n","sub_path":"src/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"27670229","text":"# Exercise 3.1\n\"\"\"print(\"\\tSelect a menu option\")\nprint(\"#\"*50)\nprint(\"\\tPress 1 for the pay-extra option\")\nprint(\"\\tPress 2 for the flat-rate option\")\n\nchoose = int(input(\" \")) # choose pay-extra or flat rate\nkm = int(input(\"Please enter the distance in kilometers\\n\")) # enter the distance\nif choose == 1 : # pay-extra option\n    if km <= 25: # not over 25 km: pay 25 baht\n        print(\"Total cost: 25 baht\") \n    elif km > 25: # over 25 km: pay 25+55 baht\n        print(\"Total cost: 80 baht\")\nif choose == 2 : # flat-rate option\n    if km <= 25: # not over 25 km: pay 25 baht\n        print(\"Total cost: 25 baht\")\n    elif km > 25: # over 25 km: pay 55 baht\n        print(\"Total cost: 55 baht\")\"\"\"\n\n# Exercise 3.2\n'''a1=int(input(\"Please enter how many values to read\\n\"))\na2 = 0\na3 = 1\nwhile(a3 <= a1) :\n    num = int(input(\"Enter a number : \"))\n    a2 += num\n    a3+=1\nprint(\"Sum of all values read = %d\"%a2)'''\n\n# Exercise 3.3\n\"\"\"print(\"Enter your favorite foods, or exit to quit the program\")\na1 = []\ni = 0\nwhile(True) :\n    i += 1\n    food = input(\"Favorite food number {} is \\t\".format(i))\n    a1.append(food)\n    if food == \"exit\" :\n        break\nprint(\"Your favorite foods are \",end= \"\")\nfor x in range(1,i):\n    print(x,\".\",a1[x-1],end=\" \")\"\"\"\n\n# Exercise 3.4\na = []\nwhile True :\n    b = input('----Khun Lin Beauty Shop----\\n add [a]\\n show [s]\\n exit [x]\\n')\n    b = b.lower()\n    if b == 'a' : \n        c = input('Enter a customer record (ID : name : province)')\n        a.append(c)\n        print('\\n*******The data has been saved*******\\n')\n    elif b == 's' : \n        print('{0:-<30}'.format(\"\"))\n        print('{0:-<8}{1:-<10}{2:10}'.format('ID','Name','Province'))\n        print('{0:-<6}{0:-<10}{0:-<10}'.format(\"\"))\n        for d in a : \n            e = d.split(\":\")\n            print('{0[0]:<6} {0[1]:<10}({0[2]:<10})'.format(e))\n        continue\n    elif b == 'x' : \n        c=input(\"Do you want to close the program? : \")\n        if c ==\"yes\":\n            print(\"Program finished\")\n            break\n    else : \n        continue\n\n# Exercise 3.5\n'''student = 
int(input('please enter student :'))\nprint('-'*30)\ntotal = [0 , 0 , 0 , 0 , 0 , 0]\nscore = ['90-100 :','80-89 :','70-79 :','60-69 :','50-59 : ','0-49 :']\nx = 1\nwhile x <= student :\n    point = int(input('please enter score :'))\n    if point <= 100 and point >= 90 :\n        total[0] += 1\n    elif point < 90 and point >= 80 :\n        total[1] += 1\n    elif point < 80 and point >= 70 :\n        total[2] += 1\n    elif point < 70 and point >= 60 :\n        total[3] += 1\n    elif point < 60 and point >= 50 :\n        total[4] += 1\n    elif point < 50 and point >= 0 :\n        total[5] +=1\n    x = x+1\nfor x in range(0,6) :\n    print(score[x],'*'*total[x])'''","sub_path":"week3/work3.py","file_name":"work3.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"145453321","text":"\"\"\"\nThis script gets the pronunciations of words in a simplified format\nand stores the result in txt files\n\"\"\"\n\nimport os\n\n#Directories\nwrd_dir = \"exp_txg_tst/wrd_simplified\"\nphn_dir = \"exp_txg_tst/simplified\"\nout_dir = \"exp_txg_tst/prons\"\n\n#Make out directory if required; otherwise empty it\nif os.path.exists(out_dir):\n    #Empty the folder\n    for f in os.listdir(out_dir):\n        os.remove(\"%s/%s\" % (out_dir, f))\nelse:\n    #Make the folder\n    os.mkdir(out_dir)\n\n\ndef iterfile(f):\n    \"\"\"Helper function to open a file line by line, skipping header\"\"\"\n    for line in f:\n        fields = line.replace(\"\\n\",\"\").split(\"\\t\")\n        yield float(fields[0]), fields[-1].lower()\n\n#Get list of filenames\nfiles = os.listdir(wrd_dir)\n\n#Iterate through filenames\nfor name in files:\n    with open(\"%s/%s.txt\" % (out_dir, name), \"w\") as outFile:\n        with open(\"%s/%s\" % (wrd_dir, name), \"r\") as wrdfile:\n            with open(\"%s/%s\" % (phn_dir, name), \"r\") as phnfile:\n                wrds = iterfile(wrdfile)\n                phns = iterfile(phnfile)\n\n                #Get first phone\n                endphn, phnsymb = next(phns)\n\n                #Iterate through words\n                for endtime, word in wrds:\n                    endphn = 0\n                    pron = \"\"\n                    if word == \"h#\":\n                        try:\n                            #Skip pauses\n                            endphn, phnsymb = next(phns)\n                        except StopIteration:\n                            break\n                    else:\n                        while endphn <= endtime:\n                            if phnsymb != \"h#\":\n                                pron += phnsymb + \" \"\n                            try:\n                                endphn, phnsymb = next(phns)\n                            except StopIteration:\n                                break\n                    #Now the pronunciation is complete. 
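# A hedged aside on the score-binning chain in the work3.py record above: the
# if/elif ladder can be collapsed into one boundary table with bisect. The
# function name and boundaries below are assumptions for this sketch only.
import bisect

def score_bucket(score, bounds=(50, 60, 70, 80, 90)):
    labels = ['0-49', '50-59', '60-69', '70-79', '80-89', '90-100']
    return labels[bisect.bisect_right(bounds, score)]

assert score_bucket(49) == '0-49'
assert score_bucket(85) == '80-89'
assert score_bucket(100) == '90-100'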
Trim extra spaces.\n pron = pron.strip()\n \n #Add to pronunciation file\n if word != \"h#\":\n outFile.write(\"%s %s\\n\" % (word, pron))","sub_path":"forced-alignment/wrd_prons_filewise.py","file_name":"wrd_prons_filewise.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"141632690","text":"elems, median = [int(x) for x in input().split()]\r\narray = [-1] + sorted([int(y) for y in input().split()])\r\nfirst = 0\r\nlast = 0\r\ntry:\r\n\tfirst = array.index(median) \r\n\tlast = elems - array[::-1].index(median)\r\nexcept ValueError:\r\n\tpass\r\n\r\nmidPt = (elems + 1)//2\r\n\r\n# print(first, last)\r\nif first == 0 and last == 0:\r\n\t# print(\"Did not find\")\r\n\tinsertIndex = elems + 1\r\n\tnewEnd = elems + 1\r\n\tfor i in range(1, elems + 1):\r\n\t\tif array[i] > median:\r\n\t\t\tinsertIndex = i\r\n\t\t\tbreak\r\n\tnewMidPt = (elems+2)//2\r\n\tnewElems = elems + 1\r\n\t# print(insertIndex, newMidPt)\r\n\tif insertIndex == newMidPt:\r\n\t\tprint(1)\r\n\telif insertIndex < newMidPt:\r\n\t\tif newElems % 2 == 1:\r\n\t\t\tprint((newMidPt - insertIndex)*2)\r\n\t\telse:\r\n\t\t\tprint((newMidPt - insertIndex)*2 + 1)\t\t\r\n\telif insertIndex > newMidPt:\r\n\t\tif newElems % 2 == 1:\r\n\t\t\tprint((insertIndex - newMidPt)*2 + 1)\r\n\t\telse:\r\n\t\t\tprint((insertIndex - newMidPt)*2)\r\n\t# print(\"Insert index: \", insertIndex)\r\n\t# if insertIndex == elems + 1:\r\n\t# \tprint(elems + 1)\r\n\t# else:\r\n\t# \tif elems % 2 == 1:\r\n\t# \t\tprint(max(elems + 1 - insertIndex, insertIndex - 1) - min(elems + 1 - insertIndex, insertIndex - 1))\r\n\t# \telse:\r\n\t# \t\tprint(max(elems + 1 - insertIndex, insertIndex - 1) - min(elems + 1 - insertIndex, insertIndex - 1))\r\nelse:\r\n\tif array[midPt] == median:\r\n\t\tprint(0)\r\n\telif last < midPt:\r\n\t\tif elems % 2 == 1:\r\n\t\t\tprint((midPt - last)*2 - 1)\r\n\t\telse:\r\n\t\t\tprint((midPt - last)*2)\r\n\telif first > midPt:\r\n\t\tif elems % 2 == 1:\r\n\t\t\tprint((first - midPt)*2)\r\n\t\telse:\r\n\t\t\tprint((first - midPt)*2 - 1)","sub_path":"Problems/6/166C.py","file_name":"166C.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"386629703","text":"import requests\nfrom hashlib import md5\nimport base64\nimport json\nfrom Crypto.Cipher import AES\nimport codecs\nimport datetime\nimport re\n\nfrom core.models import StopID, Stop, Journey, Agency, Source, JourneyStop, StopName\n\nclass HafasClient():\n debug = False\n base_url = \"https://reiseauskunft.bahn.de/bin/mgate.exe?checksum={checksum}\"\n redtnCards = {\"\": 0, \"25_1\": 1, \"25_2\": 2, \"50_1\": 3, \"50_2\": 4}\n traveler_types = {\"adult\": \"E\", \"child\": \"K\", \"infant\": \"B\"}\n aid = \"rGhXPq+xAlvJd8T8cMnojdD0IoaOY53X7DPAbcXYe5g=\"\n aid2 = \"n91dB8Z77MLdoR0K\"\n key = bytes([97, 72, 54, 70, 56, 122, 82, 117, 105, 66, 110, 109, 51, 51, 102, 85])\n\n def searchLocation(self, term):\n data = {\n \"svcReqL\": [{\"meth\": \"LocMatch\", \"req\": {\"input\": {\"field\": \"S\", \"loc\": {\"name\": term, \"type\": \"S\"}}}}]}\n search_request = self.sendPostRequest(data)\n # print(search_request.text)\n response = self.cleanResponse(search_request.json())\n search_results = []\n if response[\"svcResL\"][0][\"err\"] == \"OK\":\n for response_part in response[\"svcResL\"]:\n if response_part[\"err\"] == \"OK\" and response_part[\"meth\"] == \"LocMatch\":\n for result in 
response_part[\"res\"][\"match\"][\"locL\"]:\n                    search_results.append({k: v for k, v in result.items() if k in [\"lid\", \"name\", \"type\"]})\n        return search_results\n\n    def journeyDetails(self, journeyId):\n        data = {\"svcReqL\": [{\n            \"meth\": \"JourneyDetails\",\n            \"req\": {\"jid\": journeyId}\n        }]}\n        search_request = self.sendPostRequest(data)\n        response = self.cleanResponse(search_request.json())\n        return response\n\n    def stationBoard(self, stationName, start_datetime=None, duration=60):\n        if start_datetime is None:\n            # evaluate the default per call, not once at class definition\n            start_datetime = datetime.datetime.now()\n        station = self.searchLocation(stationName)[0]\n        data = {\"svcReqL\": [\n            {\n                \"meth\": \"StationBoard\",\n                \"req\": {\n                    \"date\": start_datetime.strftime(\"%Y%m%d\"),\n                    \"dur\": duration,\n                    \"stbLoc\": {\n                        \"lid\": station['lid']\n                    },\n                    \"getPasslist\": False,\n                    \"time\": start_datetime.strftime(\"%H%M%S\"),\n                    \"jnyFltrL\": [\n                        {\n                            \"mode\": \"INC\",\n                            \"type\": \"PROD\",\n                            \"value\": \"7\"\n                        }\n                    ],\n                }\n            }\n        ]\n        }\n        search_request = self.sendPostRequest(data)\n        response = self.cleanResponse(search_request.json())\n        return response\n\n    def parse_timedelta(self, time_str):\n        regex = re.compile(r'(?P<days>\\d{2})(?P<hours>\\d{2})(?P<minutes>\\d{2})(?P<seconds>\\d{2})')\n        if len(time_str) == 6:\n            time_str = \"00\" + time_str\n        parts = regex.match(time_str)\n        if not parts:\n            return\n        parts = parts.groupdict()\n        time_params = {name: int(amount) for name, amount in parts.items()}\n        return datetime.timedelta(**time_params)\n\n    def getFinalTime(self, start_date, duration):\n        dur = self.parse_timedelta(duration)\n        return datetime.datetime.strptime(start_date, \"%Y%m%d\") + dur\n\n    def sendPostRequest(self, data, headers={}):\n        data[\"auth\"] = {\"aid\": self.aid2, \"type\": \"AID\"}  # from res/raw/haf_config.properties of DBNavigator\n        data[\"client\"] = {\"id\": \"DB\", \"name\": \"DB Navigator\", \"type\": \"IPH\", \"v\": \"19040000\", \"os\": \"iOS 13.1.2\"}\n        data[\"ver\"] = \"1.15\"\n        data[\"ext\"] = \"DB.R19.04.a\"\n        data[\"lang\"] = \"de\"\n        data = json.dumps(data)\n        chk = self.generateChecksum(data)\n        url = self.base_url.format(checksum=chk)\n        req = requests.post(url, data=data,\n                            headers={\"User-Agent\": \"DB Navigator/19.10.04 (iPhone; iOS 13.1.2; Scale/2.00)\",\n                                     \"Authorization\": \"Basic Og== \", \"Content-Type\": \"application/json\"})\n        return req\n\n    def cleanResponse(self, data):\n        return data\n\n    def generateChecksum(self, data):\n        to_hash = data + self.getSecret()\n        to_hash = to_hash.encode(\"utf-8\")\n        return md5(to_hash).hexdigest()\n\n    def getSecret(self):\n        unpad = lambda s: s[:-ord(s[len(s) - 1:])]\n        enc = base64.b64decode(self.aid)\n        iv = codecs.decode(\"00\" * 16, \"hex\")\n        cipher = AES.new(self.key, AES.MODE_CBC, iv)\n        dec = unpad(cipher.decrypt(enc).decode(\"utf-8\"))\n        return dec\n\n    def strpDelta(self, string):\n        return datetime.timedelta(hours=int(string[-6:-4]), minutes=int(string[-4:-2]), seconds=int(string[-2:]))\n","sub_path":"FahrplanDatenGarten/DBApis/hafasClient.py","file_name":"hafasClient.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"209657574","text":"class Solution:\n    def lengthOfLongestSubstring(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        hTable = {}\n        start = 0\n        ans = 0\n        for i in range(len(s)):\n            if s[i] in hTable and start <= hTable[s[i]]:\n                start = hTable[s[i]] + 1\n            else:\n                ans = max(ans, i-start+1)\n            hTable[s[i]] = i\n\n        return 
ans\n\nprint(Solution().lengthOfLongestSubstring(\"tmmzuxt\"))\n\n\n","sub_path":"githubanswers/Longestsubstringwithoutrepeatingcharacters_git.py","file_name":"Longestsubstringwithoutrepeatingcharacters_git.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"414657895","text":"import numpy as np\r\nfrom numpy import pi\r\nfrom source_mu import Fisher, SNR\r\nfrom astropy.cosmology import FlatLambdaCDM\r\nfrom multiprocessing import Pool, cpu_count\r\nfrom functools import partial\r\nimport h5py\r\nimport time\r\nfrom scipy.interpolate import interp1d\r\n\"\"\" Geometricised Units; c=G=1, Mass[1500 meter/Solar Mass], Distance[meter], Time[3.0e8 meter/second]\"\"\"\r\n\r\n\"\"\"Parameters: (ln Mc_z, ln(Lambda),tc,Phi_c,ln d)\"\"\"\r\ncosmo_true=FlatLambdaCDM(70.5,0.2736)\r\n# standard siren (EOS: SLY)\r\nm1_SLY = m2_SLY = 1.433684*1500. \t \t \r\nLambda_SLY=2.664334e+02\r\nPN_order=3.5\r\ndl_dM_SLY=(3.085067e+02-1.997018e+02)/((1.433684e+00 -1.493803)*1500.*2.)\r\nsm1=sm2=0.09*1500.\r\nZ_true=1.5\r\n\r\nN=100000\r\nz_Peak=2.5\r\nz_Max=10.\r\n#load data\r\nm,L=np.loadtxt('Mass_Vs_TidalDeformability_SLY.txt',usecols=(0,1),unpack=True)\r\nm*=1500.\r\nm_l=min(m)\r\nm_u=max(m)\r\nmass=interp1d(L,m,kind='cubic')\r\nLamb=interp1d(m,L,kind='cubic')\r\nl_l=min(L)\r\nl_u=max(L)\r\nce_fs, ce_asd , et_asd, aligo_asd = np.loadtxt('Amplitude_of_Noise_Spectral_Density.txt', usecols=[0,3,2,1],unpack = True)\r\n\r\n\r\n# correct units\r\nc=G=1\r\nc1=3.0e8\r\nMpc=3.086e22\r\n\r\nce_fs *= c1**-1.\r\nce_asd *= c1**0.5/30.\r\net_asd *= c1**0.5\r\n\r\nrho_th=8.0\r\n#Draw True Values\r\ndef Draw_true(m10,m20,Lambda,cosmo,z_max,z_peak,j):\r\n beta=2.*z_max/z_peak -2.\r\n m1=m10\r\n m2=m20\r\n M=m1+m2\r\n tc=pc=1.0\r\n eta=m1*m2/M**2\r\n Lambda=Lamb(m1)\r\n Lambdat=Lambda*(1.+7.*eta - 31.*eta**2)*16./13.\r\n while True:\r\n \r\n z=np.random.beta(3,beta+1)*z_max\r\n Th=np.random.beta(2,4)\r\n\r\n \r\n Mz=M*(1.+z)\r\n mu_z=(m1/2.)*(1.+z)\r\n \r\n \r\n Mc_z=Mz*eta**0.6\r\n \r\n d_l=cosmo.luminosity_distance(z)\r\n \r\n d_l=d_l.value*Mpc\r\n \r\n deff=d_l/Th\r\n \r\n \r\n \r\n V=Fisher(Mc_z,Lambdat,mu_z,deff,1.,1.,ce_fs,ce_asd)\r\n try:\r\n Cov=np.linalg.inv(V)\r\n except np.linalg.LinAlgError:\r\n continue\r\n rho=SNR(Mc_z,mu_z,deff,ce_fs,ce_asd)**0.5\r\n if(rho>rho_th):\r\n \r\n break\r\n Mean=np.array([np.log(Mc_z),np.log(mu_z),np.log(abs(Lambdat)),1.,1.,np.log(deff)])\r\n \r\n return [np.array(Mean),np.array(Cov)]\r\n\r\n#Draw Measured Values from Fisher Matrix evaluated at True Values_samp\r\nn_dim=4\r\n\r\n\r\n\r\ndef Draw_Measured(n,Tr,j):\r\n mean=Tr[j][0]\r\n cov=Tr[j][1]\r\n Mc_z,mu_z,Lambdat,tc,pc,deff=np.random.multivariate_normal(mean,cov,size=n).T\r\n \r\n\r\n\r\n Mc_z=np.array(Mc_z)\r\n mu_z=np.array(mu_z)\r\n Lambdat=np.array(Lambdat)\r\n deff=np.array(deff)\r\n \r\n return [np.array([Mc_z, mu_z, Lambdat, deff])]\r\nN_events=10000\r\nn_events=10\r\nn_samples=4000\r\nprint('no. 
of measured samples per event='+str(n_samples))\r\nl=0\r\nfle=h5py.File('data_real_'+str(N_events)+'_'+str(n_samples)+'.h5','w')\r\nfor i in range(n_events,N_events+n_events,n_events):\r\n with Pool(cpu_count()) as pool:\r\n start=time.time()\r\n f=partial(Draw_true,m1_SLY,m2_SLY,Lambda_SLY,cosmo_true,z_Max,z_Peak)\r\n Tr=pool.map(f,range(n_events))\r\n end=time.time()\r\n Tr_time='True data samples took {0:.1f} seconds'.format(end-start)\r\n\r\n with Pool(cpu_count()) as pool:\r\n start=time.time()\r\n f=partial(Draw_Measured,n_samples,Tr)\r\n Dat=pool.map(f,range(n_events))\r\n end=time.time()\r\n Meas_time='Measured data samples took {0:.1f} seconds'.format(end-start)\r\n #print(Tr[5][0])\r\n n=0\r\n for k in range(i-n_events,i):\r\n fle.create_dataset('True'+str(k),data=np.array(Tr[n][0]))\r\n fle.create_dataset('Data'+str(k),data=np.array(Dat[n]))\r\n n+=1\r\n print(i,Tr_time,Meas_time)\r\nfle.close()\r\n\"\"\"for i in range(l,l+n_events):\r\n fle.create_dataset('True_'+str(i),data=np.array(Tr[i-l][0]))\r\n fle.create_dataset('Measured_'+str(i),data=np.array(Dat)[i-l])\r\nprint(l)\"\"\"\r\n \r\n\r\n\r\n","sub_path":"old_codes/12months/data_real2.py","file_name":"data_real2.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"166022189","text":"#!/usr/bin/env python3\n\"\"\"Helper tool to convert CNVetti coverage output to hom. DEL calls.\n\"\"\"\n\nimport argparse\nimport contextlib\nimport datetime\nimport logging\nimport sys\nimport typing\n\nimport attr\nimport logzero\nfrom logzero import logger\nimport vcfpy\n\n\n@attr.s(frozen=True, auto_attribs=True)\nclass HomDel:\n \"\"\"Represent of hom. DEL from one sample.\"\"\"\n\n #: The chromosome.\n chrom: str\n #: The 0-based start position.\n pos_begin: int\n #: The 0-based end position.\n pos_end: int\n #: The raw read count\n raw_reads: int\n #: The total number of target bases\n target_bases: int\n #: The name of the sample\n sample: str\n\n def extend(self, other: typing.TypeVar(\"HomDel\")) -> typing.TypeVar(\"HomDel\"):\n assert self.chrom == other.chrom\n return HomDel(\n self.chrom,\n min(self.pos_begin, other.pos_begin),\n max(self.pos_end, other.pos_end),\n self.raw_reads + other.raw_reads,\n self.target_bases + other.target_bases,\n other.sample,\n )\n\n @staticmethod\n def from_record(record: vcfpy.Record) -> typing.TypeVar(\"HomDel\"):\n call = record.calls[0]\n return HomDel(\n record.CHROM,\n record.POS,\n record.INFO[\"END\"],\n call.data[\"RCV\"],\n record.INFO[\"END\"] - record.POS + 1,\n call.sample,\n )\n\n def to_record(self) -> vcfpy.Record:\n return vcfpy.Record(\n self.chrom,\n self.pos_begin,\n [],\n \"N\",\n [vcfpy.SymbolicAllele(\"DEL\")],\n None,\n [],\n {\n \"END\": self.pos_end,\n \"SVLEN\": [self.pos_end - self.pos_begin + 1],\n \"SVMETHOD\": \"cnvetti-homdel-0.2\",\n },\n [\"GT\", \"CN\", \"RCV\", \"LCV\"],\n [\n vcfpy.Call(\n self.sample,\n {\n \"GT\": \"1\",\n \"CN\": 0.0,\n \"RCV\": self.raw_reads,\n \"LCV\": self.raw_reads / self.target_bases,\n },\n )\n ],\n )\n\n\ndef build_header(header_in: vcfpy.Header) -> vcfpy.Header:\n result = vcfpy.Header(\n lines=[\n vcfpy.HeaderLine(key=\"fileformat\", value=\"VCFv4.2\"),\n vcfpy.HeaderLine(key=\"fileDate\", value=datetime.datetime.now().strftime(r\"%Y%m%d\")),\n vcfpy.HeaderLine(key=\"source\", value=\"CNVetti::homdel\"),\n vcfpy.AltAlleleHeaderLine.from_mapping(\n {\n \"ID\": \"DEL\",\n \"Description\": \"The record describes a deletion (decrease in 
coverage)\",\n }\n ),\n vcfpy.InfoHeaderLine.from_mapping(\n {\n \"ID\": \"END\",\n \"Number\": 1,\n \"Type\": \"Integer\",\n \"Description\": \"End position of the variant described in this record\",\n }\n ),\n vcfpy.InfoHeaderLine.from_mapping(\n {\n \"ID\": \"SVTYPE\",\n \"Number\": 1,\n \"Type\": \"String\",\n \"Description\": \"Type of structural variant\",\n }\n ),\n vcfpy.InfoHeaderLine.from_mapping(\n {\n \"ID\": \"SVLEN\",\n \"Number\": \".\",\n \"Type\": \"Integer\",\n \"Description\": \"Difference in length between REF and ALT alleles\",\n }\n ),\n vcfpy.InfoHeaderLine.from_mapping(\n {\n \"ID\": \"SVMETHOD\",\n \"Number\": 1,\n \"Type\": \"String\",\n \"Description\": \"Type of approach used to detect SV\",\n }\n ),\n vcfpy.FormatHeaderLine.from_mapping(\n {\"ID\": \"GT\", \"Number\": 1, \"Type\": \"String\", \"Description\": \"Genotype\"}\n ),\n vcfpy.FormatHeaderLine.from_mapping(\n {\n \"ID\": \"CN\",\n \"Number\": 1,\n \"Type\": \"Float\",\n \"Description\": \"Copy number of the copy number variant\",\n }\n ),\n vcfpy.FormatHeaderLine.from_mapping(\n {\"ID\": \"RCV\", \"Number\": 1, \"Type\": \"Float\", \"Description\": \"Raw coverage value\"}\n ),\n vcfpy.FormatHeaderLine.from_mapping(\n {\n \"ID\": \"LCV\",\n \"Number\": 1,\n \"Type\": \"Float\",\n \"Description\": \"Length-normalized coverage value\",\n }\n ),\n ],\n samples=vcfpy.SamplesInfos(header_in.samples.names),\n )\n for line in header_in.lines:\n if line.key == \"contig\":\n result.add_contig_line({\"ID\": line.id, \"length\": line.length})\n return result\n\n\ndef process_contig(\n contig: str, reader: vcfpy.Reader, out_header: vcfpy.Header, max_rcv: float, max_lcv: float\n):\n cnvs = []\n curr = None\n for record in reader:\n call = record.calls[0]\n if call.data[\"RCV\"] < max_rcv or call.data[\"LCV\"] < max_lcv:\n if curr:\n curr = curr.extend(HomDel.from_record(record))\n else:\n curr = HomDel.from_record(record)\n elif curr:\n yield curr.to_record()\n curr = None\n if curr:\n yield curr.to_record()\n\n\ndef run(args):\n logger.info(\"Starting to convert coverage to hom dels\")\n logger.info(\"config = %s\", args)\n\n with contextlib.ExitStack() as stack:\n logger.info(\"Open input and output file...\")\n reader = stack.enter_context(vcfpy.Reader.from_path(args.in_vcf))\n out_header = build_header(reader.header)\n writer = stack.enter_context(vcfpy.Writer.from_path(args.out_vcf, out_header))\n\n logger.info(\"Processing contigs...\")\n for contig_line in out_header.get_lines(\"contig\"):\n for record in process_contig(\n contig_line.mapping[\"ID\"], reader, out_header, args.max_rcv, args.max_lcv\n ):\n writer.write_record(record)\n logger.info(\"Done processing contig %s.\", contig_line.mapping[\"ID\"])\n\n logger.info(\"All done. 
Have a nice day!\")\n\n\ndef main(argv=None):\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"out_vcf\", metavar=\"OUT.vcf\", help=\"Path to output VCF file\")\n    parser.add_argument(\"in_vcf\", metavar=\"IN.vcf\", help=\"Path to input VCF files \")\n    parser.add_argument(\n        \"--max-rcv\", type=int, default=10, help=\"Maximum number of raw fragment count\"\n    )\n    parser.add_argument(\n        \"--max-lcv\", type=float, default=0.02, help=\"Maximum number of length-normalized fragments\"\n    )\n    parser.add_argument(\n        \"--verbose\", \"-v\", default=False, action=\"store_true\", help=\"Enable verbose mode\"\n    )\n\n    args = parser.parse_args(argv)\n    if args.verbose:\n        logzero.loglevel(logging.DEBUG)\n    else:\n        logzero.loglevel(logging.INFO)\n    return run(args)\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n","sub_path":"snappy_wrappers/tools/vcf_cnvetti_coverage_to_hom_del_calls.py","file_name":"vcf_cnvetti_coverage_to_hom_del_calls.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"309956640","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 1 21:07:25 2019\n\n@author: Devashish\n\"\"\"\n\n# Calling in the wagner-whitin function and checking the output\nimport functions\ny=functions.wagner_whitin(100,7,1,400,500,[70,90,140,150,120,130],0)\n\n# Evaluate the output of the function\ny\n\n#### Min cost vs starting inventory ####\nevaluated_cost={'start inv':[],'cost':[]}\nfor si in range(0,400,5):\n    x=functions.wagner_whitin(100,7,1,400,500,[70,90,140,150,120,130],si)\n    evaluated_cost['start inv'].append(si)\n    evaluated_cost['cost'].append(x['Minimum Cost'])\n\n# Plotting Min cost vs. starting inventory:\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots() # make figure and axes separate objects\nplt.plot(evaluated_cost['start inv'],evaluated_cost['cost'])\nplt.xlabel('Starting Inv')\nplt.ylabel('Minimum Cost')\nplt.show()\nfig.savefig('Cost vs starting inventory.jpg', \n            transparent=False, dpi=80, bbox_inches=\"tight\")\n\n### Plotting Min cost vs. 
per unit holding cost #####\nimport numpy as np\ncomparative_stats={'holding cost':[],'cost':[]}\nfor hc in np.linspace(0,1,100):\n    z=functions.wagner_whitin(100,7,hc,400,500,[70,90,140,150,120,130],0)\n    comparative_stats['holding cost'].append(hc)\n    comparative_stats['cost'].append(z['Minimum Cost'])\n\nfig, ax = plt.subplots() # make figure and axes separate objects\nplt.plot(comparative_stats['holding cost'],comparative_stats['cost'])\nplt.xlabel('Per Unit Holding Cost')\nplt.ylabel('Minimum Cost')\nplt.show()\nfig.savefig('Total cost vs per unit holding cost.jpg', \n            transparent=False, dpi=80, bbox_inches=\"tight\")\n\n\n#### Plot the production policy #####\n\nfig, ax = plt.subplots() # make figure and axes separate objects\nplt.step(range(1,len(y['Production_Schedule'])+2),\n         y['Production_Schedule']+[0],where='post')\nplt.xlabel('Time Period')\nplt.ylabel('Production Amounts')\nplt.show()\nfig.savefig('Production Policy.jpg', \n            transparent=False, dpi=80, bbox_inches=\"tight\")\n\n#### Plot the inventory policy #####\nfig, ax = plt.subplots() # make figure and axes separate objects\nplt.step(range(1,len(y['Inventory_Schedule'])+2),\n         y['Inventory_Schedule']+[0],where='post')\nplt.xlabel('Time Period')\nplt.ylabel('Inventory Amounts')\nplt.show()\nfig.savefig('Inventory Policy.jpg', \n            transparent=False, dpi=80, bbox_inches=\"tight\")\n\n\n","sub_path":"Problemsets/PS8/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"523444735","text":"import unittest\nimport datetime\n\nfrom asyncwhois.parser import WhoIsParser, BaseParser\n\n\nclass TestWhoIsParserMethods(unittest.TestCase):\n\n    def test_parse_dates(self):\n        date_strings = [\n            '11-aug-2020',\n            '11-August-2020',\n            '11-09-2020',\n            '2020-09-20',\n            '2020.09.20',\n            '2020/09/20',\n            '2020. 09. 
20.',\n            '2020.09.20 11:11:11',\n            'August 11 2020',\n            '20200920'\n        ]\n\n        for date_string in date_strings:\n            formatted_date = BaseParser._parse_date(date_string)\n            self.assertIsInstance(formatted_date, datetime.datetime)","sub_path":"tests/test_parser_methods.py","file_name":"test_parser_methods.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"53004523","text":"import csv\nfrom os.path import splitext\n\n\nclass CarBase:\n    def __init__(self, brand, photo_file_name, carrying):\n        self.car_type = None\n        self.photo_file_name = photo_file_name\n        self.brand = brand\n        self.carrying = carrying\n\n    def get_photo_file_ext(self):\n        return splitext(self.photo_file_name)[1]\n\n\nclass Car(CarBase):\n    def __init__(self, brand, photo_file_name,\n                 carrying, passenger_seats_count):\n        super().__init__(brand, photo_file_name, carrying)\n        self.car_type = \"car\"\n        self.passenger_seats_count = passenger_seats_count\n\n\nclass Truck(CarBase):\n    def __init__(self, brand, photo_file_name, carrying, body_whl):\n        super().__init__(brand, photo_file_name, carrying)\n        self.car_type = \"truck\"\n        if not body_whl:\n            whl = [0., 0., 0.]\n        else:\n            whl = body_whl.split(\"x\")\n        self.body_length = float(whl[0])\n        self.body_width = float(whl[1])\n        self.body_height = float(whl[2])\n\n    def get_body_volume(self):\n        return self.body_height * self.body_length * self.body_width\n\n\nclass SpecMachine(CarBase):\n    def __init__(self, brand, photo_file_name, carrying, extra):\n        super().__init__(brand, photo_file_name, carrying)\n        self.car_type = \"spec_machine\"\n        self.extra = extra\n\n\ndef get_car_list(csv_filename):\n    car_list = []\n    with open(csv_filename) as csv_fd:\n        reader = csv.reader(csv_fd, delimiter=';')\n        next(reader)  # skip the header row\n        for row in reader:\n            if (len(row) < 7):\n                continue\n            if (row[0] == \"car\"):\n                car_list.append(\n                    Car(row[1], row[3], float(row[5]), int(row[2])))\n            elif (row[0] == \"truck\"):\n                car_list.append(Truck(row[1], row[3], float(row[5]), row[4]))\n            elif (row[0] == \"spec_machine\"):\n                car_list.append(SpecMachine(\n                    row[1], row[3], float(row[5]), row[6]))\n    return car_list\n\n\ndef _main():\n    csv_filename = \"c:/Users/vryba/coursera_courses/ \\\n        python/coursera_week3_cars.csv\"\n    with open(csv_filename) as csv_fd:\n        reader = csv.reader(csv_fd, delimiter=';')\n        next(reader)  # skip the header row\n        for row in reader:\n            if (len(row) < 7):\n                continue\n            print(row)\n    print(get_car_list(csv_filename))\n\n\nif __name__ == \"__main__\":\n    _main()\n","sub_path":"python/car_class.py","file_name":"car_class.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"342518969","text":"import tensorflow as tf\nimport numpy as np\nimport itertools\nimport multiprocessing\nimport evaluation\nfrom evaluation import PTBTokenizer, Cider, utils\nfrom tqdm import tqdm\n\n\ndef xe_loss(real, pred, scores=None):\n    loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(\n        from_logits=True, reduction=\"none\"\n    )\n    loss_ = loss_obj(real, pred)\n    if scores is not None:\n        # per-sequence weights are optional so the validation loss can call\n        # this function with (real, pred) only\n        loss_ = tf.multiply(loss_, tf.expand_dims(scores, axis=-1))\n\n    mask = tf.math.logical_not(tf.math.equal(real, 0))\n    mask = tf.cast(mask, dtype=loss_.dtype)\n\n    loss_ *= mask\n\n    return tf.reduce_mean(loss_)\n\n\ndef evaluate_metrics(model, dataloader, tokenizer, beam_size, epoch):\n    gen = {}\n    gts = {}\n\n    # setup decoding graph\n    @tf.function\n    def beam_search(model, visual, 
cls, seqs_inp, max_len, eos_idx, beam_size, out_size):\n out, log_probs = model.beam_search(\n visual, cls, seqs_inp, max_len, eos_idx, beam_size, out_size\n )\n return out, log_probs\n\n with tqdm(\n desc=\"Epoch %d - evaluation\" % epoch, unit=\"it\", total=dataloader[\"data_steps\"],\n ) as pbar:\n for it, (visual, cls, seqs_inp, seqs_gts, caps_gts, _) in enumerate(\n dataloader[\"data\"]\n ):\n out, _ = beam_search(\n model, visual, cls, seqs_inp, 50, model.eos_idx, beam_size, out_size=1\n )\n caps_gen = tokenizer.sequences_to_texts(tf.squeeze(out).numpy())\n caps_gen = utils.clean_text(caps_gen, \"\")\n\n for i, (gts_i, gen_i) in enumerate(\n zip(caps_gts.numpy(), caps_gen)\n ):\n gen[\"%d_%d\" % (it, i)] = [gen_i]\n gts[\"%d_%d\" % (it, i)] = [gts_i.decode('utf-8')]\n\n pbar.update()\n\n score_gts = evaluation.PTBTokenizer.tokenize(gts)\n score_gen = evaluation.PTBTokenizer.tokenize(gen)\n scores, _ = evaluation.compute_scores(score_gts, score_gen)\n\n return scores, gts, gen\n\n\ndef evaluate_loss(model, dataloader, epoch):\n running_loss = 0.0\n loss_fn = xe_loss\n\n @tf.function\n def evaluate(visual, cls, seqs_inp, caps_gts):\n out = model(visual, cls, seqs_inp, caps_gts[:, :-1], True)\n loss = loss_fn(caps_gts[:, 1:], out)\n\n return loss\n\n with tqdm(\n desc=\"Epoch %d - validation\" % epoch, unit=\"it\", total=dataloader[\"data_steps\"]\n ) as pbar:\n for it, (visual, cls, seqs_inp, seqs_gts, caps_gts) in enumerate(dataloader[\"data\"]):\n loss = evaluate(visual, cls, seqs_inp, seqs_gts)\n running_loss += loss\n pbar.set_postfix(loss=running_loss.numpy() / (it + 1))\n pbar.update()\n\n return running_loss / dataloader[\"data_steps\"]\n\n\ndef train_xe(model, dataloader, optim, epoch):\n running_loss = 0.0\n loss_fn = xe_loss\n train_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n\n @tf.function\n def train(visual, cls, seqs_inp, seqs_gts, scores):\n with tf.GradientTape() as tape:\n out = model(visual, cls, seqs_inp, seqs_gts[:, :-1], True)\n loss = loss_fn(seqs_gts[:, 1:], out, scores)\n\n gradients = tape.gradient(loss, model.trainable_variables)\n optim.apply_gradients(zip(gradients, model.trainable_variables))\n return train_loss(loss)\n\n with tqdm(\n desc=\"Epoch %d - train-xe\" % epoch, unit=\"it\", total=dataloader[\"data_steps\"]\n ) as pbar:\n for it, (visual, cls, seqs_inp, seqs_gts, caps_gts, scores) in enumerate(\n dataloader[\"data\"]\n ):\n loss = train(visual, cls, seqs_inp, seqs_gts, scores)\n running_loss += loss\n pbar.set_postfix(loss=loss.numpy())\n pbar.update()\n\n return running_loss / dataloader[\"data_steps\"]\n","sub_path":"multimodal_caption/scps/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"520459973","text":"import csv\nimport json\nfrom datetime import datetime\nfrom time import sleep\nout_dict = {}\n\n\ndef main(file_input, file_output):\n \"\"\"\n Function opens cvs file for reading and json file for writing results.\n Also calls functions for export data.\n \"\"\"\n with open(file_input, 'r', encoding='utf-8') as file:\n csv_reader(file)\n with open(file_output, 'w', encoding='utf-8') as file:\n write_to_json(file)\n print('done')\n\n\ndef csv_reader(csv_file):\n \"\"\"\n Function reads lines from csv file and transfers data.\n \"\"\"\n for line in csv.DictReader(csv_file):\n e = export()\n next(e)\n e.send(line)\n\n\ndef export():\n \"\"\"\n Function is processing data and updating dictionary.\n 
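# A hedged aside on the generator-based coroutine in the record above: a
# coroutine has to be advanced to its first `yield` before .send() can
# deliver a value, which is why csv_reader() there calls next(e) before
# e.send(line). The sink() helper below is mine, a minimal illustration only.
def sink(store):
    while True:
        item = yield
        store.append(item)

received = []
s = sink(received)
next(s)              # prime the coroutine up to its first `yield`
s.send({'row': 1})
s.send({'row': 2})
assert received == [{'row': 1}, {'row': 2}]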
\"\"\"\n while True:\n row = yield\n sleep(0.5)\n out_dict.update({str(datetime.now()): row})\n\n\ndef write_to_json(json_file):\n \"\"\"\n Function is converting dictionary to JSON format.\n \"\"\"\n json_file.write(json.dumps(out_dict, indent=4))\n\n\nif __name__ == '__main__':\n main('text.csv', 'out.json')\n","sub_path":"PavelKalinin_HW/laba_10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"614552635","text":"#!/usr/bin/env python3\n\nimport finplot as fplt\nimport pandas as pd\nimport requests\nfrom io import StringIO\nfrom time import time\n\n\n# load data and convert date\nend_t = int(time()) \nstart_t = end_t - 12*30*24*60*60*14 # twelve months\nsymbol = 'SPY'\nsymbol = 'AMD'\ninterval = '1d'\nurl = 'https://query1.finance.yahoo.com/v7/finance/download/%s?period1=%s&period2=%s&interval=%s&events=history' % (symbol, start_t, end_t, interval)\nr = requests.get(url)\ndf = pd.read_csv(StringIO(r.text))\n\ndf['Date'] = pd.to_datetime(df['Date']).astype('int64') # use finplot's internal representation, which is ns\n\n# ax,ax2 = fplt.create_plot('S&P 500 MACD', rows=2)\n\n# plot macd with standard colors first\n# macd = df.Close.ewm(span=12).mean() - df.Close.ewm(span=26).mean()\n# signal = macd.ewm(span=9).mean()\n# df['macd_diff'] = macd - signal\n# fplt.volume_ocv(df[['Date','Open','Close','macd_diff']], ax=ax2, colorfunc=fplt.strength_colorfilter)\n# # fplt.plot(macd, ax=ax2, legend='MACD')\n# # fplt.plot(signal, ax=ax2, legend='Signal')\n#\n# # change to b/w coloring templates for next plots\n# fplt.candle_bull_color = fplt.candle_bear_color = '#000'\n# fplt.volume_bull_color = fplt.volume_bear_color = '#333'\n# fplt.candle_bull_body_color = fplt.volume_bull_body_color = '#fff'\n#\n# # plot price and volume\n# fplt.candlestick_ochl(df[['Date','Open','Close','High','Low']], ax=ax)\n# hover_label = fplt.add_legend('', ax=ax)\n# axo = ax.overlay()\n# fplt.volume_ocv(df[['Date','Open','Close','Volume']], ax=axo)\n# fplt.plot(df.Volume.ewm(span=24).mean(), ax=axo, color=1)\n\n#######################################################\n## update crosshair and legend when moving the mouse ##\n\n# def update_legend_text(x, y):\n# row = df.loc[df.Date==x]\n# # format html with the candle and set legend\n# fmt = '%%.2f' % ('0b0' if (row.Open str:\n return urlencode(self, encoding=self.encoding)\n\n def submit(self, action=None) -> Response:\n action = action or self.url\n assert action != None\n result = self.session.post(action, data=self.urldata)\n assert result.ok\n return result\n\n @staticmethod\n def urlencode(d: dict) -> str:\n return urlencode(d, encoding=ViewState.encoding)\n\n @staticmethod\n def extract(html: str) -> dict:\n ret = {}\n tags = bsfilter(html, name='input',\n attrs=ViewState.pattern)\n for tag in tags:\n ret[tag.get('name')] = tag.get('value')\n return ret\n\n def copy(self):\n new_vs = type(self)(self.session, url=self.url)\n new_vs.url = self.url\n new_vs.form = self.form\n new_vs.encoding = self.encoding\n new_vs.update(dict.copy(self))\n return new_vs\n\n def __repr__(self):\n return '<{} url={}, {}>'.format(\n type(self).__name__,\n repr(self.url),\n dict.__repr__(self))\n","sub_path":"lib/viewstate.py","file_name":"viewstate.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"413249520","text":"import tensorflow as tf\nimport sys\n\n\ndef 
get_tf_dtype(dtype: str):\n # TODO: add type supports + error handling\n tf_dtype = None\n\n if dtype == \"float32\":\n tf_dtype = tf.float32\n elif dtype == \"int64\":\n tf_dtype = tf.int64\n elif dtype == \"int32\":\n tf_dtype = tf.int32\n elif dtype == \"int8\":\n tf_dtype = tf.int8\n elif dtype == \"string\":\n tf_dtype = tf.string\n else:\n sys.exit(\"Error: Exit: dtype {} not recognized/supported\".format(dtype))\n\n return tf_dtype\n\n\ndef get_regularizer_fn(reg_str: str):\n\n # TODO: need to test/validate this contrib\n # TODO: need to allow modification for scale\n scale = 0.1\n\n if reg_str:\n reg_str = reg_str.lower()\n\n if reg_str == \"\":\n reg_fn = None # default is glorot\n elif reg_str == \"l1\":\n reg_fn = tf.contrib.layers.l1_regularizer(scale, scope=None)\n elif reg_str == \"l2\":\n reg_fn = tf.contrib.layers.l2_regularizer(scale, scope=None)\n elif reg_str == \"l1l2\":\n # TODO: how/is this different from elastic nets\n reg_fn = tf.contrib.layers.l1_l2_regularizer(\n scale_l1=1.0, scale_l2=1.0, scope=None\n )\n else:\n # TODO: Error\n reg_fn = None\n\n return reg_fn\n\n\ndef get_optimizer(MCd: dict):\n opt = MCd[\"optimizer\"].lower()\n optimizer = None\n if opt == \"adam\":\n optimizer = tf.train.AdamOptimizer(\n learning_rate=MCd[\"lr\"],\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-08,\n use_locking=False,\n name=\"Adam\",\n )\n elif opt == \"sgd\":\n optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=MCd[\"lr\"], name=\"GradientDescent\"\n )\n elif opt == \"adadelta\":\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate=MCd[\"lr\"],\n rho=0.95,\n epsilon=1e-08,\n use_locking=False,\n name=\"Adadelta\",\n )\n elif opt == \"adagrad\":\n optimizer = tf.train.AdagradOptimizer(\n learning_rate=MCd[\"lr\"],\n initial_accumulator_value=0.1,\n use_locking=False,\n name=\"Adagrad\",\n )\n # elif opt == \"momentum\":\n # tf.train.MomentumOptimizer(\n # learning_rate=MCd[\"lr\"],\n # momentum, # TODO: value\n # use_locking=False,\n # name=\"Momentum\",\n # use_nesterov=False,\n # )\n elif opt == \"ftrl\":\n optimizer = tf.train.FtrlOptimizer(\n learning_rate=MCd[\"lr\"],\n learning_rate_power=-0.5,\n initial_accumulator_value=0.1,\n l1_regularization_strength=0.0,\n l2_regularization_strength=0.0,\n use_locking=False,\n name=\"Ftrl\",\n accum_name=None,\n linear_name=None,\n l2_shrinkage_regularization_strength=0.0,\n )\n elif opt == \"rmsprop\":\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate=MCd[\"lr\"],\n decay=0.9,\n momentum=0.0,\n epsilon=1e-10,\n use_locking=False,\n centered=False,\n name=\"RMSProp\",\n )\n else:\n # TODO: error handle?\n # realistically this should be caught by the initial check\n pass\n\n return optimizer\n\n\ndef get_activation_fn(act_str: str):\n\n if act_str:\n act_str = act_str.lower()\n\n act_fn = None # TODO: this should maybe be an identity function\n if act_str == \"sigmoid\":\n act_fn = tf.sigmoid\n elif act_str == \"tanh\":\n act_fn = tf.tanh\n elif act_str == \"elu\":\n act_fn = tf.nn.elu\n elif act_str == \"selu\":\n act_fn = tf.nn.selu\n elif act_str == \"softplus\":\n act_fn = tf.nn.softplus\n elif act_str == \"softsign\":\n act_fn = tf.nn.softsign\n elif act_str == \"relu\":\n act_fn = tf.nn.relu\n # elif act == \"leaky\":\n # act_fn = tf.nn.leaky_relu\n elif act_str == \"relu6\":\n act_fn = tf.nn.relu6\n elif act_str == \"identity\":\n act_fn = tf.identity\n else:\n # TODO: Error logging\n # the reasoning here is that the relu is subjectively the most\n # common/default activation function in DNNs, but I don't 
LOVE this\n        sys.exit(\"No activation function has been set\")\n\n    return act_fn\n\n\ndef get_logits_and_preds(loss_str: str, hidden_out, num_classes: int, logger) -> tuple:\n    # create the output layer (logits and preds) based on the type of loss function used.\n    if loss_str == \"sigmoid\":\n        logits = tf.layers.dense(hidden_out, num_classes, name=\"logits\")\n        preds = tf.sigmoid(logits, name=\"y_proba\")\n    elif loss_str == \"softmax\":\n        logits = tf.layers.dense(hidden_out, num_classes, name=\"logits\")\n        preds = tf.nn.softmax(logits, name=\"y_proba\")\n    elif (\n        loss_str == \"softmax_binary_segmentation_temp\"\n        or loss_str == \"softmax_multi_segmentation_temp\"\n    ):\n        logits = hidden_out\n        preds = tf.nn.softmax(logits, name=\"y_proba\")\n    elif loss_str == \"mse\" or loss_str == \"rmse\":\n        logits = tf.layers.dense(hidden_out, num_classes, name=\"logits\")\n        preds = logits\n    else:\n        logger.fatal(\"preds cannot be created as: {}\".format(loss_str))\n        sys.exit(\"final_type: {} -- is not supported or defined.\".format(loss_str))\n    logger.debug(\"pred created as {}: {}\".format(loss_str, preds))\n\n    return (logits, preds)\n\n\ndef get_initializer_fn(init_str: str):\n    # NOTE: will use uniform (not normal) by default\n\n    # elif opts[\"kernel_initializer\"] == \"he\":\n    # init_fn = lambda shape, dtype=tf.float32: tf.truncated_normal(shape, 0., stddev=np.sqrt(2/shape[0]))\n    if init_str:\n        init_str = init_str.lower()\n\n    if init_str == \"\":\n        init_fn = None  # default is glorot\n    elif init_str == \"glorot\":\n        init_fn = tf.glorot_uniform_initializer(seed=None, dtype=tf.float32)\n    elif init_str == \"zeros\" or init_str == \"zero\":\n        init_fn = tf.zeros_initializer(dtype=tf.float32)\n    elif init_str == \"ones\" or init_str == \"one\":\n        init_fn = tf.ones_initializer(dtype=tf.float32)\n    elif init_str == \"rand\" or init_str == \"random\":\n        # TODO: this will need a value for maxval\n        init_fn = tf.random_uniform_initializer(\n            minval=0, maxval=None, seed=None, dtype=tf.float32\n        )\n    elif init_str == \"he\":\n        # TODO: unsure about this one...\n        init_fn = tf.contrib.layers.variance_scaling_initializer(\n            factor=2.0, mode=\"FAN_IN\", uniform=False, seed=None, dtype=tf.float32\n        )\n    else:\n        # TODO: Error\n        init_fn = None\n    return init_fn\n\n\ndef get_run_options(temp_trace_level: str):\n\n    if temp_trace_level == \"full\":\n        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n    elif temp_trace_level == \"software\":\n        run_options = tf.RunOptions(trace_level=tf.RunOptions.SOFTWARE_TRACE)\n    elif temp_trace_level == \"hardware\":\n        run_options = tf.RunOptions(trace_level=tf.RunOptions.HARDWARE_TRACE)\n    elif temp_trace_level == \"None\":\n        run_options = None\n    else:\n        run_options = None\n\n    return run_options\n","sub_path":"yamlflow/build/get_components.py","file_name":"get_components.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"158205970","text":"# Copyright 2017 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Import .whl files into Bazel.\"\"\"\n\ndef _whl_impl(repository_ctx):\n \"\"\"Core implementation of whl_library.\"\"\"\n\n args = [\n repository_ctx.attr.python_interpreter,\n repository_ctx.path(repository_ctx.attr._script),\n \"--whl\",\n repository_ctx.path(repository_ctx.attr.whl),\n \"--requirements\",\n repository_ctx.attr.requirements,\n ]\n\n if repository_ctx.attr.extras:\n args += [\n \"--extras=%s\" % extra\n for extra in repository_ctx.attr.extras\n ]\n\n if repository_ctx.attr.srcs_version:\n args += [ \"--srcs_version\", repository_ctx.attr.srcs_version ]\n\n result = repository_ctx.execute(args)\n if result.return_code:\n fail(\"whl_library failed: %s (%s)\" % (result.stdout, result.stderr))\n\nwhl_library = repository_rule(\n attrs = {\n \"extras\": attr.string_list(doc = \"\"\"\nA subset of the \"extras\" available from this .whl for which\nrequirements has the dependencies.\n\"\"\"),\n \"python_interpreter\": attr.string(default = \"python\", doc = \"\"\"\nThe command to run the Python interpreter used when unpacking the wheel.\n\"\"\"),\n \"requirements\": attr.string(doc = \"\"\"\nThe name of the pip_import repository rule from which to load this\n.whl's dependencies.\n\"\"\"),\n \"whl\": attr.label(\n mandatory = True,\n allow_single_file = True,\n doc = \"\"\"\nThe path to the .whl file. The name is expected to follow [this\nconvention](https://www.python.org/dev/peps/pep-0427/#file-name-convention)).\n\"\"\",\n ),\n \"srcs_version\": attr.string(\n doc = \"Set the srcs_version attribute for all the py_library in this wheel (optional).\",\n ),\n \"_script\": attr.label(\n executable = True,\n default = Label(\"//tools:whltool.par\"),\n cfg = \"host\",\n ),\n },\n implementation = _whl_impl,\n doc = \"\"\"A rule for importing `.whl` dependencies into Bazel.\n\nThis rule is currently used to implement `pip_import`. It is not intended to\nwork standalone, and the interface may change. 
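# A hedged aside on the PEP 427 file-name convention referenced in the whl.bzl
# doc above. This is only a sketch under the assumption of a well-formed name
# with no build tag; real code should prefer packaging.utils.parse_wheel_filename.
def split_wheel_name(fname):
    stem = fname[:-len(".whl")]
    parts = stem.split("-")
    python_tag, abi_tag, platform_tag = parts[-3:]
    name, version = parts[0], parts[1]  # ignores the optional build tag
    return name, version, python_tag, abi_tag, platform_tag

assert split_wheel_name("six-1.16.0-py2.py3-none-any.whl") == (
    "six", "1.16.0", "py2.py3", "none", "any")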
See `pip_import` for proper\nusage.\n\nThis rule imports a `.whl` file as a `py_library`:\n```python\nwhl_library(\n name = \"foo\",\n whl = \":my-whl-file\",\n requirements = \"name of pip_import rule\",\n)\n```\n\nThis rule defines `@foo//:pkg` as a `py_library` target.\n\"\"\",\n)\n","sub_path":"python/whl.bzl","file_name":"whl.bzl","file_ext":"bzl","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"83795055","text":"import sys\r\nfrom PyQt5 import uic\r\nfrom PyQt5 import QtGui\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton\r\nfrom PyQt5.QtGui import QPainter, QBrush, QPen, QColor\r\nfrom PyQt5.QtCore import Qt\r\nimport random, copy\r\n\r\nclass Window(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n uic.loadUi('UI.ui', self)\r\n self.title = \"PyQt5 круги\"\r\n self.draw = None\r\n self.InitWindow()\r\n\r\n def InitWindow(self):\r\n self.pushButton.clicked.connect(self.run)\r\n self.show()\r\n\r\n def run(self):\r\n self.draw = True\r\n self.update()\r\n\r\n def paintEvent(self, event):\r\n if self.draw:\r\n qp = QPainter()\r\n qp.begin(self)\r\n qp.setPen(QPen(QColor(255, 255, 0), 3, Qt.SolidLine))\r\n #qp.setPen(QColor(255, 255, 0))\r\n #qp.setBrush(QBrush(Qt.red, Qt.SolidPattern))\r\n rad = random.randrange(100, 255)\r\n x = random.randrange(455)\r\n y = random.randrange(455)\r\n qp.drawEllipse(x, y, rad, rad)\r\n qp.end()\r\n\r\n\r\nApp = QApplication(sys.argv)\r\nwindow = Window()\r\nsys.exit(App.exec())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69903121","text":"#\n# Copyright (C) 2019 by YOUR NAME HERE\n#\n# This file is part of RoboComp\n#\n# RoboComp is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# RoboComp is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with RoboComp. If not, see .\n\nimport sys, Ice, os\nfrom PySide import QtGui, QtCore\n\nROBOCOMP = ''\ntry:\n\tROBOCOMP = os.environ['ROBOCOMP']\nexcept KeyError:\n\tprint('$ROBOCOMP environment variable not set, using the default value /opt/robocomp')\n\tROBOCOMP = '/opt/robocomp'\n\npreStr = \"-I/opt/robocomp/interfaces/ -I\"+ROBOCOMP+\"/interfaces/ --all /opt/robocomp/interfaces/\"\nIce.loadSlice(preStr+\"CommonBehavior.ice\")\nimport RoboCompCommonBehavior\n\nadditionalPathStr = ''\nicePaths = [ '/opt/robocomp/interfaces' ]\ntry:\n\tSLICE_PATH = os.environ['SLICE_PATH'].split(':')\n\tfor p in SLICE_PATH:\n\t\ticePaths.append(p)\n\t\tadditionalPathStr += ' -I' + p + ' '\n\ticePaths.append('/opt/robocomp/interfaces')\nexcept:\n\tprint('SLICE_PATH environment variable was not exported. 
Using only the default paths')\n\tpass\n\n# load detector\nice_BodyHandJointsDetector = False\nfor p in icePaths:\n\tif os.path.isfile(p+'/BodyHandJointsDetector.ice'):\n\t\tpreStr = \"-I/opt/robocomp/interfaces/ -I\"+ROBOCOMP+\"/interfaces/ \" + additionalPathStr + \" --all \"+p+'/'\n\t\twholeStr = preStr+\"BodyHandJointsDetector.ice\"\n\t\tIce.loadSlice(wholeStr)\n\t\tice_PoseEstimation = True\n\t\tbreak\nif not ice_BodyHandJointsDetector:\n\tprint('Couldn\\'t load BodyHandJointsDetector')\n\tsys.exit(-1)\nfrom RoboCompBodyHandJointsDetector import *\n\n# load camera simple\nice_CameraSimple = False\nfor p in icePaths:\n\tif os.path.isfile(p+'/CameraSimple.ice'):\n\t\tpreStr = \"-I/opt/robocomp/interfaces/ -I\"+ROBOCOMP+\"/interfaces/ \" + additionalPathStr + \" --all \"+p+'/'\n\t\twholeStr = preStr+\"CameraSimple.ice\"\n\t\tIce.loadSlice(wholeStr)\n\t\tice_CameraSimple = True\n\t\tbreak\nif not ice_CameraSimple:\n\tprint('Couldn\\'t load CameraSimple')\n\tsys.exit(-1)\nfrom RoboCompCameraSimple import *\n\n\n\n\nclass GenericWorker(QtCore.QObject):\n\tkill = QtCore.Signal()\n\n\n\tdef __init__(self, mprx):\n\t\tsuper(GenericWorker, self).__init__()\n\n\t\tself.camerasimple_proxy = mprx[\"CameraSimpleProxy\"]\n\t\tself.bodyhandjointsdetector_proxy = mprx[\"BodyHandJointsDetectorProxy\"]\n\n\n\t\tself.mutex = QtCore.QMutex(QtCore.QMutex.Recursive)\n\t\tself.Period = 30\n\t\tself.timer = QtCore.QTimer(self)\n\n\n\t@QtCore.Slot()\n\tdef killYourSelf(self):\n\t\trDebug(\"Killing myself\")\n\t\tself.kill.emit()\n\n\t# \\brief Change compute period\n\t# @param per Period in ms\n\t@QtCore.Slot(int)\n\tdef setPeriod(self, p):\n\t\tprint(\"Period changed\", p)\n\t\tPeriod = p\n\t\ttimer.start(Period)\n","sub_path":"src/BodyHandJointsDetectorClient/src/genericworker.py","file_name":"genericworker.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"254669268","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objects as go\nfrom plotly import subplots\nimport plotly.express as px\nimport pandas as pd\nimport numpy as np\nfrom dash.dependencies import Input, Output, State\nimport cv2\nfrom PIL import Image\nfrom io import BytesIO\nimport base64\n# import dash_bootstrap_components as dbc\n\n# app = dash.Dash(external_stylesheets=[dbc.themes. 
PULSE])\n\napp = dash.Dash(__name__)\n\n#server=app.server\n\ntext1=\"\"\"\n\tlorem ipsum\n\"\"\"\ntext2=\"\"\"\nlorem ipsum\n\"\"\"\ntext3=\"\"\"\nlorem ipsum\n\"\"\"\ntext4=\"\"\"\nlorem ipsum\n\"\"\"\ntext5=\"\"\"\nlorem ipsum\n\"\"\"\ntext6=\"\"\"\nlorem ipsum\n\"\"\"\n\ndef get_classification(ratio):\n\tratio =round(ratio,1)\n\ttoret=\"\"\n\tif(ratio>=3 and ratio<3.5):\n\t\ttoret=\"Slender\"\n\telif(ratio>=2.1 and ratio<3):\n\t\ttoret=\"Medium\"\n\telif(ratio>=1.1 and ratio<2.1):\n\t\ttoret=\"Bold\"\n\telif(ratio>0.9 and ratio<=1):\n\t\ttoret=\"Round\"\n\telse:\n\t\ttoret=\"Dust\"\n\treturn toret\n\nclassification = {\"Slender\":0, \"Medium\":0, \"Bold\":0, \"Round\":0, \"Dust\":0}\navg = {\"Slender\":0, \"Medium\":0, \"Bold\":0, \"Round\":0, \"Dust\":0}\nimg = cv2.imread(\"./assets/rice.png\",0)#load in greyscale mode\n\n#convert into binary\nret,binary = cv2.threshold(img,160,255,cv2.THRESH_BINARY)# 160 - threshold, 255 - value to assign, THRESH_BINARY_INV - Inverse binary\n# print(ret)\n# print(binary)\n#averaging filter\nkernel = np.ones((5,5),np.float32)/9\ndst = cv2.filter2D(binary,-1,kernel)# -1 : depth of the destination image\n\nkernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\n# print(kernel)\n# print(dst)\n# print(kernel2)\n# erosion\nerosion = cv2.erode(dst,kernel2,iterations = 1)\n# print(erosion)\n# dilation \ndilation = cv2.dilate(erosion,kernel2,iterations = 1)\n# print(dilation)\n# edge detection\nedges = cv2.Canny(dilation,100,200)\n# print(edges)\n## Size detection\ncontours, hierarchy = cv2.findContours(erosion, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n# print(\"No. of rice grains=\",len(contours))\n# print(\"No. of rice grains=\",contours)\n# print(\"No. of rice grains=\",len(hierarchy))\n# print(\"No. of rice grains=\",hierarchy)\ntotal_ar=0\n# print(contours[0])\nfor cnt in contours:\n\tx,y,w,h = cv2.boundingRect(cnt)\n\taspect_ratio = float(w)/h\n\tif(aspect_ratio<1):\n\t\taspect_ratio=1/aspect_ratio\n\t# print(round(aspect_ratio,2),get_classification(aspect_ratio))\n\tclassification[get_classification(aspect_ratio)] += 1\n\tif get_classification(aspect_ratio) != \"Dust\":\n\t\ttotal_ar+=aspect_ratio\n\tif get_classification(aspect_ratio) != \"Dust\":\n\t\tavg[get_classification(aspect_ratio)] += aspect_ratio\n\navg_ar=total_ar/len(contours)\n# print(avg_ar)\n# print(classification[\"Medium\"])\n# print(avg['Slender'])\nif classification['Slender']!=0:\n\tavg['Slender'] = avg['Slender']/classification['Slender']\n\t# print(avg['Slender'])\nif classification['Medium']!=0:\n\tavg['Medium'] = avg['Medium']/classification['Medium']\nif classification['Bold']!=0:\n\tavg['Bold'] = avg['Bold']/classification['Bold']\nif classification['Round']!=0:\n\tavg['Round'] = avg['Round']/classification['Round']\n# print(img)\n# print(binary)\n# print(dst)\n# print(erosion)\n# print(dilation)\n# print(edges)\ncv2.imwrite(\"./assets/img.jpg\", img)\ncv2.imwrite(\"./assets/binary.jpg\", binary)\ncv2.imwrite(\"./assets/dst.jpg\", dst)\ncv2.imwrite(\"./assets/erosion.jpg\", erosion)\ncv2.imwrite(\"./assets/dilation.jpg\", dilation)\ncv2.imwrite(\"./assets/edges.jpg\", edges)\n\n# print(classification)\n# print(avg_ar)\n# print(avg)\n\n\ndef readb64(base64_string):\n sbuf = BytesIO()\n sbuf.write(base64.b64decode(base64_string))\n pimg = Image.open(sbuf)\n return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)\n\ndef update_image(pic):\n\timg = readb64(pic)\n\timg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tclassification1 = {\"Slender\":0, \"Medium\":0, \"Bold\":0, 
\"Round\":0, \"Dust\":0}\n\tavg1 = {\"Slender\":0, \"Medium\":0, \"Bold\":0, \"Round\":0, \"Dust\":0}\n\t#convert into binary\n\tret,binary = cv2.threshold(img,160,255,cv2.THRESH_BINARY)# 160 - threshold, 255 - value to assign, THRESH_BINARY_INV - Inverse binary\n\t#averaging filter\n\tkernel = np.ones((5,5),np.float32)/9\n\tdst = cv2.filter2D(binary,-1,kernel)# -1 : depth of the destination image\n\n\tkernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\n\n\t#erosion\n\terosion = cv2.erode(dst,kernel2,iterations = 1)\n\n\t#dilation \n\tdilation = cv2.dilate(erosion,kernel2,iterations = 1)\n\n\t#edge detection\n\tedges = cv2.Canny(dilation,100,200)\n\n\t## Size detection\n\tcontours, hierarchy = cv2.findContours(erosion, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\t#print(\"No. of rice grains=\",len(contours))\n\ttotal_ar1=0\n\tfor cnt in contours:\n\t\tx,y,w,h = cv2.boundingRect(cnt)\n\t\taspect_ratio = float(w)/h\n\t\tif(aspect_ratio<1):\n\t\t\taspect_ratio=1/aspect_ratio\n\t\t#print(round(aspect_ratio,2),get_classification(aspect_ratio))\n\t\tclassification1[get_classification(aspect_ratio)] += 1\n\t\tif get_classification(aspect_ratio) != \"Dust\":\n\t\t\ttotal_ar1+=aspect_ratio\n\t\tif get_classification(aspect_ratio) != \"Dust\":\n\t\t\tavg1[get_classification(aspect_ratio)] += aspect_ratio\n\tavg_ar1=total_ar1/len(contours)\n\tif classification1['Slender']!=0:\n\t\tavg1['Slender'] = avg1['Slender']/classification1['Slender']\n\tif classification1['Medium']!=0:\n\t\tavg1['Medium'] = avg1['Medium']/classification1['Medium']\n\tif classification1['Bold']!=0:\n\t\tavg1['Bold'] = avg1['Bold']/classification1['Bold']\n\tif classification1['Round']!=0:\n\t\tavg1['Round'] = avg1['Round']/classification1['Round']\n\tcv2.imwrite(\"./assets/img1.jpg\", img)\n\tcv2.imwrite(\"./assets/binary1.jpg\", binary)\n\tcv2.imwrite(\"./assets/dst1.jpg\", dst)\n\tcv2.imwrite(\"./assets/erosion1.jpg\", erosion)\n\tcv2.imwrite(\"./assets/dilation1.jpg\", dilation)\n\tcv2.imwrite(\"./assets/edges1.jpg\", edges)\n\treturn classification1,avg1,avg_ar1\n\ndef get_image(path):\n\timg = Image.open(path)\n\t#Constants\n\timg_width = 710\n\timg_height = 550\n\tscale_factor = 0.5\n\tfig = go.Figure()\n\tfig.add_trace(\n\t\tgo.Scatter(\n\t\t\tx=[0, img_width * scale_factor],\n\t\t\ty=[0, img_height * scale_factor],\n\t\t\tmode=\"markers\",\n\t\t\tmarker_opacity=0\n\t\t)\n\t)\n\tfig.update_xaxes(\n\t\tvisible=False,\n\t\trange=[0, img_width * scale_factor]\n\t)\n\tfig.update_yaxes(\n\t\tvisible=False,\n\t\trange=[0, img_height * scale_factor],\n\t\tscaleanchor=\"x\"\n\t)\n\tfig.add_layout_image(\n\t\tdict(\n\t\t\tx=0,\n\t\t\tsizex=img_width * scale_factor,\n\t\t\ty=img_height * scale_factor,\n\t\t\tsizey=img_height * scale_factor,\n\t\t\txref=\"x\",\n\t\t\tyref=\"y\",\n\t\t\topacity=1.0,\n\t\t\tlayer=\"below\",\n\t\t\tsizing=\"stretch\",\n\t\t\tsource=img)\n\t)\n\tfig.update_layout(\n\t\twidth=img_width * scale_factor,\n\t\theight=img_height * scale_factor,\n\t\tmargin={\"l\": 0, \"r\": 0, \"t\": 0, \"b\": 0},\n\t)\n\t# fig.show(config={'doubleClick': 'reset'})\n\treturn fig\n\ndef get_plot1(classification = classification, avg = avg, avg_ar = avg_ar):\n\tfig = subplots.make_subplots(rows=1,cols=1,specs=[[{\"type\":\"bar\"}]], shared_xaxes=True)\n\t#print(list(classification.keys()))\n\t#print(list(classification.values()))\n\tplot1 = go.Bar(x=list(classification.keys()), y=list(classification.values()), name=\"Particles\")\n\tplot2 = go.Bar(x=list(avg.keys()), y=list(avg.values()), name=\"Avg. 
Aspect Ratio\")\n\tfig.add_trace(plot1,1,1)\n\tfig.add_trace(plot2,1,1)\n\tfig.add_shape(\n\t\ttype=\"line\",\n\t\tx0=0,\n\t\ty0=round(avg_ar,2),\n\t\tx1=5,\n\t\ty1=round(avg_ar,2),\n\t\tline=dict(\n\t\t\tcolor=\"LightSeaGreen\",\n\t\t\twidth=4,\n\t\t\tdash=\"dashdot\",\n\t\t),\n\t)\n\tfig.update_layout(\n\t\twidth = 600,\n\t\theight = 350,\n\t\tmargin = {\"l\": 5, \"r\": 5, \"t\": 30, \"b\": 5},\n\t\ttitle = \"Average Aspect Ratio Vs Classification\",\n\t\ttemplate = \"plotly_dark\"\n\t)\n\treturn fig\n\ndef get_plot2(classification = classification):\n\tfig = subplots.make_subplots(rows=1,cols=1,specs=[[{\"type\":\"pie\"}]])\n\trice = sum(list(classification.values())) - classification['Dust']\n\tdust = classification['Dust']\n\tvalues = [rice, dust]\n\tlabels = [\"Rice\", \"Dust\"]\n\tplot1 = go.Pie(labels=labels, values=values, hole=.3)\n\tfig.add_trace(plot1,1,1)\n\tfig.update_layout(\n\t\twidth = 600,\n\t\theight = 350,\n\t\tmargin = {\"l\": 65, \"r\": 5, \"t\": 60, \"b\": 50},\n\t\ttitle = \"Quality Analysis\",\n\t\ttemplate = \"plotly_dark\"\n\t)\n\treturn fig\n\napp.layout = html.Div([\n\thtml.Div([\n\t\thtml.Div([\n\t\t\t# html.Img(\n\t\t\t# \tsrc=\"/assets/logo.jpg\",\n\t\t\t# \tstyle={\"height\" : \"40px\", \"width\" : \"40px\", \"border-radius\":\"20px\"}\n\t\t\t# )\n\t\t],style={\"float\":\"left\",\"padding\" : \"5px 0 5px 50px\"}),\n\t\thtml.Div(\n\t\t\tchildren=\"Rice quality analysis using Image Processing\",\n\t\t\tstyle={\"float\":\"left\",\"padding\" : \"10px 0 10px 10px\",\"font-size\": \"20px\", \"font-weight\" :\"600\", \"font-family\" : \"Noto Sans\", \"text-align\": \"center\"}\n\t\t),\n\t\thtml.Div([\n\t\t\thtml.Div([html.A(\"Home\",href=\"#home\")], style={\"float\":\"left\",\"padding\":\"0 10px 0 10px\",\"align-items\": \"center\",\"font-size\": \"15px\", \"font-weight\" :\"600\"}),\n\t\t\thtml.Div([html.A(\"About Project\",href=\"#about-project\")], style={\"float\":\"left\",\"padding\":\"0 10px 0 10px\",\"align-items\": \"center\",\"font-size\": \"15px\", \"font-weight\" :\"600\"}),\n\t\t\thtml.Div([html.A(\"About Us\",href=\"#about-us\")], style={\"float\":\"left\",\"padding\":\"0 10px 0 10px\",\"align-items\": \"center\",\"font-size\": \"15px\", \"font-weight\" :\"600\"}),\n\t\t\thtml.Div([html.A(\"Source Code\",href=\"#bottom\")], style={\"float\":\"left\",\"padding\":\"0 10px 0 10px\",\"align-items\": \"center\",\"font-size\": \"15px\", \"font-weight\" :\"600\"}),\n\t\t],style={\"float\":\"right\", \"padding\": \"10px 50px 10px 0px\"})\n\t],className=\"nav\"),\n\thtml.Div([],style={\"height\":\"50px\"},id=\"home\"),\n\thtml.Div([\n\t\thtml.Div([\n\t\t\tdcc.Upload([\n\t\t\t\t\t'Drag and Drop or ',\n\t\t\t\t\thtml.A('Select a File')\n\t\t\t\t],\n\t\t\t\tstyle={\n\t\t\t\t\t# 'position': 'absolute',\n\t\t\t\t\t'width': '100%',\n\t\t\t\t\t'height': '60px',\n\t\t\t\t\t'lineHeight': '60px',\n\t\t\t\t\t'borderWidth': '1px',\n\t\t\t\t\t#'borderStyle': 'dashed',\n\t\t\t\t\t'borderRadius': '5px',\n\t\t\t\t\t'text-align': 'center', \"align-items\" : \"center\"\n\t\t\t\t }, id=\"upload-image\")\n\t\t], style={\"text-align\" : \"center\",'width': 'auto', 'margin': 'auto'}),\n\t\t# dcc.Upload([\n\t\t# \t\t\t'Drag and Drop or ',\n\t\t# \t\t\thtml.A('Select a File')\n\t\t# \t\t],\n\t\t# \t\tstyle={\n\t\t# \t\t\t# 'position': 'absolute',\n\t\t# \t\t\t'width': '50%',\n\t\t# \t\t\t'height': '60px',\n\t\t# \t\t\t'lineHeight': '60px',\n\t\t# \t\t\t'borderWidth': '1px',\n\t\t# \t\t\t'borderStyle': 'dashed',\n\t\t# \t\t\t'borderRadius': '5px',\n\t\t# \t\t\t'textAlign': 'center', 
\"align-items\" : \"center\"\n\t\t# \t\t }, id=\"upload-image\"),\n\t\t# html.H1(children=\"Visualisation of Results\", style={\"text-align\":\"center\", \"margin\":\"0\", \"padding-bottom\" : \"20px\", \"color\" : \"black\"}),\n\t\thtml.Div([\n\t\t\thtml.H1(children=\"Images\", style={\"text-align\":\"center\", \"margin\":\"0\", \"padding-bottom\" : \"20px\"}),\n\t\t\t\thtml.Div([\n\t\t\t\t\thtml.Div([\n\t\t\t\t\t\tdcc.Graph(figure=get_image(\"./assets/img.jpg\"),id=\"img\"),\n\t\t\t\t\t\thtml.P(\"Original Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t\t\t\t\thtml.Div([\n\t\t\t\t\t\tdcc.Graph(figure=get_image(\"./assets/binary.jpg\"),id=\"binary\"),\n\t\t\t\t\t\thtml.P(\"Binary Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t\t\t\t\thtml.Div([\n\t\t\t\t\t\tdcc.Graph(figure=get_image(\"./assets/dst.jpg\"),id=\"dst\"),\n\t\t\t\t\t\thtml.P(\"Destination Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"})\n\t\t\t\t], style = {\"display\": \"flex\", \"justify-content\": \"center\", \"align-items\": \"center\", \"text-align\":\"center\"}),\n\t\t\t\thtml.Div([]),\n\t\t\t\thtml.Div([\n\t\t\t\t\thtml.Div([\n\t\t\t\t\t\tdcc.Graph(figure=get_image(\"./assets/erosion.jpg\"),id=\"erosion\"),\n\t\t\t\t\t\thtml.P(\"Erosion\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t\t\t\t\thtml.Div([\n\t\t\t\t\t\tdcc.Graph(figure=get_image(\"./assets/dilation.jpg\"),id=\"dilation\"),\n\t\t\t\t\t\thtml.P(\"Dilation\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t\t\t\t\thtml.Div([\n\t\t\t\t\t\tdcc.Graph(figure=get_image(\"./assets/edges.jpg\"),id=\"edges\"),\n\t\t\t\t\t\thtml.P(\"Edge Detection\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"})\n\t\t\t\t], style = {\"display\": \"flex\", \"justify-content\": \"center\", \"align-items\": \"center\", \"text-align\":\"center\"})\n\t\t\t],style = {\"color\":\"black\", \"background-color\" : \"burlywood\", \"border-radius\":\"40px 40px 40px 40px\", \"padding\" : \"20px 0 20px 0\"},id='images'),\n\t\t# html.Div([\n\t\t# \thtml.Div([\n\t\t# \t\tdcc.Graph(figure=get_plot1(),id=\"graph1\"),\n\t\t# \t\thtml.P(\"Original Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t# \t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t\t# \thtml.Div([\n\t\t# \t\tdcc.Graph(figure=get_plot2(),id=\"graph2\"),\n\t\t# \t\thtml.P(\"Binary Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t# \t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t\t# ], style = {\"display\": \"flex\", 
\"justify-content\": \"center\", \"align-items\": \"center\", \"text-align\":\"center\"}),\n\t\thtml.Div([]),\n\t\thtml.Div([\n\t\t\thtml.Div([\n\t\t\t\t html.H1(children=\"Visualisation of Results\", style={\"text-align\":\"center\", \"margin\":\"0\", \"padding-bottom\" : \"20px\", \"color\" : \"black\"}),\n\t\t\t\t# dcc.Upload([\n\t\t\t\t# \t'Drag and Drop or ',\n\t\t\t\t# \thtml.A('Select a File')\n\t\t\t\t# ],\n\t\t\t\t# style={\n\t\t\t\t# \t# 'position': 'absolute',\n\t\t\t\t# \t'width': '100%',\n\t\t\t\t# \t'height': '60px',\n\t\t\t\t# \t'lineHeight': '60px',\n\t\t\t\t# \t'borderWidth': '1px',\n\t\t\t\t# \t'borderStyle': 'dashed',\n\t\t\t\t# \t'borderRadius': '5px',\n\t\t\t\t# \t'textAlign': 'center'\n\t\t\t\t# }, id=\"upload-image\"),\n\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t\t], style = {\"display\": \"flex\", \"justify-content\": \"center\", \"align-items\": \"center\", \"text-align\":\"center\", \"width\" : \"100%\"})\n\t],style = {\"color\":\"black\", \"padding\" : \"20px 0 20px 0\", \"color\" : \"whitesmoke\",\"background-color\" : \"lightcoral\"},id='plots'),\n\thtml.Div([\n\t\t\thtml.Div([\n\t\t\t\tdcc.Graph(figure=get_plot1(),id=\"graph1\"),\n\t\t\t\thtml.P(\"Original Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t\t\thtml.Div([\n\t\t\t\tdcc.Graph(figure=get_plot2(),id=\"graph2\"),\n\t\t\t\thtml.P(\"Binary Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t\t], style = {\"display\": \"flex\", \"justify-content\": \"center\", \"align-items\": \"center\", \"text-align\":\"center\"}),\n\t# html.Div([\n\t# \thtml.H1(children=\"Images\", style={\"text-align\":\"center\", \"margin\":\"0\", \"padding-bottom\" : \"20px\"}),\n\t# \thtml.Div([\n\t# \t\thtml.Div([\n\t# \t\t\tdcc.Graph(figure=get_image(\"./assets/img.jpg\"),id=\"img\"),\n\t# \t\t\thtml.P(\"Original Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t# \t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t# \t\thtml.Div([\n\t# \t\t\tdcc.Graph(figure=get_image(\"./assets/binary.jpg\"),id=\"binary\"),\n\t# \t\t\thtml.P(\"Binary Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t# \t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t# \t\thtml.Div([\n\t# \t\t\tdcc.Graph(figure=get_image(\"./assets/dst.jpg\"),id=\"dst\"),\n\t# \t\t\thtml.P(\"Destination Image\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t# \t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"})\n\t# \t], style = {\"display\": \"flex\", \"justify-content\": \"center\", \"align-items\": \"center\", \"text-align\":\"center\"}),\n\t# \thtml.Div([]),\n\t# \thtml.Div([\n\t# \t\thtml.Div([\n\t# \t\t\tdcc.Graph(figure=get_image(\"./assets/erosion.jpg\"),id=\"erosion\"),\n\t# \t\t\thtml.P(\"Erosion\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t# \t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 
0 20px\"}),\n\t# \t\thtml.Div([\n\t# \t\t\tdcc.Graph(figure=get_image(\"./assets/dilation.jpg\"),id=\"dilation\"),\n\t# \t\t\thtml.P(\"Dilation\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t# \t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"}),\n\t# \t\thtml.Div([\n\t# \t\t\tdcc.Graph(figure=get_image(\"./assets/edges.jpg\"),id=\"edges\"),\n\t# \t\t\thtml.P(\"Edge Detection\", style={\"margin\":\"0\",\"padding-bottom\":\"10px\"})\n\t# \t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 20px 0 20px\"})\n\t# \t], style = {\"display\": \"flex\", \"justify-content\": \"center\", \"align-items\": \"center\", \"text-align\":\"center\"})\n\t# ],style = {\"color\":\"black\", \"background-color\" : \"burlywood\", \"border-radius\":\"40px 40px 40px 40px\", \"padding\" : \"20px 0 20px 0\"},id='images'),\n\thtml.Div([\n\t\thtml.H1(children=\"About Project\", style={\"text-align\":\"center\"}),\n\t\thtml.P(children=text1),\n\t\thtml.P(children=text2),\n\t\thtml.P(children=text3),\n\t\thtml.P(children=text4),\n\t\thtml.P(children=text5),\n\t\thtml.P(children=text6),\n\t],style = {\"color\":\"white\", \"padding\":\"10px 50px 10px 50px\",\"background-color\" : \"aqua\"},id=\"about-project\"),\n\thtml.Div([\n\t\thtml.H1(children=\"About Us\", style={\"text-align\":\"center\", \"margin\":\"0\", \"padding-bottom\" : \"20px\"}),\n\t\thtml.Div([\n\t\t\thtml.Div([\n\t\t\t\thtml.Img(src=\"/assets/ramu.jpg\", style={\"height\":\"120px\",\"height\":\"120px\",\"border-radius\":\"60px\"}),\n\t\t\t\thtml.P(\"Adarsh\", style={\"margin\":\"0\"})\n\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 50px 0 50px\"}),\n\t\t\thtml.Div([\n\t\t\t\thtml.Img(src=\"/assets/kishore.jpg\", style={\"height\":\"120px\",\"height\":\"120px\",\"border-radius\":\"60px\"}),\n\t\t\t\thtml.P(\"Nakshatra\", style={\"margin\":\"0\"})\n\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 50px 0 50px\"}),\n\t\t\thtml.Div([\n\t\t\t\thtml.Img(src=\"/assets/sart.jpg\", style={\"height\":\"120px\",\"height\":\"120px\",\"border-radius\":\"60px\"}),\n\t\t\t\thtml.P(\"Yashashwi\", style={\"margin\":\"0\"})\n\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 50px 0 50px\"})\n\t\t], style = {\"display\": \"flex\", \"justify-content\": \"center\", \"align-items\": \"center\", \"text-align\":\"center\"}),\n\t\thtml.Div([]),\n\t\thtml.Div([\n\t\t\thtml.Div([\n\t\t\t\thtml.Img(src=\"/assets/yash.jpg\", style={\"height\":\"120px\",\"height\":\"120px\",\"border-radius\":\"60px\"}),\n\t\t\t\thtml.P(\"Pranjal\", style={\"margin\":\"0\"})\n\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 50px 0 50px\"}),\n\t\t\thtml.Div([\n\t\t\t\thtml.Img(src=\"/assets/gaut.jpg\", style={\"height\":\"120px\",\"height\":\"120px\",\"border-radius\":\"60px\"}),\n\t\t\t\thtml.P(\"Sharwari\", style={\"margin\":\"0\"})\n\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 50px 0 50px\"}),\n\t\t\thtml.Div([\n\t\t\t\thtml.Img(src=\"/assets/gaut.jpg\", style={\"height\":\"120px\",\"height\":\"120px\",\"border-radius\":\"60px\"}),\n\t\t\t\thtml.P(\"Shrushti\", 
style={\"margin\":\"0\"})\n\t\t\t], style = {\"display\": \"block\", \"justify-content\": \"center\", \"align-items\": \"center\", \"padding\":\"0 50px 0 50px\"})\n\t\t], style = {\"display\": \"flex\", \"justify-content\": \"center\", \"align-items\": \"center\", \"text-align\":\"center\"})\n\t],style = {\"color\":\"black\", \"background-color\" : \"lightsteelblue\", \"border-radius\":\"40px 40px 0 0\", \"padding\" : \"20px 0 20px 0\"},id='about-us'),\n\thtml.Div([\n\t\thtml.Div([\n\t\t\thtml.Div([html.A(\"Github\",href=\"https://www.google.com/\")], style={\"padding\":\"0 10px 0 10px\",\"align-items\": \"center\",\"font-size\": \"15px\", \"font-weight\" :\"600\"}),\n\t\t],style={\"padding\": \"10px 50px 10px 0px\", \"height\": \"50px\",\"align-items\": \"center\",\"background-color\": \"black\",\"color\": \"whitesmoke\",\"width\": \"100%\",\"list-style-type\": \"none\",\"margin\": \"0\",\"overflow\": \"hidden\",\"text-align\": \"center\",\"right\": \"0\",\"bottom\": \"0\",\"left\": \"0\",\"clear\": \"both\",\"position\": \"relative\"})\n\t],className=\"foo\",id=\"bottom\")\n])\n\ndef parse_contents(contents, filename):\n\tprint(contents)\n\n@app.callback([Output('img', 'figure'),\n\t\t\t Output('binary', 'figure'),\n\t\t\t Output('dst', 'figure'),\n\t\t\t Output('erosion', 'figure'),\n\t\t\t Output('dilation', 'figure'),\n\t\t\t Output('edges', 'figure'),\n\t\t\t Output('graph1', 'figure'),\n\t\t\t Output('graph2', 'figure')],\n\t\t\t [Input('upload-image', 'contents')])\ndef update_output(list_of_contents):\n\tif list_of_contents is not None:\n\t\tind = str(list_of_contents).find(\",\")\n\t\tcla,av,av_ar = update_image(list_of_contents[ind:])\n\t\treturn get_image(\"./assets/img1.jpg\"), get_image(\"./assets/binary1.jpg\"), get_image(\"./assets/dst1.jpg\"), get_image(\"./assets/erosion1.jpg\"), get_image(\"./assets/dilation1.jpg\"), get_image(\"./assets/edges1.jpg\"), get_plot1(cla, av, av_ar), get_plot2(cla)\n\telse:\n\t\treturn get_image(\"./assets/img.jpg\"), get_image(\"./assets/binary.jpg\"), get_image(\"./assets/dst.jpg\"), get_image(\"./assets/erosion.jpg\"), get_image(\"./assets/dilation.jpg\"), get_image(\"./assets/edges.jpg\"), get_plot1(), get_plot2()\n\nif __name__ == '__main__':\n\tapp.run_server(debug=False)\n\n","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":21005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"30667835","text":"import pandas as pd\nimport openpyxl\nimport csv\nimport os\n\nwb = openpyxl.load_workbook('good_images_cat.xlsx')\nsheetNames = wb.get_sheet_names()\n\nfullDataForDownload = []\n\nsymbols = (u\"абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ\",\n u\"abvgdeejzijklmnoprstufhzcss_y_euaABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUA\")\n\ntr = {ord(a):ord(b) for a, b in zip(*symbols)}\n\nfor sheetName in sheetNames:\n data = pd.read_excel('good_images_cat.xlsx', sheet_name=sheetName)\n sheetName = sheetName.translate(tr)\n for i in range(0,data.shape[0]):\n fullDataForDownload.append({'category':sheetName,'id':data.iloc[i][1],'gkId':data.iloc[i][2]})\n print ('Finished ',sheetName)\n\nkeys = fullDataForDownload[0].keys()\nprint (keys)\nwith open('downloadList.csv', 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n 
dict_writer.writerows(fullDataForDownload)","sub_path":"createDownloadList.py","file_name":"createDownloadList.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"45647480","text":"# ## MASTER YODA: Given a sentence, return a sentence with the words reversed\n# master_yoda('I am home') --> 'home am I'\n# master_yoda('We are ready') --> 'ready are We'\n# Note: The .join() method may be useful here. \n# The .join() method allows you to join together strings in a list with some connector string. \n# For example, some uses of the .join() method:\n\n# >>> \"--\".join(['a','b','c'])\n# >>> 'a--b--c'\n# This means if you had a list of words you wanted to turn back into a sentence, you could just join them with a single space string:\n\n# >>> \" \".join(['Hello','world'])\n# >>> \"Hello world\"\n\ndef master_yoda(text):\n text = text.split()\n text = text[::-1]\n text = ' '.join(text)\n return text\n\n# test\n\nprint(master_yoda('I am home'))\nprint(master_yoda('We are ready'))\nprint(master_yoda('Hello world'))\n","sub_path":"LEVEL_1_ PROBLEMS-MASTER_YODA.py","file_name":"LEVEL_1_ PROBLEMS-MASTER_YODA.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"324934989","text":"\"\"\"\nThis is the toy example problem in the SNOPT documentation.\n\n min (x1 + x2 + x3)**2 + 3x3 + 5x4\n st.\n x3 >= 0, x4 >= 0\n x1**2 + x2**2 + x3 = 2\n x2**4 + x4 = 4\n 2x1 + 4x2 >= 0\n\n\n The linear part of the objective 3x3 + 5x4 is given\n as part of the constraints:\n\n min (x1 + x2 + x3)**2 + e_4\n st.\n x3 >= 0, x4 >= 0\n x1**2 + x2**2 + x3 = 2\n x2**4 + x4 = 4\n 2x1 + 4x2 >= 0\n inf > 3x3 + 5x4 > -inf\n\n The Jacobian matrix is\n [2*x1 2*x2 1.0 0]\n [0 4*x3 0 1.0]\n [2.0 4.0 0 0]\n [0.0 0.0 3.0 5.0]\n with\n iObj = 4 (indicating the linear objective term)\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom optimize.snopt7 import SNOPT_solver\n\ndef toycon(mode,nnCon,nnJac,neJac,x,nState,cu,iu,ru):\n # Nonlinear terms of the gradient only\n fCon = np.zeros(nnCon,float)\n if mode == 0 or mode == 2:\n fCon[0] = x[0]**2 + x[1]**2\n fCon[1] = x[1]**4\n\n gCon = np.zeros(neJac,float)\n if mode >= 1:\n gCon[0] = 2.0*x[0]\n gCon[1] = 2.0*x[1]\n gCon[2] = 4.0*x[1]**3\n\n return mode, fCon, gCon\n\n\ndef toyobj(mode,nnObj,x,nState,cu,iu,ru):\n sum = x[0] + x[1] + x[2]\n # Nonlinear objective term only\n fObj = 0.0\n if mode == 0 or mode == 2:\n fObj = sum**2\n\n gObj = np.zeros(nnObj,float)\n if mode == 1 or mode == 2:\n gObj[0] = 2.0*sum\n gObj[1] = 2.0*sum\n gObj[2] = 2.0*sum\n\n return mode, fObj, gObj\n\n\n\nsnoptb = SNOPT_solver()\ninf = 1.0e+20\n\nsnoptb.setOption('Infinite bound',inf)\nsnoptb.setOption('Print file','sntoyb.out')\n\nm = 4\nn = 4\n\nnnCon = 2\nnnJac = 2\nnnObj = 3\n\n# J contains the sparsity pattern of the Jacobian matrix.\n# For nonlinear elements, enter any nonzero number (in this case 100).\n# Linear elements must be correctly defined.\nJ = np.array([ [100.0, 100.0, 1.0, 0],\n [0 , 100.0, 0, 1.0],\n [2.0 , 4.0, 0, 0],\n [0.0 , 0.0, 3.0, 5.0]])\n\n# Alternatively, the user can provide the sparsity pattern in\n# sparse-by-column format. 
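An aside on the commented-out sparse-by-column block in this SNOPT example: the layout it describes is exactly compressed-sparse-column (CSC) storage, where `locJ` plays the role of scipy's `indptr` and `indJ` of `indices`. A sketch that rebuilds the same 4x4 Jacobian pattern from those values, using the `scipy.sparse` module the record already imports as `sp`:

```python
import numpy as np
import scipy.sparse as sp

# CSC layout: data[k] sits in row indices[k], and column j owns the
# entries data[indptr[j]:indptr[j+1]].
indices = np.array([0, 2, 0, 1, 2, 0, 3, 1, 3])
indptr = np.array([0, 2, 5, 7, 9])
data = np.array([100.0, 2.0, 100.0, 100.0, 4.0, 1.0, 3.0, 1.0, 5.0])

A = sp.csc_matrix((data, indices, indptr), shape=(4, 4))
print(A.toarray())  # reproduces the dense J written out earlier
```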
Here, indJ contains the row indices,\n# locJ are the column pointers, and J contains the matrix values.\n#\n# indJ = np.array([0,2,0,1,2,0,3,1,3],int)\n# locJ = np.array([0,2,5,7,9],int)\n# J = np.array([100.0, 2.0, 100.0, 100.0, 4.0, 1.0, 3.0, 1.0, 5.0],float)\n\n\nbl = -inf*np.ones(n+m)\nbu = inf*np.ones(n+m)\n\nbl[2] = 0.0\nbl[3] = 0.0\n\nbl[4] = 2.0\nbu[4] = 2.0\n\nbl[5] = 4.0\nbu[5] = 4.0\n\nbl[6] = 0.0\n\niObj = 4\n\nsnoptb.setOption('Verbose',True)\n\nnames = np.array(['12345678']*(n+m))\n\nsnoptb.snoptb(name=' sntoyb',m=m,n=n,nnCon=nnCon,nnObj=nnObj,nnJac=nnJac,iObj=iObj,\\\n bl=bl,bu=bu,J=J,funcon=toycon,funobj=toyobj,Names=names)\n\n","sub_path":"examples/sntoyb.py","file_name":"sntoyb.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"650175367","text":"from typing import Any, Dict, Optional, Union\n\nfrom sqlalchemy.orm import Session\n\nfrom app.crud.base import CRUDBase\nfrom app.crud import user\nfrom app.models import Project\nfrom app.schemas.project_schemas import ProjectCreate, ProjectUpdate\n\n\nclass CRUDProject(CRUDBase[Project, ProjectCreate, ProjectUpdate]):\n def get_by_id(self, db: Session, *, id: int) -> Optional[Project]:\n return db.query(Project).filter_by(id=id).first()\n\n def create(self, db: Session, *, obj_in: ProjectCreate) -> Project:\n db_obj = Project()\n db_obj.name = obj_in.name\n db_obj.description = obj_in.description\n db_obj.status = obj_in.status\n db.add(db_obj)\n\n user_list = obj_in.user_list\n users = user.get_all_users(db, list_id=user_list)\n print(users)\n if users:\n db_obj.users = users\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def update(\n self,\n db: Session,\n *,\n db_obj: Project,\n obj_in: Union[ProjectUpdate, Dict[str, Any]]\n ) -> Project:\n if isinstance(obj_in, dict):\n update_data = obj_in\n else:\n update_data = obj_in.dict(exclude_unset=True)\n if update_data['user_list']:\n del update_data['user_list']\n return super().update(db, db_obj=db_obj, obj_in=update_data)\n\n\nproject = CRUDProject(Project)\n","sub_path":"app/crud/crud_project.py","file_name":"crud_project.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"524243752","text":"from django.conf.urls import url\nfrom django.views.generic import RedirectView\n\nfrom . 
import views\n\napp_name = \"myapp\"\n\nurlpatterns = [\n # fmt: off\n url(r\"^$\", RedirectView.as_view(pattern_name=\"myapp:bootstrap3.custom-form\", permanent=False)),\n url(\"bootstrap3/custom-form.html\", views.Bootstrap3_CustomFormView.as_view(), name=\"bootstrap3.custom-form\"),\n url(r\"^bootstrap3/model-form-(?P[0-9]+).html$\", views.Bootstrap3_UpdateView.as_view(), name=\"bootstrap3.model-form-1\"),\n url(\"bootstrap4/custom-form.html\", views.Bootstrap4_CustomFormView.as_view(), name=\"bootstrap4.custom-form\"),\n url(r\"^bootstrap4/model-form-(?P[0-9]+).html$\", views.Bootstrap4_UpdateView.as_view(), name=\"bootstrap4.model-form-1\"),\n]\n","sub_path":"dev/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"280570483","text":"import math\nimport numpy as np\nfrom scipy.ndimage.morphology import *\nimport PIL\nfrom info import Info\nfrom scipy import misc\n\nclass Node:\n def __init__(self, pic, name, rotate, array, picarray, maxdimensions):\n self.pic = pic\n self.name=name\n self.rotate=rotate\n self.array=array\n self.picarray=picarray\n self.globalpiccombo=None\n self.globaldirection=None\n self.globalnode=None\n self.globalnewarray=None\n self.Evilnode=False\n self.bestpicarray=None\n self.bestpicarraytemp=None\n self.x=int(maxdimensions[0])\n self.y=int(maxdimensions[1])\n self.secondbestnode=None\n self.secondbestnodescore=None\n self.bestNode=None\n self.mapofnodeandedges={}\n self.haschanged=True\n self.degree=0\n self.combos=[]\n\n\n def scoreforleft(self, pic1, pic2, node2, location, location2): #as in pic2 terms\n score=0\n x=pic1.shape[0]\n i=0\n if (node2,self,\"left\", location, location2) in self.mapofnodeandedges:\n return self.mapofnodeandedges[(node2,self,\"left\", location, location2)]\n try:\n while True:\n tuple=abs(pic1[i,x-1]-pic2[i,0])\n r=tuple[0].astype(int)\n g=tuple[1].astype(int)\n b=tuple[2].astype(int)\n score+=math.sqrt((r*r)+(g*g)+(b*b))\n i+=1\n except IndexError: \n pass\n #if (node2,self,\"left\",location, location2) in self.mapofnodeandedges: \n \n #if self.mapofnodeandedges[(node2,self,\"left\",location, location2)]!=int (round(score)): \n # print self.mapofnodeandedges[(node2,self,\"left\",location, location2)] \n # print \"and what It should be \"\n # print int(round(score)) \n #print (node2,self,\"left\",location, location2) \n self.mapofnodeandedges[(node2,self,\"left\", location, location2)] =int (round(score))\n return int(round(score))\n\n def scoreforright(self, pic1, pic2, node2, location, location2): #as in pic 2 bottom\n score=0\n bottomsize=0\n x=pic1.shape[0]\n i=0\n if (node2,self,\"right\", location, location2) in self.mapofnodeandedges:\n return self.mapofnodeandedges[(node2,self,\"right\", location, location2)]\n try:\n while True: \n tuple=abs(pic1[i,0]-pic2[i,x-1])\n r=tuple[0].astype(int)\n g=tuple[1].astype(int)\n b=tuple[2].astype(int)\n score+=math.sqrt((r*r)+(g*g)+(b*b)) \n i+=1\n except IndexError:\n pass \n #if (node2,self,\"right\",location, location2) in self.mapofnodeandedges: \n \n #if self.mapofnodeandedges[(node2,self,\"right\",location, location2)]!=int (round(score)): \n # print self.mapofnodeandedges[(node2,self,\"right\",location, location2)] \n #print \"and what It should be \"\n #print int(round(score)) \n #print (node2,self,\"right\",location, location2) \n\n self.mapofnodeandedges[(node2,self,\"right\", location, location2)] =int (round(score)) \n return 
int(round(score))\n\n def scorefortop(self, pic1, pic2, node2, location, location2):\n score=0\n x=pic1.shape[0]\n i=0\n if (node2,self,\"top\",location, location2) in self.mapofnodeandedges:\n return self.mapofnodeandedges[(node2,self,\"top\", location, location2)] \n try:\n while True:\n tuple=abs(pic1[x-1,i]-pic2[0,i])\n r=tuple[0].astype(int)\n g=tuple[1].astype(int)\n b=tuple[2].astype(int)\n score+=math.sqrt((r*r)+(g*g)+(b*b))\n i+=1\n except IndexError:\n pass \n #if (node2,self,\"top\",location, location2) in self.mapofnodeandedges :\n \n # if self.mapofnodeandedges[(node2,self,\"top\",location, location2)]!=int (round(score)): \n # print self.mapofnodeandedges[(node2,self,\"top\",location, location2)] \n # print \"and what It should be \"\n # print int(round(score)) \n #print (node2,self,\"top\",location, location2) \n self.mapofnodeandedges[(node2,self,\"top\", location, location2)] =int (round(score)) \n return int(round(score))\n\n def scoreforbottom(self, pic1, pic2, node2,location, location2): #as in pic 2's right\n score=0\n x=0\n i=0\n x=pic1.shape[0]\n if (node2,self,\"bottom\", location, location2) in self.mapofnodeandedges:\n return self.mapofnodeandedges[(node2,self,\"bottom\", location,location2)] \n try:\n while True:\n tuple=abs(pic1[0,i]-pic2[x-1,i])\n r=tuple[0].astype(int)\n g=tuple[1].astype(int)\n b=tuple[2].astype(int)\n score+=math.sqrt((r*r)+(g*g)+(b*b))\n i+=1\n except IndexError:\n pass\n #if (node2,self,\"bottom\",location, location2) in self.mapofnodeandedges :\n \n #if self.mapofnodeandedges[(node2,self,\"bottom\",location, location2)]!=int (round(score)): \n #print self.mapofnodeandedges[(node2,self,\"bottom\",location, location2)] \n #print \"and what It should be \"\n #print int(round(score)) \n #print (node2,self,\"bottom\",location,location2) \n self.mapofnodeandedges[(node2,self,\"bottom\",location, location2)] =int (round(score)) \n return int(round(score))\n\n def getscore(self, pair1, pair2, nodearray1, nodearray2, direction, node2, r, c, location):\n piece1=self.picarray\n piece2=node2.picarray\n h1 = piece1.shape[0]# the x and y of the pieces\n w1 = piece1.shape[1]\n h2 = piece2.shape[0]\n w2 = piece2.shape[1]\n oldpadded1=np.zeros( (h1+2*h2,w1+2*w2), dtype=\"object\" )\n padded1 = np.zeros( (h1+2*h2,w1+2*w2), dtype=\"object\" )\n padded1[h2:(h2+h1),w2:(w2+w1)] = piece1\n temp=np.zeros( (h1+2*h2,w1+2*w2), dtype=\"object\" )\n temp[r:(h2+r),c:(w2+c)]=piece2\n distancex=0\n distancey=0\n stuff=temp.nonzero()\n for y in range(0, len(stuff[0])):\n storeing=stuff[0][y],stuff[1][y]\n distancex=storeing[0]-pair2[0]\n distancey=storeing[1]-pair2[1]\n padded1[pair2[0]+distancex][pair2[1]+distancey]=temp[storeing[0], storeing[1]]\n if temp[pair2[0], pair2[1]]==0 or padded1[pair2[0], pair2[1]]==0:\n raise ValueError('SOMETHING WENT WRONG')\n if direction==\"right\":\n self.bestpicarraytemp=padded1\n self.globaldirection=\"right\"\n self.globalnode=node2 \n return self.scoreforright(padded1[pair1[0], pair1[1]].pic, temp[pair2[0], pair2[1]].pic, node2, temp[pair2[0], pair2[1]].name,padded1[pair1[0], pair1[1]].name )\n elif direction==\"left\":\n self.globaldirection=\"left\"\n self.globalnode=node2 \n self.bestpicarraytemp=padded1\n return self.scoreforleft(padded1[pair1[0], pair1[1]].pic, temp[pair2[0], pair2[1]].pic,node2, temp[pair2[0], pair2[1]].name, padded1[pair1[0], pair1[1]].name)\n elif direction==\"down\":\n self.globaldirection=\"down\"\n self.globalnode=node2\n self.bestpicarraytemp=padded1\n return self.scoreforbottom(padded1[pair1[0], 
pair1[1]].pic, temp[pair2[0], pair2[1]].pic,node2, temp[pair2[0], pair2[1]].name, padded1[pair1[0], pair1[1]].name)\n elif direction==\"up\":\n self.globaldirection=\"up\"\n self.globalnode=node2\n self.bestpicarraytemp=padded1\n return self.scorefortop(padded1[pair1[0], pair1[1]].pic, temp[pair2[0], pair2[1]].pic,node2,temp[pair2[0], pair2[1]].name, padded1[pair1[0], pair1[1]].name)\n else:\n raise ValueError('INVALID DIRECTION')\n return None\n \t\n def checkforcompatibility(self, booleanarray):\n whattokeep=np.nonzero(booleanarray)\n smallestx1=min(np.nonzero(booleanarray)[1])\n smallesty1=min(np.nonzero(booleanarray)[0])\n biggestx1=max(np.nonzero(booleanarray)[1])\n biggesty1=max(np.nonzero(booleanarray)[0])\n biggest=biggestx1-smallestx1+1\n if(biggesty1-smallesty1+1>biggest):\n biggest=biggesty1-smallesty1+1\n storeing=np.zeros((biggest,biggest ), dtype=\"object\")\n for y in range(0, len(whattokeep[0])):\n pair=[whattokeep[0][y], whattokeep[1][y]]\n storeing[pair[0]-smallesty1] [pair[1]-smallestx1]=booleanarray[pair[0]] [ pair[1]]\n temp=storeing\n if temp.shape[0]>self.x or temp.shape[1]>self.y:\n return False\n return True \n\n def compare(self, node2):\n piece1=self.array\n piece2=node2.array\n h1 = piece1.shape[0]# the x and y of the pieces\n w1 = piece1.shape[1]\n h2 = piece2.shape[0]\n w2 = piece2.shape[1]\n padded1 = np.zeros( (h1+2*h2,w1+2*w2) )\n padded1[h2:(h2+h1),w2:(w2+w1)] = piece1\n dilation_mask = np.asarray( [[0,1,0], [1,1,1,], [0,1,0]] )\n result = binary_dilation(input=padded1,structure=dilation_mask)\n neighboring_connections = result - padded1\n smallestx1=min(np.nonzero(padded1)[1])\n smallesty1=min(np.nonzero(padded1)[0])\n biggestx1=max(np.nonzero(padded1)[1])\n biggesty1=max(np.nonzero(padded1)[0])\n bestscore=100000000000\n bestpic=None\n direction=None\n othernode=None\n newnodegraph=None \n for x in range(h1 + 2*h2 - (h2-1)):\n for y in range(w1 + 2*w2 - (w2-1)):\n pad_with_piece2 = np.zeros(neighboring_connections.shape)\n pad_with_piece2[x:(x+h2),y:(y+w2)] = piece2 \n connect_map = np.logical_and(neighboring_connections,pad_with_piece2)\n overlap_map = np.logical_and(padded1,pad_with_piece2)\n #print overlap_map\n has_connections = np.sum(connect_map[:]) > 0\n has_overlap = np.sum(overlap_map[:]) > 0\n score=10000000000\n newnodegraph=padded1+pad_with_piece2\n if has_connections and not has_overlap and self.checkforcompatibility(newnodegraph): #and pad_with_piece2containsthis: This is ruins it#so paddedwith 2 is the one that changes \n newnodegraph=padded1+pad_with_piece2\n store= np.nonzero(padded1) \n score=0 #in here something is messed up!!!!!\n for i in range(0,len(store[0])):\n temp=[store[0][i], store[1][i]] #these are the non zero pairs for padded 1 these are correct!\n numofcompar=0 \n if pad_with_piece2[temp[0]][temp[1]+1]==1:\n numofcompar+=1\n score+=(self.getscore(temp, (connect_map.nonzero()[0][0], connect_map.nonzero()[1][0]),padded1,pad_with_piece2,\"left\", node2, x, y, i)) #left of the first one\n if pad_with_piece2[temp[0]][temp[1]-1]==1:\n numofcompar+=1\n score+=(self.getscore(temp, (connect_map.nonzero()[0][0], connect_map.nonzero()[1][0]),padded1,pad_with_piece2,\"right\", node2, x, y, i)) #right of the first one\n if pad_with_piece2[temp[0]+1][temp[1]]==1:\n numofcompar+=1\n score+=(self.getscore(temp, (connect_map.nonzero()[0][0], connect_map.nonzero()[1][0]),padded1,pad_with_piece2,\"up\",node2, x, y,i))#down of the first one\n if pad_with_piece2[temp[0]-1][temp[1]]==1:\n numofcompar+=1\n score+=(self.getscore(temp, 
(connect_map.nonzero()[0][0], connect_map.nonzero()[1][0]),padded1,pad_with_piece2,\"down\",node2, x,y, i)) #up of the first one\n if numofcompar!=0:\n score=(score/numofcompar) \n score=score/(i+1)\n #new stuff hopefully it doesn't kill this program\n if self.globaldirection!=\"down\" and self.globaldirection!=\"right\":\n allvalues=Info()\n allvalues.secondbestnodescore=bestscore\n allvalues.picture=self.globalpiccombo\n allvalues.direction=self.globaldirection\n allvalues.bestNode=self\n allvalues.connectNode=self.globalnode\n allvalues.combo=newnodegraph\n allvalues.picarray=self.bestpicarraytemp\n allvalues.secondbestnodescore=bestscore\n allvalues.secondbestnode=self.bestNode\n allvalues.bestscore=score\n self.combos.append(allvalues)\n if score50K']))\r\n\r\n\r\n'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Python_Apps/P7_Random_Forest_ORNEK2.py","file_name":"P7_Random_Forest_ORNEK2.py","file_ext":"py","file_size_in_byte":4842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"69210213","text":"# https://adventofcode.com/2021/day/8\nimport sys\n\n# Decodes which combination of segments corresponds to which number\n# Note: This is probably too compliacted. I just looked at the numbers and wrote down how would i deduce them and it works\ndef decodeNumbersMapping(line):\n mapping = {}\n # Parse signals into sets and sort them by length for easy access by number of segments\n sets = list(map(lambda x: set(x), line.split(\" \")))\n sets.sort(key = lambda x: len(x))\n\n # Signals with only one possible mapping\n mapping[1] = sets[0]\n mapping[4] = sets[2]\n mapping[7] = sets[1]\n mapping[8] = sets[9]\n\n # Number 6 is 6-segments signal that also has common segments with both segments of number 1\n # This leaves only two possible 6-segment signals to be number 0 or 9\n if sets[0].intersection(sets[6]) != sets[0]:\n mapping[6] = sets[6]\n possible09 = [sets[7], sets[8]]\n elif sets[0].intersection(sets[7]) != sets[0]:\n mapping[6] = sets[7]\n possible09 = [sets[6], sets[8]]\n elif sets[0].intersection(sets[8]) != sets[0]:\n mapping[6] = sets[8]\n possible09 = [sets[6], sets[7]]\n\n # Number 3 is 5-segments signal that also has common segments with both segments of number 1\n # This leaves only two possible 5-segment signals to be number 2 or 5\n if sets[0].intersection(sets[3]) == sets[0]:\n mapping[3] = sets[3]\n possible25 = [sets[4], sets[5]]\n elif sets[0].intersection(sets[4]) == sets[0]:\n mapping[3] = sets[4]\n possible25 = [sets[3], sets[5]]\n elif sets[0].intersection(sets[5]) == sets[0]:\n mapping[3] = sets[5]\n possible25 = [sets[3], sets[4]]\n\n # Segments 'b' and 'e' are the only common segments of number 6 and 3\n be = mapping[6] - mapping[3]\n\n # Number 0 is signal that has both 'b' and 'e' segments. The other 6-segment one is number 9\n if be.intersection(possible09[0]) == be:\n mapping[0] = possible09[0]\n mapping[9] = possible09[1]\n else:\n mapping[0] = possible09[1]\n mapping[9] = possible09[0]\n\n # Number 2 is signal that has 2 segments in common with number 4. 
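An aside on this seven-segment record: its part-one rule, that digits 1, 4, 7 and 8 each light a unique number of segments (2, 4, 3 and 7 respectively), can be checked in a few lines. The signal strings below are hypothetical scrambles, not taken from the record:

```python
# Digits with a unique segment count are recognizable by length alone.
EASY_LENGTHS = {2: 1, 4: 4, 3: 7, 7: 8}

signals = ["ab", "eafb", "dab", "acedgfb"]  # hypothetical scrambled signals
print([EASY_LENGTHS[len(s)] for s in signals])  # -> [1, 4, 7, 8]
```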
The other 5-segment one is 5\n if len(possible25[0].intersection(mapping[4])) == 2:\n mapping[2] = possible25[0]\n mapping[5] = possible25[1]\n else:\n mapping[2] = possible25[1]\n mapping[5] = possible25[0]\n\n return mapping\n\n# Puzzle PART ONE\n# Count how many numbers 1, 4, 7 or 8 are in the line (the ones that are recognizable just by number of segments)\ndef countEasyNumbers(segments):\n return len(list(filter(lambda num: len(num) in [2, 3, 4, 7], segments)))\n\n# Decode signal of one digit, base on mapping between numbers and signals\ndef decodeDigit(digit, mapping):\n for number, segments in mapping.items():\n if digit == segments:\n return str(number)\n \n# Decode number on one line by decoding its individual digits and concatenating them\ndef decodeNumber(segments, mapping):\n numStr = \"\".join(map(lambda digit: decodeDigit(digit, mapping), segments))\n \n return int(numStr)\n\n# MAIN\nfile = open(sys.argv[1], 'r')\ninput = file.readlines()\ncountEasy = 0\nsumTotal = 0\n\nfor line in input:\n line = line.strip().split(\" | \")\n mapping = decodeNumbersMapping(line[0])\n segments = list(map(lambda x: set(x), line[1].split(\" \")))\n \n countEasy += countEasyNumbers(segments)\n sumTotal += decodeNumber(segments, mapping)\n\nprint(f\"I: Number of easily decoded digits is {countEasy}\")\nprint(f\"II: Sum of all decoded numbers is {sumTotal}\")\n\nfile.close()","sub_path":"2021/Day08/SevenSegmentSearch.py","file_name":"SevenSegmentSearch.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"70377842","text":"import os\n\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']\n).get_hosts('all')\n\n\ndef test_k3s_file(host):\n f = host.file('/etc/k3s.conf')\n\n assert f.exists\n assert f.user == 'root'\n assert f.group == 'root'\n\n\ndef test_k3s_is_installed(host):\n k3s = host.package(\"k3s\")\n assert k3s.is_installed\n\n\ndef test_k3s_running_and_enabled(host):\n k3s = host.service(\"k3sd\")\n assert k3s.is_running\n assert k3s.is_enabled\n","sub_path":"roles/k3s-all/molecule/default/tests/test_default.py","file_name":"test_default.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"389594027","text":"from mrjob.job import MRJob\nimport heapq as hp\nimport re\n\nclass FirstStep(MRJob):\n def mapper(self,_,line):\n lis=line.split(' ')\n for num in lis:\n yield (int(num),1)\n def reducer(self,word,values):\n yield ('red',(sum(values),word))\n \nclass SecondStep(MRJob):\n def reducer(self,key,records):\n self.heap1=[] \n for item in records:\n hp.heappush(self.heap1,(int(item[0]),int(item[1])))\n #print hp.nlargest(100,self.heap1)\n yield ('key',(hp.nlargest(100,self.heap1)))\n #yield ('key',self.heap1)\n \nclass ThirdStep(MRJob):\n def reducer(self,key,items):\n self.com_heap=[]\n for item in items:\n hp.heappush(self.com_heap,item)\n yield ('key2',hp.nlargest(100,self.com_heap)) \n\nclass SteppedJob(MRJob):\n def steps(self):\n return FirstStep().steps()+SecondStep().steps()+ThirdStep().steps()\n\nif __name__ == '__main__':\n SteppedJob.run()","sub_path":"my_mr1.py","file_name":"my_mr1.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"632231148","text":"#cheacking errors by using try, except , else, 
finally , key words\n#ex multiple exception\n'''try:\n a=int(input('enter a value'))\n d=int(input('enter d value'))\n c=a/d\n print(c)\nexcept ZeroDivisionError as x:\n print('the error is in',x)\n##except TypeError as x:\n print('the error is in',x)\nexcept NameError as x:\n print('the error is',x)\nexcept ValueError as x:\n print('the error is in',x)\nfinally:\n print('thank you')'''\n#\n#\n#single exception in multiple errors\nn=int(input('enter any value'))\nm=int(input('enter a number'))\ntry:\n c=n+m\n print(c)\nexcept (ZeroDivisionError,NameError,TypeError,ValueError) as x:\n print('the error is in ',x)\nfinally:\n print('thank you')","sub_path":"CheckingErrors.py","file_name":"CheckingErrors.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"170260796","text":"# this slices up spec.md (from starlark-go) into multiple markdown files for use in qri docs\n# we may never use it again, but it was useful when first setting up these docs pages\n\nimport re\n\nwith open('./spec.md', 'r') as f:\n raw = f.read()\n sections = re.findall('(?:^|\\n)##\\s[^\\n]*\\n+.*?(?=\\n##?\\s|$)', raw, re.DOTALL)\n print(len(sections))\n\n for section in sections:\n # get the title\n section = section.strip()\n title = re.match('^##\\s(.+)', section).group(1) # get the h2 heading\n slug = title.lower().replace(' ', '-') # make snakecase\n\n print(title)\n lines = section.split('\\n')\n\n # create frontmatter\n frontmatter = \"\"\"---\nmetaTitle: \"{}\"\n---\"\"\".format(title)\n\n # remove first line\n section = '\\n'.join(lines[1:])\n\n # prepend frontmatter\n section = frontmatter + section\n\n writefile = open('../../content/docs/reference/starlark-language/{}.md'.format(slug), \"w\")\n writefile.write(section)\n writefile.close()\n","sub_path":"scripts/starlark-go-docs/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"446301452","text":"import requests\nimport bs4\nimport re\nimport time\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\ndef send_email():\n sender = '810754420@qq.com'\n receivers = ['810754420@qq.com'] # 接收邮件\n \n # 三个参数:第一个为文本内容,第二个 plain 设置文本格式,第三个 utf-8 设置编码\n message = MIMEText('可以查成绩啦~~~~~~~', 'plain', 'utf-8')\n message['From'] = Header(\"Python\", 'utf-8') # 发送者\n message['To'] = Header(\"落墨\", 'utf-8') # 接收者\n \n subject = '2019年4月自考成绩查询'\n message['Subject'] = Header(subject, 'utf-8')\n \n try:\n smtpObj = smtplib.SMTP('smtp.qq.com',25)\n smtpObj.login(sender,\"jigtnzntkipmbfgd\")\n smtpObj.sendmail(sender, receivers, message.as_string())\n print (\"邮件发送成功\")\n smtpObj.quit()\n except smtplib.SMTPException:\n print (\"Error: 无法发送邮件\")\n\ndef open_url(url):\n # 使用代理\n # proxies = {\"http\": \"127.0.0.1:1080\", \"https\": \"127.0.0.1:1080\"}\n headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}\n\n # res = requests.get(url, headers=headers, proxies=proxies)\n res = requests.get(url, headers=headers)\n\n return res\n\ndef query(size):\n\n while size == 7 :\n time.sleep(100)\n host = \"http://www.shmeea.edu.cn/page/24300/\"\n req = open_url(host)\n # 解决中文乱码\n if req.encoding == 'ISO-8859-1':\n encodings = requests.utils.get_encodings_from_content(req.text)\n if encodings:\n encoding = encodings[0]\n else:\n encoding = 
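An aside on the encoding handling in the score-query record above: `requests` falls back to ISO-8859-1 when a response's Content-Type header omits a charset, and `apparent_encoding` re-detects the charset from the body bytes. A minimal sketch with a hypothetical URL:

```python
import requests

resp = requests.get("https://example.com")  # hypothetical URL

# When the server omits a charset, requests defaults resp.encoding to
# ISO-8859-1; apparent_encoding guesses the real one from the content.
if resp.encoding == "ISO-8859-1":
    resp.encoding = resp.apparent_encoding

print(resp.text[:80])  # resp.text is re-decoded with the new encoding
```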
req.apparent_encoding\n #如果设置为replace,则会用?取代非法字符;\n encode_content = req.content.decode(encoding, 'replace')\n\n soup = bs4.BeautifulSoup(encode_content, 'html.parser')\n size = int(soup.find('ul', id='changePage').div.text[3])\n print(time.ctime()+\" \"+\"记录数:\"+str(size))\n\ndef main():\n # 查询\n query(7)\n # 发送邮件\n send_email()\n\nif __name__ == \"__main__\":\n main()","sub_path":"queryScore.py","file_name":"queryScore.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"81770584","text":"'''\nWe want to shrink the ~2mil length dict so that\nwe don't have to load the entire thing into memory\n'''\n\nimport sqlite3\nfrom tqdm import tqdm\nimport json\nfrom nltk.tokenize import WhitespaceTokenizer\nimport numpy as np\nimport pickle\n\ne_file = '../data/glove.840B.300d.txt'\ne_dict = {}\n\nn_rows = sum(1 for line in open(e_file, encoding = \"utf8\"))\nwith open(e_file, encoding=\"utf8\") as embedding_file:\n for x in tqdm(range(n_rows)):\n word = next(embedding_file).split(' ')\n e_dict[word[0].lower()] = np.array([float(emb) for emb in word[1:]])\n\nconn = sqlite3.connect(\"../data/amazon.db\")\nc = conn.cursor()\nc.execute(\"SELECT REVIEW FROM Review;\")\n\nused_words = set()\n\nfor row in c:\n used_words.update(WhitespaceTokenizer().tokenize(row[0]))\n\nconn.close()\n\nsmall_dict = {word: e_dict[word] for word in used_words}\n\npickle.dump(small_dict, open(\"../data/e_dict.p\", \"wb\"))","sub_path":"scripts/shrink_dict.py","file_name":"shrink_dict.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"284883497","text":"import pandas\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\n\nfrom app.info import questionWithComplete, absoluteFeatures\n\ndef convertStringTest(params, values):\n newParams = params[:]\n\n for i in range(len(params)):\n if(type(values[i]) == str):\n newParams[i] = 'is_' + values[i]\n values[i] = 1\n \n return [newParams, values] \n\ndef convertStringData(params, newParams, values, test):\n assign = {}\n\n for i in range(len(newParams)):\n if(not newParams[i] in params):\n assign[newParams[i]] = 0\n\n values = values.assign(**assign)\n\n for i in range(len(values)):\n for i2 in range(len(params)):\n if params[i2] in questionWithComplete:\n values[newParams[i2]][i] = 1 if values[params[i2]][i] == newParams[i2][3:] else 0\n \n return values\n\ndef ml(params, newParams, test_x):\n data = pandas.read_csv('all.csv')\n\n data = convertStringData(params, newParams, data, test_x)\n\n train_x = data[newParams]\n data_y = data[['Unnamed: 0']]\n dropList = []\n\n for i in range(len(absoluteFeatures)):\n absoluteFeature = absoluteFeatures[i]\n\n if absoluteFeature in params:\n print(newParams)\n \n for i2 in range(len(train_x)):\n index = int(params.index(absoluteFeature))\n\n if(train_x[newParams[index]][i2] != test_x[index]):\n dropList.append(data_y['Unnamed: 0'][i2])\n\n train_x = train_x.drop(dropList)\n data_y = data_y.drop(dropList)\n\n test_x = [test_x]\n\n print(train_x)\n print(test_x)\n\n model = LinearSVC()\n model.fit(train_x, data_y.values.ravel())\n\n prevision = model.predict(test_x)\n\n data = pandas.read_csv('all.csv')\n\n return data[prevision[0]:prevision[0] + 1]\n\n# Example:\n#print(ml(['intelligence', 'power', 'gender', 'is_tall', 'publisher'], ['intelligence', 'power', 
'gender', 'is_tall', 'is_Marvel Comics'], [2, 1, 0, 1, 1]))","sub_path":"app/algorithm/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"392593450","text":"#! /usr/bin/env python\n\nimport argparse\nimport logging\n\nfrom find import find\n\nlogger = logging.getLogger('app')\nlogger.setLevel(logging.DEBUG)\n\ndef set_logging(debug):\n ch = logging.StreamHandler()\n if debug:\n ch.setLevel(logging.DEBUG)\n else:\n ch.setLevel(logging.INFO)\n ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n logger.addHandler(ch)\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-v','--value', type=int, required=True, metavar='n', help='hash value of solution string')\n parser.add_argument('-l','--length', type=int, required=True, metavar='n', help='length of solution string')\n parser.add_argument('-d','--debug', action='store_true', help='set logging level to debug')\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n set_logging(args.debug)\n logger.info('starting')\n s = find(args.value, args.length)\n print('\\n\\nSoultion: \\'{}\\'\\n\\n'.format(s))\n logger.info('done')\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"86132771","text":"#!/usr/local/bin/python\n'''Exact diagonalization of Hubbard model. \n Adeline C. Sun May 20 2016\n'''\nimport numpy as np\nimport numpy.linalg as nl\nimport matplotlib.pyplot as plt\n\ndef lm_spin0(U, t, T = 0.1):\n '''Calculate the local moment in the sector with one up and one down\n electrons for low temperature. 
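An aside on the Hubbard-dimer record above: the two physical limits it plots can be sanity-checked directly, since the local moment should approach 1 at U = 0 (uncorrelated electrons) and 2 as U/t grows (one electron pinned per site). A sketch of the U = 0 check at low temperature, assuming only numpy:

```python
import numpy as np

t, U, beta = 1.0, 0.0, 10.0  # U = 0: uncorrelated limit
H = np.array([[U, -t, -t, 0.],
              [-t, 0., 0., -t],
              [-t, 0., 0., -t],
              [0., -t, -t, U]])
m2 = np.array([0., 2., 2., 0.])  # (n_up - n_down)^2 in each basis state

e, w = np.linalg.eigh(H)
weights = np.exp(-beta * e)               # Boltzmann weights per eigenstate
lm = ((w**2).T @ m2) @ weights / weights.sum()
print(lm)  # ~1.0 at U = 0; tends toward 2.0 as U/t grows
```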
\n local moment = <(n_+ - n_-)^2>\n basis:\n |0 +->, |+ ->, |- +>, |+- 0>\n '''\n beta = 1./T\n H = np.array([[U, -t, -t, 0.], \\\n [-t, 0., 0., -t],\\\n [-t, 0., 0., -t],\\\n [0., -t, -t, U]])\n m2 = np.array([0., 2., 2., 0.])\n e, w = nl.eigh(H)\n lm = 0.\n Z = 0.\n for i in range(4):\n lm += np.exp(-beta*e[i])*(w[:, i]**2.).dot(m2)\n Z += np.exp(-beta*e[i])\n return lm/Z\n\ndef plt_lm():\n t = 1.\n rat = np.linspace(0., 20., 100, endpoint = False)\n lm = []\n for r in rat:\n lm.append(lm_spin0(t*r, t))\n lm = np.asarray(lm)\n plt.plot(rat, lm)\n plt.xlabel(r'$\\frac{U}{t}$')\n plt.ylabel(r'$\\langle m^2\\rangle$')\n plt.show()\n\nif __name__ == \"__main__\":\n plt_lm()\n\n","sub_path":"ed.py","file_name":"ed.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"287796844","text":"# coding=utf-8\n\n\"\"\"\n LCD1602 Plugin for Octoprint\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom octoprint.printer.estimation import PrintTimeEstimator\nimport octoprint.plugin\nimport octoprint.events\nfrom RPLCD.i2c import CharLCD\nimport time\nimport datetime\nimport os\nimport sys\nfrom fake_rpi import printf\nimport fake_rpi\n\n\nclass LCD1602Plugin(octoprint.plugin.StartupPlugin,\n octoprint.plugin.EventHandlerPlugin,\n octoprint.plugin.ProgressPlugin):\n\n mylcd = CharLCD(i2c_expander='PCF8574', address=0x27, port=1, cols=20, rows=4, dotsize=8, charmap='A02', auto_linebreaks=True, backlight_enabled=True)\n\n debut0s1 = (\n 0b01111, \n 0b11000,\n 0b10000,\n 0b10000,\n 0b10000,\n 0b10000,\n 0b11000,\n 0b01111\n )\n\n debut1s1 = (\n 0b01111, \n 0b11000,\n 0b10011,\n 0b10111,\n 0b10111,\n 0b10011,\n 0b11000,\n 0b01111\n )\n\n mid0s2 = (\n 0b11111, \n 0b00000,\n 0b00000,\n 0b00000,\n 0b00000,\n 0b00000,\n 0b00000,\n 0b11111\n )\n\n mid1s2 = (\n 0b11111, \n 0b00000,\n 0b11000,\n 0b11000,\n 0b11000,\n 0b11000,\n 0b00000,\n 0b11111\n )\n\n mid2s2 = (\n 0b11111, \n 0b00000,\n 0b11011,\n 0b11011,\n 0b11011,\n 0b11011,\n 0b00000,\n 0b11111\n )\n\n fin0s1 = (\n 0b11110, \n 0b00011,\n 0b00001,\n 0b00001,\n 0b00001,\n 0b00001,\n 0b00011,\n 0b11110\n )\n\n fin1s1 = (\n 0b11110, \n 0b00011,\n 0b00001,\n 0b00001,\n 0b00001,\n 0b00001,\n 0b00011,\n 0b11110\n )\n mylcd.create_char(0, debut0s1)\n mylcd.create_char(1, debut1s1)\n mylcd.create_char(2, mid0s2)\n mylcd.create_char(3, mid1s2)\n mylcd.create_char(4, mid2s2)\n mylcd.create_char(5, fin0s1)\n mylcd.create_char(6, fin1s1)\n \n\n def JobIsDone(self,mylcd):\n\n # create final anim\n self.birdy = [ '^_-' , '^_^', '-_^' , '^_^', '0_0', '-_-', '^_-', '^_^','@_@','*_*','$_$','<_<','>_>']\n\n for pos in range(0,13):\n mylcd.cursor_pos = (0,0)\n mylcd.write_string('¯\\_(ツ)_/¯ \\n Impression Finie ')\n\n \n def on_after_startup(self):\n mylcd = self.mylcd\n self._logger.info(\"plugin charge\")\n\n \n def on_print_progress(self,storage,path,progress):\n mylcd = self.mylcd\n mylcd.cursor_pos = (1,9)\n mylcd.write_string(str(progress)+'%')\n if progress==1 :\n self.start_date=time.time()\n \n if progress>1 and progress<100:\n now=time.time()\n elapsed=now-self.start_date\n average=elapsed/(progress-1)\n remaining=int((100-progress)*average)\n remaining=str(datetime.timedelta(seconds=remaining))\n mylcd.cursor_pos = (3,0)\n mylcd.write_string('Restant : ' +remaining)\n\n if progress==100 :\n self.JobIsDone(mylcd)\n\n if progress==0:\n mylcd.cursor_pos = (0,0)\n mylcd.write_string(\"Impression \")\n mylcd.cursor_pos = (1,0)\n mylcd.write_string(\" \")\n mylcd.cursor_pos = 
(3,0)\n mylcd.write_string(\" \")\n\n if progress==1 :\n mylcd.cursor_pos=(2,0)\n mylcd.write_string(unichr(1))\n \n if progress>1 :\n if progress==2:\n mylcd.cursor_pos=(2, 1)\n mylcd.write_string(unichr(3))\n\n elif progress==5:\n mylcd.cursor_pos=(2, 1)\n mylcd.write_string(unichr(4))\n\n elif progress==8:\n mylcd.cursor_pos=(2, 2)\n mylcd.write_string(unichr(3))\n\n elif progress==11:\n mylcd.cursor_pos=(2, 2)\n mylcd.write_string(unichr(4))\n \n elif progress==14:\n mylcd.cursor_pos=(2, 3)\n mylcd.write_string(unichr(3))\n \n elif progress==17:\n mylcd.cursor_pos=(2, 3)\n mylcd.write_string(unichr(4))\n \n elif progress==20:\n mylcd.cursor_pos=(2, 4)\n mylcd.write_string(unichr(3))\n \n elif progress==23:\n mylcd.cursor_pos=(2, 4)\n mylcd.write_string(unichr(4))\n \n elif progress==26:\n mylcd.cursor_pos=(2, 5)\n mylcd.write_string(unichr(3))\n \n elif progress==27:\n mylcd.cursor_pos=(2, 5)\n mylcd.write_string(unichr(4))\n \n elif progress==26:\n mylcd.cursor_pos=(2, 6)\n mylcd.write_string(unichr(3))\n \n elif progress==28:\n mylcd.cursor_pos=(2, 6)\n mylcd.write_string(unichr(4))\n \n elif progress==30:\n mylcd.cursor_pos=(2, 7)\n mylcd.write_string(unichr(3))\n \n elif progress==32:\n mylcd.cursor_pos=(2, 7)\n mylcd.write_string(unichr(4))\n \n elif progress==34:\n mylcd.cursor_pos=(2, 8)\n mylcd.write_string(unichr(3))\n \n elif progress==36:\n mylcd.cursor_pos=(2, 8)\n mylcd.write_string(unichr(4))\n \n elif progress==39:\n mylcd.cursor_pos=(2, 9)\n mylcd.write_string(unichr(3))\n \n elif progress==42:\n mylcd.cursor_pos=(2, 9)\n mylcd.write_string(unichr(4))\n \n elif progress==45:\n mylcd.cursor_pos=(2, 10)\n mylcd.write_string(unichr(3))\n \n elif progress==49:\n mylcd.cursor_pos=(2, 10)\n mylcd.write_string(unichr(4))\n \n elif progress==52:\n mylcd.cursor_pos=(2, 11)\n mylcd.write_string(unichr(3))\n \n elif progress==56:\n mylcd.cursor_pos=(2, 11)\n mylcd.write_string(unichr(4))\n \n elif progress==59:\n mylcd.cursor_pos=(2, 12)\n mylcd.write_string(unichr(3))\n \n elif progress==62:\n mylcd.cursor_pos=(2, 12)\n mylcd.write_string(unichr(4))\n \n elif progress==65:\n mylcd.cursor_pos=(2, 13)\n mylcd.write_string(unichr(3))\n \n elif progress==67:\n mylcd.cursor_pos=(2, 13)\n mylcd.write_string(unichr(4))\n \n elif progress==70:\n mylcd.cursor_pos=(2, 14)\n mylcd.write_string(unichr(3))\n \n elif progress==72:\n mylcd.cursor_pos=(2, 14)\n mylcd.write_string(unichr(4))\n \n elif progress==75:\n mylcd.cursor_pos=(2, 15)\n mylcd.write_string(unichr(3))\n \n elif progress==78:\n mylcd.cursor_pos=(2, 15)\n mylcd.write_string(unichr(4))\n \n elif progress==81:\n mylcd.cursor_pos=(2, 16)\n mylcd.write_string(unichr(3))\n \n elif progress==85:\n mylcd.cursor_pos=(2, 16)\n mylcd.write_string(unichr(4))\n \n elif progress==88:\n mylcd.cursor_pos=(2, 17)\n mylcd.write_string(unichr(3))\n \n elif progress==92:\n mylcd.cursor_pos=(2, 17)\n mylcd.write_string(unichr(4))\n \n elif progress==95:\n mylcd.cursor_pos=(2, 18)\n mylcd.write_string(unichr(3))\n \n elif progress==99:\n mylcd.cursor_pos=(2, 18)\n mylcd.write_string(unichr(4))\n \n elif progress==100:\n mylcd.cursor_pos=(2, 19)\n mylcd.write_string(6)\n\n\n def on_event(self,event,payload):\n mylcd = self.mylcd\n \n if event in \"Shutdown\":\n mylcd.clear()\n mylcd.write_string('Bye bye ^_^')\n time.sleep(1)\n mylcd._set_backlight_enabled(False)\n mylcd.close()\n \n \n if event in \"PrinterStateChanged\":\n \n if payload[\"state_string\"] in \"Offline\":\n mylcd.cursor_pos = (0,0)\n mylcd.write_string('Deconnexion')\n 
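An aside on the long elif ladder in `on_print_progress` above: it maps progress onto 18 bar cells drawn with half-filled and full custom glyphs (characters 3 and 4), which can be approximated arithmetically instead of enumerated. A hypothetical sketch; the (row, column, char) tuples below mirror the record's `cursor_pos`/custom-character scheme rather than any RPLCD API, and the 0% and 100% end caps the record handles separately are left out:

```python
# Roughly equivalent arithmetic form of the elif ladder, under the
# assumptions above: 100% of progress spans bar cells 1..18.
def bar_updates(progress):
    filled = progress * 18 / 100.0      # how much of the bar is covered
    col = 1 + int(filled)               # current cell (1..18)
    if col > 18:
        return []                       # 100% end cap handled elsewhere
    half = (filled - int(filled)) < 0.5
    return [(2, col, 3 if half else 4)] # (row, column, custom char)

print(bar_updates(50))  # -> [(2, 10, 3)]
```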
time.sleep(5)\n mylcd._set_backlight_enabled(False)\n\n if event in \"Connected\":\n mylcd._set_backlight_enabled(True)\n mylcd.write_string(\"Connecte\")\n mylcd.cursor_pos = (1,9)\n mylcd.write_string(\"0%\")\n CreaBar = 1\n mylcd.cursor_pos = (2, 0)\n mylcd.write_string(unichr(0))\n while CreaBar <= 18: \n mylcd.cursor_pos = (2, CreaBar)\n mylcd.write_string(unichr(2))\n CreaBar += 1\n\n mylcd.cursor_pos = (2,19)\n mylcd.write_string(unichr(6))\n \n if payload[\"state_string\"] in \"Operational\":\n mylcd._set_backlight_enabled(True)\n mylcd.cursor_pos = (1,9)\n mylcd.write_string(\"0%\")\n CreaBar = 1\n mylcd.cursor_pos = (2, 0)\n mylcd.write_string(unichr(0))\n while CreaBar <= 18: \n mylcd.cursor_pos = (2, CreaBar)\n mylcd.write_string(unichr(2))\n CreaBar += 1\n\n mylcd.cursor_pos = (2,19)\n mylcd.write_string(unichr(6))\n \n if payload[\"state_string\"] in \"Cancelling\":\n mylcd.clear()\n mylcd.write_string('Annulee') \n time.sleep(0.2)\n \n if payload[\"state_string\"] in \"PrintCancelled\":\n mylcd.clear()\n time.sleep(0.5)\n mylcd.write_string('Annulee' ) \n time.sleep(2)\n \n if payload[\"state_string\"] in \"Paused\":\n mylcd.clear()\n time.sleep(0.5)\n mylcd.write_string('Pause') \n\n if payload[\"state_string\"] in \"Resuming\":\n mylcd.clear()\n mylcd.write_string('Reprise') \n time.sleep(0.2)\n \n if payload[\"state_string\"] in \"Printing\":\n mylcd._set_backlight_enabled(True)\n CreaBar = 1\n mylcd.cursor_pos = (2, 0)\n mylcd.write_string(unichr(0))\n while CreaBar <= 18: \n mylcd.cursor_pos = (2, CreaBar)\n mylcd.write_string(unichr(2))\n CreaBar += 1\n\n mylcd.cursor_pos = (2,19)\n mylcd.write_string(unichr(6))\n\n\n def get_update_information(self):\n return dict(\n LCD1602Plugin=dict(\n displayName=\"LCD1602 display\",\n displayVersion=self._plugin_version,\n\n type=\"github_release\",\n current=self._plugin_version,\n user=\"n3bojs4\",\n repo=\"OctoPrint-Lcd1602\",\n\n pip=\"https://github.com/n3bojs4/octoprint-LCD1602/archive/{target}.zip\"\n )\n )\n\n\n__plugin_name__ = \"LCD1602 I2c display\"\n\n\ndef __plugin_load__():\n global __plugin_implementation__\n global __plugin_hooks__\n # build the plugin instance first, then register its hooks\n __plugin_implementation__ = LCD1602Plugin()\n __plugin_hooks__ = {\n \"octoprint.plugin.softwareupdate.check_config\": __plugin_implementation__.get_update_information\n }\n","sub_path":"octoprint_LCD1602/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"83899087","text":"from django.contrib.sessions.middleware import SessionMiddleware\nfrom django.test.client import RequestFactory\n\nfrom funfactory.urlresolvers import reverse\nfrom mock import ANY, patch\nfrom nose.tools import eq_, ok_\nfrom waffle import Flag\n\nfrom flicks.base.tests import TestCase\nfrom flicks.base.tests.tools import redirects_\nfrom flicks.users.models import UserProfile\nfrom flicks.users.tests import UserFactory\nfrom flicks.users.views import Verify\nfrom flicks.videos.models import Vote\nfrom flicks.videos.tests import VideoFactory\n\n\nclass ProfileTests(TestCase):\n def setUp(self):\n super(ProfileTests, self).setUp()\n self.user = UserFactory.create()\n self.browserid_login(self.user.email)\n\n def _profile(self, method, locale='en-US', **kwargs):\n with self.activate(locale):\n func = self.client.post if method == 'post' else self.client.get\n response = func(reverse('flicks.users.profile'), kwargs)\n return response\n\n def test_get(self):\n \"\"\"Render 
'users/profile.html' on GET.\"\"\"\n response = self._profile('get')\n self.assertTemplateUsed(response, 'users/profile.html')\n\n def test_post_invalid(self):\n \"\"\"\n If POSTed with invalid values, render the form again and do not create\n a profile.\n \"\"\"\n response = self._profile('post', full_name='blah',\n privacy_policy_agree=False)\n self.assertTemplateUsed(response, 'users/profile.html')\n ok_(not UserProfile.objects.filter(user=self.user).exists())\n\n def test_post_valid(self):\n \"\"\"\n If POSTed with valid values, create a profile and redirect to\n flicks.videos.upload.\n \"\"\"\n response = self._profile('post', locale='fr', full_name='blah',\n nickname='blah', country='fr',\n privacy_policy_agree=True)\n redirects_(response, 'flicks.videos.upload', locale='fr')\n ok_(UserProfile.objects.filter(user=self.user).exists())\n eq_(UserProfile.objects.get(user=self.user).locale, 'fr')\n\n @patch('flicks.users.views.newsletter_subscribe')\n def test_mailing_list_signup(self, newsletter_subscribe):\n \"\"\"\n If the user has checked the mailing_list_signup checkbox, trigger the\n newsletter_subscribe task.\n \"\"\"\n self._profile('post', locale='fr', full_name='blah', nickname='blah',\n country='fr', privacy_policy_agree=True,\n mailing_list_signup=True, mailing_list_format='html')\n newsletter_subscribe.delay.assert_called_with(self.user.email,\n source_url=ANY,\n format='html')\n\n\n@patch('flicks.users.views.super', create=True)\nclass VerifyTests(TestCase):\n def setUp(self):\n super(VerifyTests, self).setUp()\n Flag.objects.create(name='voting', everyone=True)\n\n self.factory = RequestFactory()\n self.request = self.factory.post('/')\n self.user = UserFactory.create()\n\n self.request.user = self.user\n SessionMiddleware().process_request(self.request)\n self.verify = Verify(request=self.request)\n\n def test_login_success_no_key(self, mock_super):\n \"\"\"If there is no video id in the session, do nothing.\"\"\"\n response = self.verify.login_success(1, 'asdf', blah='foo')\n\n login_success = mock_super.return_value.login_success\n eq_(response, login_success.return_value)\n login_success.assert_called_with(1, 'asdf', blah='foo')\n\n def test_login_success_invalid_video(self, mock_super):\n \"\"\"If the video ID is invalid, remove the session key and abort.\"\"\"\n self.request.session['vote_video'] = 'asdf'\n self.verify.login_success()\n ok_('vote_video' not in self.request.session)\n ok_(not Vote.objects.filter(user=self.user).exists())\n\n def test_login_success_missing_video(self, mock_super):\n \"\"\"\n If the video ID doesn't match an existing video, remove the session key\n and abort.\n \"\"\"\n self.request.session['vote_video'] = '99999'\n self.verify.login_success()\n ok_('vote_video' not in self.request.session)\n ok_(not Vote.objects.filter(user=self.user).exists())\n\n def test_login_success_vote_exists(self, mock_super):\n \"\"\"\n If the user has already voted for the video, remove the session key and\n do nothing.\n \"\"\"\n video = VideoFactory.create()\n Vote.objects.create(user=self.user, video=video)\n self.request.session['vote_video'] = unicode(video.id)\n\n self.verify.login_success()\n ok_('vote_video' not in self.request.session)\n eq_(Vote.objects.filter(user=self.user, video=video).count(), 1)\n\n def test_login_success_no_vote(self, mock_super):\n \"\"\"\n If the user hasn't voted for the video, create a vote for it and remove\n the session key.\n \"\"\"\n video = VideoFactory.create()\n self.request.session['vote_video'] = 
unicode(video.id)\n\n self.verify.login_success()\n ok_('vote_video' not in self.request.session)\n eq_(Vote.objects.filter(user=self.user, video=video).count(), 1)\n\n def test_login_failure_no_key(self, mock_super):\n \"\"\"If login fails and the user wasn't voting, do nothing.\"\"\"\n response = self.verify.login_failure(1, 'asdf', foo='bar')\n\n login_failure = mock_super.return_value.login_failure\n eq_(response, login_failure.return_value)\n login_failure.assert_called_with(1, 'asdf', foo='bar')\n\n def test_login_failure_with_key(self, mock_super):\n \"\"\"If the session key for voting exists when login fails, remove it.\"\"\"\n self.request.session['vote_video'] = '392'\n self.verify.login_failure()\n ok_('vote_video' not in self.request.session)\n","sub_path":"flicks/users/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"96858951","text":"import time\nfrom image_transform import Image_Transform\nfrom dataset import MyDataset\nfrom lib import *\nfrom utils import make_datapath_list, train_model, evaluate_epoch, update_param\nfrom collections import defaultdict\nfrom torchvision.models import resnet50\nfrom torch.utils.tensorboard import SummaryWriter\n\ndef main():\n history = defaultdict(list)\n resize = 224\n mean = (0.485, 0.456, 0.406)\n std = (0.229, 0.224, 0.225)\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n EPOCHS = 5\n best_val_acc = 0.0\n writer = SummaryWriter()\n\n train_list = make_datapath_list(\"training_set\")\n val_list = make_datapath_list(\"test_set\")\n\n #dataset\n train_ds = MyDataset(train_list, transform=Image_Transform(resize, mean, std), phase='training_set')\n val_ds = MyDataset(val_list, transform=Image_Transform(resize, mean, std), phase='test_set')\n\n # Dataloader\n batch_size = 16\n\n train_dataloader = DataLoader(train_ds, batch_size, shuffle=True)\n test_dataloader = DataLoader(val_ds, batch_size, shuffle=False)\n dataloader_dict = {\"train\": train_dataloader, 'test': test_dataloader}\n\n # Use resnet 50\n use_pretrained = True\n model = resnet50(pretrained=use_pretrained)\n # print(net)\n model.fc = nn.Sequential(nn.Linear(in_features=2048, out_features=2, bias=True),\n nn.Sigmoid())\n model = model.to(device)\n\n criterion = nn.CrossEntropyLoss()\n\n # optimizer\n params1, params2 = update_param(model)\n optimizer = optim.SGD([\n {'params': params1, 'lr': 1e-4},\n {'params': params2, 'lr': 1e-3},\n ], momentum=0.9)\n\n for epoch in range(EPOCHS):\n# time.sleep(0.5)\n print(f'\\nEpoch: [{epoch + 1}/{EPOCHS}]')\n print('-' * 40)\n\n train_acc, train_loss = train_model(model, dataloader_dict['train'], criterion, optimizer, \\\n device, writer, epoch)\n val_acc, val_loss = evaluate_epoch(model, dataloader_dict['test'], criterion, \\\n device, writer, epoch)\n\n print('Train Loss: {:.4f}\\t Train Acc: {:.4f}'.format(train_loss, train_acc))\n print('Val Loss: {:.4f}\\t Val Acc: {:.4f}'.format(val_loss, val_acc))\n\n history['train_acc'].append(train_acc)\n history['train_loss'].append(train_loss)\n history['val_acc'].append(val_acc)\n history['val_loss'].append(val_loss)\n\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n torch.save(model.state_dict(), 'best_model.pth')\n writer.flush()\n writer.close()\n\nif __name__ == '__main__':\n 
main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"420808490","text":"from urllib.request import _parse_proxy\n\nimport faker\nfrom scrapy.exceptions import NotConfigured\nimport random\nimport logging\n\nlogger = logging.getLogger()\n\n#处理代理ip,将用户名密码去掉\ndef reform_url(url):\n proxy_type,user,password,hostport = _parse_proxy(url)\n return '%s://%s' % (proxy_type,hostport)\n\n\nclass RandomProxyMiddleware(object):\n\n def __init__(self,settings):\n self.proxies = settings.getlist('PROXIES')\n self.max_failed = settings.getint('PROXY_MAX_FAILED',3)\n self.stats = {}.fromkeys(map(reform_url,self.proxies),0)\n\n def random_proxy(self):\n return random.choice(self.proxies)\n\n @classmethod\n def from_crawler(cls,crawler):\n if not crawler.settings.getbool(\"HTTPPROXY_ENABLED\"):\n raise NotConfigured\n if not crawler.settings.getlist(\"PROCIES\"):\n raise NotConfigured\n return cls(crawler.settings)\n\n def process_request(self,request,spider):\n if 'proxy' not in request.meta:\n # print(\"111111111111111111111111111111111\")\n request.meta['proxy'] = self.random_proxy()\n\n def process_response(self,request,response,spider):\n cur_proxy = request.meta['proxy']\n #如果该代理不能用,就将它的值+1,也代表着失败次数+1,默���的每个代理Ip对应的值为0\n if response.status > 400:\n self.stats[cur_proxy] += 1\n #如果键为当前代理的值大于最大失败次数时,就从代理池里删除此代理\n if self.stats[cur_proxy] > self.max_failed:\n for proxy in self.proxies:\n if reform_url(proxy) == cur_proxy:\n self.stats.remove(proxy)\n break\n logger.warning('proxy %s remove from proxies list' % cur_proxy)\n return response","sub_path":"qianmu2/qianmu/middlewares/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"374974440","text":"# following https://towardsdatascience.com/topic-modeling-articles-with-nmf-8c6b2a227a45\n# this runs two versions of an NMF model, one is BOW and uses gensim's model, the other is tf-idf and uses sklearn's model\n# we use bow to get best number of topics and then find those topics with the tfidf nmf model\nimport os\nimport nltk.tokenize.casual\nimport re\nfrom nltk.stem import SnowballStemmer\nfrom nltk.corpus import stopwords\nimport string\nimport contractions\nimport pandas as pd\nimport gensim\nfrom gensim import models, corpora\nfrom gensim.models.nmf import Nmf\nfrom gensim.models.coherencemodel import CoherenceModel\nimport numpy as np\nimport operator\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import NMF\n\n# function to process and tokenize the text\ndef process_text(text):\n text = nltk.tokenize.casual.casual_tokenize(text)\n text = [each.lower() for each in text]\n text = [re.sub('[0-9]+', '', each) for each in text]\n text = [contractions.fix(each) for each in text]\n text = [SnowballStemmer('english').stem(each) for each in text]\n text = [w for w in text if w not in string.punctuation]\n text = [w for w in text if w not in stop_words]\n text = [each for each in text if len(each) > 1]\n text = [each for each in text if ' ' not in each]\n return text\n \n\ndef find_best_num_topics(dictionary, corpus, topic_nums):\n # Run the nmf model and calculate the coherence score\n coherence_scores = []\n for num in topic_nums:\n nmf = Nmf(\n corpus=corpus,\n num_topics=num,\n id2word=dictionary,\n chunksize=10,\n passes=5,\n kappa=.1,\n 
minimum_probability=0.01,\n w_max_iter=300,\n w_stop_condition=0.0001,\n h_max_iter=100,\n h_stop_condition=0.001,\n eval_every=10,\n normalize=True,\n random_state=42\n )\n # print(nmf.print_topics(num, 5))\n # Run the coherence model to get the score\n cm = CoherenceModel(\n model=nmf,\n texts=texts,\n dictionary=dictionary,\n coherence='c_v'\n )\n coherence_scores.append(round(cm.get_coherence(), 5))\n return coherence_scores\n\ndef print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]])\n print(message)\n print()\n\n\nif __name__ == '__main__':\n stop_words = stopwords.words('english')\n files = os.listdir('./dnc_speeches')\n # iterate over the list getting each file \n speeches = []\n for fle in files:\n # open the file and then call .read() to get the text \n with open('./dnc_speeches/'+fle) as f:\n if(fle != 'alexandria_ocasio-cortex.txt'):\n text = f.read()\n speeches.append(text)\n texts = [process_text(each) for each in speeches]\n tfidf_vectorizer = TfidfVectorizer(\n min_df=3,\n max_df=0.85,\n max_features=1000,\n ngram_range=(1, 2),\n preprocessor=' '.join\n ) \n tfidf = tfidf_vectorizer.fit_transform(texts)\n dictionary = gensim.corpora.dictionary.Dictionary(texts)\n\n # Filter out extremes to limit the number of features\n dictionary.filter_extremes(\n no_below=3,\n no_above=0.85,\n keep_n=1000\n )\n # Create the bag-of-words format (list of (token_id, token_count))\n corpus = [dictionary.doc2bow(text) for text in texts]\n\n # Create a list of the topic numbers we want to try\n topic_nums = [3, 5, 10, 15, 20]\n coherence_scores = find_best_num_topics(dictionary, corpus, topic_nums)\n scores = list(zip(topic_nums, coherence_scores))\n print(scores)\n best_num_topics = sorted(scores, key=operator.itemgetter(1), reverse=True)[0][0]\n print(best_num_topics)\n bow_nmf = Nmf(\n corpus=corpus,\n num_topics=best_num_topics,\n id2word=dictionary,\n ) \n print(bow_nmf.print_topics(best_num_topics, 5))\n tfidf_nmf = NMF(\n n_components=best_num_topics,\n init='random',\n random_state=0\n ).fit(tfidf)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n print_top_words(tfidf_nmf, tfidf_feature_names, 5)\n\n # test with a new speech, AOC's\n # Transform the new data with the fitted models\n with open('./dnc_speeches/alexandria_ocasio-cortex.txt') as f2:\n new_text = f2.read()\n new_text = process_text(new_text)\n tfidf_new = tfidf_vectorizer.transform(new_text)\n X_new = tfidf_nmf.transform(tfidf_new)\n # Get the top predicted topic\n predicted_topics = [np.argsort(each)[::-1][0] for each in X_new]\n print(predicted_topics)\n\n\n\n","sub_path":"nmf_topic_extraction.py","file_name":"nmf_topic_extraction.py","file_ext":"py","file_size_in_byte":4887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"514821761","text":"import pandas as pd\n\nimport sklearn\n\ndef build_features(input_file):\n\n df = pd.read_csv(input_file, sep = \";\")\n\n #change 'male' and 'female' to 0 and 1 for numeric calculations\n #df.replace({'Sex': {\"male\": 0,\"female\": 1}} )\n df['Sex']=df[\"Sex\"].replace([\"female\", \"male\"], [1,0])\n\n\n\n\n\n df['FamilySize'] = df['SibSp'] + df['Parch']+1\n df[\"IsAlone\"] = 0\n df.loc[df[\"FamilySize\"] == 1, \"IsAlone\"] = 1\n output_file = input_file.split('.')[0] + \"_bf.csv\"\n df.to_csv(output_file, sep = \";\")\n 
print(df.head(5))\n print(df.columns)\n\n\n\n","sub_path":"src/build_features.py","file_name":"build_features.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"415549303","text":"import argparse\nfrom bs4 import BeautifulSoup\nimport csv\n\ndef clean(s):\n\treturn s.replace(\"\\n\",\"\").lstrip(\" \").rstrip(\" \").encode(\"utf-8\")\n\nparser = argparse.ArgumentParser()\nparser.add_argument('file', metavar='F', \n help='a file to read')\nargs = parser.parse_args()\nf = open(args.file).read()\n#f = f.replace(\"\", \"\")\n#f = f.replace(\"\", \"\")\nsoup = BeautifulSoup(f,\"html.parser\")\n#tables = soup.find_all(\"table\",class_=\"wikitable\")\n#for table in tables:\n#\ttrs = table.find_all(\"tr\")\n\t# first = True\n#\tfor tr in trs:\n#\t\ttds = tr.find_all(\"td\")\n#\t\tfor td in tds:\n#\t\t\tprint('\"' + td.contents + '\",')\n#\t\tprint(\"\\n\")\n\n\n\n\n#html = open(\"table.html\").read()\n#soup = BeautifulSoup(html)\n\ni = 1\ntables = soup.find_all(\"table\")\nfor table in tables:\n\toutput_rows = []\n\tfor table_row in table.find_all(\"tr\"):\n\t\t#columns = table_row.find_all(\"th\")\n\t\t#output_row = []\n\t\t#for column in columns:\n\t\t#\toutput_row.append(clean(column.text))\n\t\t#output_rows.append(output_row)\n\t\tcolumns = table_row.find_all([\"td\", \"th\"])\n\t\toutput_row = []\n\t\tfor column in columns:\n\t\t\toutput_row.append(clean(column.text))\n\t\toutput_rows.append(output_row)\n\twith open(args.file + str(i) + '.csv', 'wb') as csvfile:\n\t\twriter = csv.writer(csvfile)\n\t\twriter.writerows(output_rows)\n\ti = i + 1  # increment inside the loop so each table gets its own numbered CSV\n\n","sub_path":"python/table2csv.py","file_name":"table2csv.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"299689771","text":"MAP_HEIGHT = 24\nMAP_WIDTH = 24\nWINDOW_HEIGHT = 680\nWINDOW_WIDTH = 680\nRECT_HEIGHT = 25\nRECT_WIDTH = 25\n\nnum_of_bots = 1\nbomb_range = 2\npath_bomb = './Images/bomba.jpg'\npath_bomberman = './Images/Bombardman.jpg'\npath_bot = './Images/bot.png'\npath_can_destroy = './Images/do_rozwalenia.jfif'\npath_cant_destroy = './Images/nie_do_rozwalenia.jfif'\npath_road = './Images/droga.jpg'\n","sub_path":"GameParams.py","file_name":"GameParams.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"534334541","text":"import requests\nimport json\n\n\nclass APIAboutFood:\n def __init__(self, username, foodname, tk):\n self.username = username\n self.foodname = foodname\n self.tk = tk\n\n def get_food_detail(self):\n url = \"http://localhost:8000/api/food/\"\n r = requests.get(url + self.foodname, headers={'Authorization': 'JWT ' + self.tk})\n if r.status_code == 200:\n res = r.json()\n else:\n res = None\n return res\n\n def get_user_food(self):\n url = \"http://localhost:8000/api/food/user/\"\n r = requests.post(url + self.username, data={'foodname': self.foodname}, headers={'Authorization': 'JWT ' + self.tk})\n if r.status_code == 200:\n res = json.loads(r.json())\n else:\n res = {'nope': 1}\n return res\n\n def get_user_food_by_date(self, date):\n url = \"http://localhost:8000/api/food/date/\"\n r = requests.get(url + self.username + '/' + date, headers={'Authorization': 'JWT ' + self.tk})\n if r.status_code == 200:\n res = r.json()\n else:\n res = dict()\n res['B'] = res['L'] = res['D'] = '-'\n return res\n\n def 
update_user_food_by_date(self, requested_data):\n url = \"http://localhost:8000/api/food/date/\"\n r = requests.post(url + self.username, data=requested_data, headers={'Authorization': 'JWT ' + self.tk})\n if r.status_code == 200:\n res = '요청이 정상적으로 반영되었습니다.'\n else:\n res = '정보를 다시 확인해주세요.'\n return res\n\n def delete_user_food_by_date(self, date, mealkind):\n url = \"http://localhost:8000/api/food/date/\"\n r = requests.delete(url + self.username + '/' + date + '/' + mealkind, headers={'Authorization': 'JWT ' + self.tk})\n if r.status_code == 200:\n res = '요청이 정상적으로 반영되었습니다.'\n else:\n res = '정보를 다시 확인해주세요.'\n return res\n","sub_path":"code/eatenAway/food/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"259408594","text":"# -*- coding: utf-8 -*-\n\n# ***************************************************************************\n# * Copyright (C) 2019 by Hilscher GmbH *\n# * netXsupport@hilscher.com *\n# * *\n# * This program is free software; you can redistribute it and/or modify *\n# * it under the terms of the GNU General Public License as published by *\n# * the Free Software Foundation; either version 2 of the License, or *\n# * (at your option) any later version. *\n# * *\n# * This program is distributed in the hope that it will be useful, *\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of *\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *\n# * GNU General Public License for more details. *\n# * *\n# * You should have received a copy of the GNU General Public License *\n# * along with this program; if not, write to the *\n# * Free Software Foundation, Inc., *\n# * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*\n# ***************************************************************************\n\nimport hashlib\nimport os\nimport os.path\nimport sqlite3\nimport xml.dom.minidom\n\n\nclass SnippetLibrary:\n # Print debug messages.\n __fDebug = False\n\n # The filename for the database.\n __strDatabasePath = None\n\n # The database connection.\n __tDb = None\n\n # The list of folders to scan recursively for snippets.\n __astrSnippetSearchPaths = None\n\n # The snippet library was already scanned if this flag is set.\n __fSnipLibIsAlreadyScanned = None\n\n def __init__(self, strDatabasePath, astrSnippetSearchPaths, debug=False):\n self.__fDebug = bool(debug)\n\n # Set the filename of the SQLITE3 database.\n self.__strDatabasePath = strDatabasePath\n if self.__fDebug:\n print('[SnipLib] Configuration: Database path = \"%s\"' %\n strDatabasePath)\n\n # The connection to the database is not open yet.\n self.__tDb = None\n\n # Convert all search paths to absolute paths.\n self.__astrSnippetSearchPaths = []\n for strPath in astrSnippetSearchPaths:\n self.__astrSnippetSearchPaths.append(os.path.abspath(strPath))\n\n # Print all search paths in debug mode.\n if self.__fDebug:\n for strPath in self.__astrSnippetSearchPaths:\n print('[SnipLib] Configuration: Search path \"%s\"' % strPath)\n\n # The snippet library was not scanned yet.\n self.__fSnipLibIsAlreadyScanned = False\n\n def __xml_get_all_text(self, tNode):\n astrText = []\n for tChild in tNode.childNodes:\n if(\n tChild.nodeType == tChild.TEXT_NODE or\n tChild.nodeType == tChild.CDATA_SECTION_NODE\n ):\n astrText.append(str(tChild.data))\n return ''.join(astrText)\n\n def __xml_get_node(self, tBaseNode, strTagName):\n tNode = None\n for tChildNode in tBaseNode.childNodes:\n if tChildNode.nodeType == tChildNode.ELEMENT_NODE:\n if tChildNode.localName == strTagName:\n tNode = tChildNode\n break\n\n return tNode\n\n def __get_snip_hash(self, strAbsPath):\n # Get the SHA384 hash.\n tFile = open(strAbsPath, 'rb')\n tHash = hashlib.sha384()\n fEof = False\n while fEof is False:\n strData = tFile.read(2048)\n tHash.update(strData)\n if len(strData) < 2048:\n fEof = True\n strDigest = tHash.hexdigest()\n tFile.close()\n\n # Return the hash.\n return strDigest\n\n def __db_open(self):\n tDb = self.__tDb\n if tDb is None:\n tDb = sqlite3.connect(self.__strDatabasePath)\n self.__tDb = tDb\n\n tCursor = tDb.cursor()\n\n # Construct the \"CREATE\" statement for the \"snippets\" table.\n strCreateStatement = (\n 'CREATE TABLE snippets ('\n 'id INTEGER PRIMARY KEY, '\n 'search_path TEXT NOT NULL, '\n 'path TEXT NOT NULL, '\n 'hash TEXT NOT NULL, '\n 'groupid TEXT NOT NULL, '\n 'artifact TEXT NOT NULL, '\n 'version TEXT NOT NULL, '\n 'clean INTEGER DEFAULT 0)'\n )\n if self.__fDebug:\n print('[SnipLib] Database: The current CREATE statement for the '\n '\"snippet\" table is \"%s\".' % strCreateStatement)\n\n # Compare the current \"CREATE\" statement with the statement of the\n # existing table.\n tCursor.execute('SELECT sql FROM sqlite_master WHERE name=\"snippets\"')\n tRes = tCursor.fetchone()\n if tRes is None:\n # The table does not exist yet. Create it now.\n if self.__fDebug:\n print('[SnipLib] Database: The \"snippet\" table does not yet '\n 'exist. Create it now.')\n tCursor.execute(strCreateStatement)\n tDb.commit()\n elif tRes[0] != strCreateStatement:\n if self.__fDebug:\n print('[SnipLib] Database: The existing \"snippet\" table has '\n 'a different CREATE statement: \"%s\".' 
% tRes[0])\n print('[SnipLib] Database: Delete the existing table and '\n 're-create it.')\n # Delete the old table.\n tCursor.execute('DROP TABLE snippets')\n tDb.commit()\n # Create a new table.\n tCursor.execute(strCreateStatement)\n tDb.commit()\n else:\n if self.__fDebug:\n print('[SnipLib] Database: The existing \"snippet\" table was '\n 'created with the correct statement.')\n\n def __snippet_get_gav(self, strPath):\n strGroup = None\n strArtifact = None\n strVersion = None\n\n # Parse the snippet.\n try:\n tXml = xml.dom.minidom.parse(strPath)\n except xml.dom.DOMException as tException:\n # Invalid XML, ignore.\n strArtifact = 'No valid XML: %s' % repr(tException)\n tXml = None\n\n if tXml is not None:\n # Search for the \"Info\" node.\n tInfoNode = self.__xml_get_node(tXml.documentElement, 'Info')\n if tInfoNode is None:\n # No Info node -> ignore the file.\n strArtifact = 'It has no \"Info\" node.'\n else:\n # Get the \"group\", \"artifact\" and \"version\" attributes.\n strGroup = tInfoNode.getAttribute('group')\n strArtifact = tInfoNode.getAttribute('artifact')\n strVersion = tInfoNode.getAttribute('version')\n if len(strGroup) == 0:\n strGroup = None\n strArtifact = (\n 'The \"group\" attribute of an \"Info\" node must not '\n 'be empty.'\n )\n elif len(strArtifact) == 0:\n strGroup = None\n strArtifact = (\n 'The \"artifact\" attribute of an \"Info\" node must not '\n 'be empty.'\n )\n elif len(strVersion) == 0:\n strGroup = None\n strArtifact = (\n 'The \"version\" attribute of an \"Info\" node must not '\n 'be empty.'\n )\n\n # Return the group, artifact and version.\n return strGroup, strArtifact, strVersion\n\n def __sniplib_invalidate(self, strSearchPath):\n tCursor = self.__tDb.cursor()\n\n # Show all files which are invalidated.\n if self.__fDebug:\n print('[SnipLib] Scan: Invalidating all cached entries for the '\n 'search path \"%s\".' % strSearchPath)\n tCursor.execute(\n 'SELECT path,groupid,artifact,version FROM snippets WHERE '\n 'search_path=?', (strSearchPath, )\n )\n atRes = tCursor.fetchall()\n if atRes is None or len(atRes) == 0:\n print('[SnipLib] Scan: -> No cached entries found for the '\n 'search path \"%s\".' % strSearchPath)\n else:\n for tRes in atRes:\n print(\n '[SnipLib] Scan: -> Invalidating entry G=\"%s\" '\n 'A=\"%s\" V=\"%s\" at \"%s\".' % (\n tRes[1],\n tRes[2],\n tRes[3],\n tRes[0]\n )\n )\n\n # Mark all files to be deleted. This flag will be cleared for all\n # files which are present.\n tCursor.execute(\n 'UPDATE snippets SET clean=1 WHERE search_path=?',\n (strSearchPath, )\n )\n self.__tDb.commit()\n\n def __sniplib_scan(self, strSearchPath):\n if self.__fDebug:\n print('[SnipLib] Scan: Scanning search path \"%s\".' %\n strSearchPath)\n\n tCursor = self.__tDb.cursor()\n # Search all files recursively.\n for strRoot, astrDirs, astrFiles in os.walk(strSearchPath,\n followlinks=True):\n # Process all files in this folder.\n for strFile in astrFiles:\n # Get the extension of the file.\n strDummy, strExt = os.path.splitext(strFile)\n if strExt == '.xml':\n # Get the absolute path for the file.\n strAbsPath = os.path.join(strRoot, strFile)\n\n # Get the stamp of the snip.\n strDigest = self.__get_snip_hash(strAbsPath)\n\n if self.__fDebug:\n print('[SnipLib] Scan: -> Found snippet at \"%s\" '\n 'with the hash \"%s\".' % (strAbsPath, strDigest))\n\n # Search the snippet in the database.\n tCursor.execute(\n 'SELECT id,hash FROM snippets WHERE search_path=? 
'\n 'AND path=?', (\n strSearchPath,\n strAbsPath\n )\n )\n atResults = tCursor.fetchone()\n if atResults is None:\n # The snippet is not present in the database yet.\n if self.__fDebug:\n print(\n '[SnipLib] Scan: -> The snippet is not '\n 'registered in the cache yet. Make a new '\n 'entry now.'\n )\n strGroup, strArtifact, strVersion = \\\n self.__snippet_get_gav(strAbsPath)\n if strGroup is None:\n if self.__fDebug:\n print(\n '[SnipLib] Scan: -> Warning: '\n 'Ignoring file \"%s\". %s' % (\n strAbsPath,\n strArtifact\n )\n )\n\n # Make a new entry.\n tCursor.execute(\n 'INSERT INTO snippets '\n '(search_path, path, hash, groupid, '\n 'artifact, version) VALUES '\n '(?, ?, ?, ?, ?, ?)', (\n strSearchPath,\n strAbsPath,\n strDigest,\n strGroup,\n strArtifact,\n strVersion\n )\n )\n\n else:\n # Compare the hash of the file.\n if atResults[1] == strDigest:\n # The hash is the same -> the file is\n # already known.\n if self.__fDebug:\n print('[SnipLib] Scan: -> The snippet is'\n ' already registered in the cache.')\n\n # Found the file. Do not delete it from the\n # database.\n tCursor.execute(\n 'UPDATE snippets SET clean=0 WHERE id=?',\n (atResults[0], )\n )\n\n else:\n # The hash differs. Update the entry with the new\n # hash, group, artifact and version.\n if self.__fDebug:\n print(\n '[SnipLib] Scan: -> The snippet has '\n 'a different hash than the entry in the '\n 'cache. Update the metadata now.'\n )\n\n strGroup, strArtifact, strVersion = \\\n self.__snippet_get_gav(strAbsPath)\n if strGroup is None:\n if self.__fDebug:\n print(\n '[SnipLib] Scan: -> Warning: '\n 'Ignoring file \"%s\". %s' % (\n strAbsPath,\n strArtifact\n )\n )\n else:\n tCursor.execute(\n 'UPDATE snippets SET hash=?, groupid=?, '\n 'artifact=?, version=?, clean=0 WHERE '\n 'id=?', (\n strDigest,\n strGroup,\n strArtifact,\n strVersion,\n atResults[0]\n )\n )\n\n def __sniplib_forget_invalid_entries(self, strSearchPath):\n # Remove all entries from the cache which are marked for clean.\n tCursor = self.__tDb.cursor()\n\n # Show all files which are removed from the cache.\n if self.__fDebug:\n print('[SnipLib] Scan: Remove all invalidated entries from the '\n 'cache for the search path \"%s\".' % strSearchPath)\n tCursor.execute(\n 'SELECT path,groupid,artifact,version FROM snippets WHERE '\n 'clean!=0 AND search_path=?', (strSearchPath, )\n )\n atRes = tCursor.fetchall()\n if atRes is None or len(atRes) == 0:\n print('[SnipLib] Scan: -> No cache entries are removed.')\n else:\n for tRes in atRes:\n print(\n '[SnipLib] Scan: -> Removing cache entry G=\"%s\" '\n 'A=\"%s\" V=\"%s\" at \"%s\".' % (\n tRes[1],\n tRes[2],\n tRes[3],\n tRes[0]\n )\n )\n\n tCursor.execute(\n 'DELETE FROM snippets WHERE clean!=0 AND search_path=?',\n (strSearchPath, )\n )\n self.__tDb.commit()\n\n def find(self, strGroup, strArtifact, strVersion, atParameter):\n # Open the connection to the database.\n self.__db_open()\n\n # Scan each search path.\n if self.__fSnipLibIsAlreadyScanned is not True:\n for strSearchPath in self.__astrSnippetSearchPaths:\n self.__sniplib_invalidate(strSearchPath)\n self.__sniplib_scan(strSearchPath)\n self.__sniplib_forget_invalid_entries(strSearchPath)\n self.__fSnipLibIsAlreadyScanned = True\n\n # Search for the snippet in each search path. Stop on the first hit.\n atMatch = None\n tCursor = self.__tDb.cursor()\n for strSearchPath in self.__astrSnippetSearchPaths:\n tCursor.execute(\n 'SELECT path FROM snippets WHERE search_path=? AND groupid=? '\n 'AND artifact=? 
AND version=?', (\n strSearchPath,\n strGroup,\n strArtifact,\n strVersion\n )\n )\n atResult = tCursor.fetchone()\n if atResult is not None:\n atMatch = atResult\n break\n\n # Get the snippet name for messages.\n strSnippetName = 'G=\"%s\", A=\"%s\", V=\"%s\"' % (\n strGroup,\n strArtifact,\n strVersion\n )\n\n if atMatch is None:\n # No matching snippet found.\n raise Exception('No matching snippet found for %s.' %\n strSnippetName)\n\n strAbsPath = atMatch[0]\n if self.__fDebug:\n print('[SnipLib] Resolve: Found %s at \"%s\".' % (\n strSnippetName, strAbsPath))\n\n # Try to parse the snippet file.\n try:\n tXml = xml.dom.minidom.parse(strAbsPath)\n except xml.dom.DOMException as tException:\n # Invalid XML, ignore.\n raise Exception('Failed to parse the snippet %s: %s' % (\n strSnippetName,\n repr(tException)\n ))\n\n tRootNode = tXml.documentElement\n\n # Find all parameters.\n # The \"ParameterList\" node is optional.\n atParameterList = {}\n tParameterListNode = self.__xml_get_node(tRootNode, 'ParameterList')\n if tParameterListNode is not None:\n # Loop over all child nodes.\n for tChildNode in tParameterListNode.childNodes:\n if tChildNode.nodeType == tChildNode.ELEMENT_NODE:\n if tChildNode.localName == 'Parameter':\n # Get the \"name\" atribute.\n strName = tChildNode.getAttribute('name')\n if len(strName) == 0:\n raise Exception(\n 'Failed to parse the snippet %s: a parameter '\n 'node is missing the \"name\" attribute!' %\n strSnippetName\n )\n # Get the \"default\" attribute. It is optional.\n tDefault = None\n if tChildNode.hasAttribute('default'):\n tDefault = tChildNode.getAttribute('default')\n # Is the parameter already present?\n if strName in atParameterList:\n raise Exception(\n 'Failed to parse the snippet %s: the '\n 'parameter is requested more than once in '\n 'the snippet definition!' %\n strSnippetName\n )\n else:\n atParameterList[strName] = tDefault\n else:\n raise Exception(\n 'Failed to parse the snippet %s: unexpected '\n 'tag \"%s\".' % (\n strSnippetName,\n tChildNode.localName\n )\n )\n\n # Combine the parameters.\n atReplace = {}\n astrMissing = []\n # Add all default values and find missing values.\n for strName, tDefault in iter(atParameterList.items()):\n if tDefault is not None:\n atReplace[strName] = tDefault\n if strName not in atParameter:\n astrMissing.append(strName)\n if len(astrMissing) != 0:\n raise Exception(\n 'Failed to instanciate snippet %s: missing parameter %s' % (\n strSnippetName,\n ', '.join(astrMissing)\n )\n )\n\n # Add all required parameters which have assigned values.\n # Find unused parameter.\n astrUnused = []\n for strName, strValue in iter(atParameter.items()):\n if strName in atParameterList:\n atReplace[strName] = strValue\n else:\n astrUnused.append(strName)\n\n if len(astrUnused) != 0:\n if self.__fDebug:\n print(\n '[SnipLib] Resolve: the snippet %s does not use the '\n 'following parameters: %s' % (\n strSnippetName,\n ', '.join(astrUnused)\n )\n )\n\n # Find the \"Snippet\" node.\n tSnippetNode = self.__xml_get_node(tRootNode, 'Snippet')\n if tSnippetNode is None:\n raise Exception(\n 'The snippet definition \"%s\" has no \"Snippet\" node.' 
%\n strAbsPath\n )\n\n # Get the text contents.\n strSnippet = self.__xml_get_all_text(tSnippetNode)\n\n return (strSnippet, atReplace, strAbsPath)\n","sub_path":"site_scons/hboot_image_compiler/snippet_library.py","file_name":"snippet_library.py","file_ext":"py","file_size_in_byte":22010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"83650489","text":"import numpy as np\nimport torch\nimport matplotlib\n# matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport torch.nn as nn # containing various building blocks for your neural networks\nimport torch.optim as optim # implementing various optimization algorithms\nimport torch.nn.functional as F # a lower level (compared to torch.nn) interface\nimport math\nfrom scipy.io import loadmat\nimport random\nfrom random import randint\n\n# Local imports\nimport CodingFunctions\nimport Utils\nimport UtilsPlot\nimport Decoding\n\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\nuse_gpu = True\nif use_gpu:\n device = torch.device(\"cuda:0\") # Uncomment this to run on GPU\n\nclass CNN(torch.nn.Module):\n def __init__(self, architecture, K):\n \"\"\"\n In the constructor we instantiate two nn.Linear modules and assign them as\n member variables.\n \"\"\"\n super(CNN, self).__init__()\n\n #################### Coding Function and Scene Parameters\n sourceExponent = 9\n ambientExponent = 6\n\n #### Coding (Initialize at Hamiltonian)\n self.N = 10000\n self.K = K\n (ModFs_np,DemodFs_np) = CodingFunctions.GetHamK3(N = self.N)\n temp = torch.tensor(ModFs_np, device=device, dtype=dtype)\n self.ModFs = temp[:,:K].clone().detach().requires_grad_(True)\n temp = torch.tensor(DemodFs_np, device=device, dtype=dtype)\n self.DemodFs = temp[:,:K].clone().detach().requires_grad_(True)\n\n self.architecture = architecture\n #### Global parameters\n speedOfLight = 299792458. * 1000. # mm / sec \n #### Sensor parameters\n self.T = 0.1 # Integration time. Exposure time in seconds\n self.readNoise = 20 # Standard deviation in photo-electrons\n #### Coding function parameters\n dMax = 10000 # maximum depth\n fMax = speedOfLight/(2*float(dMax)) # Maximum unambiguous repetition frequency (in Hz)\n self.tauMin = 1./fMax\n fSampling = float(dMax)*fMax # Sampling frequency of mod and demod functuion\n self.dt = self.tauMin/float(self.N)\n self.pAveSourcePerPixel = np.power(10, sourceExponent) # Source power. Avg number of photons emitted by the light source per second. \n # self.pAveSourcePerPixel = pAveSource/nPixels # Avg number of photons arriving to each pixel per second. If all light is reflected back.\n freq = fMax # Fundamental frequency of modulation and demodulation functions\n self.tau = 1/freq\n #### Scene parameters\n self.pAveAmbientPerPixel = np.power(10, ambientExponent) # Ambient light power. Avg number of photons per second due to ambient light sources\n # self.pAveAmbientPerPixel = pAveAmbient/nPixels # Avg # of photons per second arriving to each pixel\n self.meanBeta = 1e-4 # Avg fraction of photons reflected from a scene points back to the detector\n #### Camera gain parameter\n ## The following bound is found by assuming the max brightness value is obtained when demod is 1. \n self.gamma = 1./(self.meanBeta*self.T*(self.pAveAmbientPerPixel+self.pAveSourcePerPixel)) # Camera gain. 
Ensures all values are between 0-1.\n\n #### CNN Initialization\n CNN.network(self, architecture, None, True)\n \n \n def forward(self, gt_depths):\n \"\"\"\n In the forward function we accept a Tensor of input data and we must return\n a Tensor of output data. We can use Modules defined in the constructor as\n well as arbitrary operators on Tensors.\n \"\"\"\n\n #################### Simulation\n ## Set area under the curve of outgoing ModF to the totalEnergy\n ModFs_scaled = Utils.ScaleMod(self.ModFs, device=device, tau=self.tauMin, pAveSource=self.pAveSourcePerPixel)\n # Calculate correlation functions (NxK matrix) and normalize it (zero mean, unit variance)\n CorrFs = Utils.GetCorrelationFunctions(ModFs_scaled,self.DemodFs,device=device,dt=self.dt)\n NormCorrFs = (CorrFs.t() - torch.mean(CorrFs,1)) / torch.std(CorrFs,1)\n NormCorrFs = NormCorrFs.t()\n # Compute brightness values\n BVals = Utils.ComputeBrightnessVals(ModFs=ModFs_scaled, DemodFs=self.DemodFs, CorrFs=CorrFs, depths=gt_depths, \\\n pAmbient=self.pAveAmbientPerPixel, beta=self.meanBeta, T=self.T, tau=self.tau, dt=self.dt, gamma=self.gamma) \n #### Add noise\n # Calculate variance\n noiseVar = BVals*self.gamma + math.pow(self.readNoise*self.gamma, 2) \n # Add noise to all brightness values\n BVals = Utils.GetClippedBSamples(nSamples=1,BMean=BVals,BVar=noiseVar,device=device)\n BVals = BVals.permute(0,3,1,2) # Put channel dimension at right position\n\n # Normalize BVals\n BVals_mean = torch.mean(BVals)\n BVals_std = torch.std(BVals)\n BVals = (BVals - BVals_mean)/BVals_std\n\n #### CNN Network\n out = CNN.network(self, self.architecture, BVals)\n\n decodedDepths = torch.squeeze(out, 1) # Remove channel dimension\n return decodedDepths\n\n\n def network(self, architecture, BVals=None, init=False):\n if architecture == 'sequential':\n if init == True:\n self.layer_down1 = nn.Sequential(\n nn.Conv2d(self.K, 32, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU())\n self.layer_down2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU())\n self.layer_down3 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU())\n\n self.layer_same1 = nn.Sequential(\n nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU())\n\n self.layer_up1 = nn.Sequential(\n nn.BatchNorm2d(128),\n nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),\n nn.ReLU())\n self.layer_up2 = nn.Sequential(\n nn.BatchNorm2d(64),\n nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1),\n nn.ReLU())\n self.layer_up3 = nn.Sequential(\n nn.BatchNorm2d(32),\n nn.ConvTranspose2d(32, 8, kernel_size=4, stride=2, padding=1),\n nn.ReLU())\n\n self.layer_skip_nonlinearity1 = nn.Sequential(\n nn.Conv2d(self.K, 8, kernel_size=1, stride=1, padding=0),\n nn.BatchNorm2d(8),\n nn.ReLU())\n self.layer_skip_nonlinearity2 = nn.Sequential(\n nn.Conv2d(8, 16, kernel_size=1, stride=1, padding=0),\n nn.BatchNorm2d(16),\n nn.ReLU())\n\n\n self.layer_combine = nn.Sequential(\n nn.Conv2d(24,1, kernel_size=1, stride=1, padding=0))\n \n else:\n # Down Convolution\n x = self.layer_down1(BVals)\n x = self.layer_down2(x)\n x = self.layer_down3(x)\n # Same size Convolution\n x = self.layer_same1(x)\n # Up Convolution\n x = self.layer_up1(x)\n x = self.layer_up2(x)\n x = self.layer_up3(x)\n # Skip layer and combination with CNN\n x_skip = self.layer_skip_nonlinearity1(BVals)\n x_skip = self.layer_skip_nonlinearity2(x_skip)\n 
x = self.layer_combine(torch.cat([x, x_skip], 1))\n return x\n\n \n# Load data\ndata = loadmat('patches_train_test_val_64.mat')\ntrain = data['patches_train']\nval = data['patches_val']\ntest = data['patches_test']\n\n#val = torch.from_numpy(train[50000:51000,:,:])\n#test = torch.from_numpy(train[70000:71000,:,:])\ntrain = torch.from_numpy(train[:50000,:,:])\nval = torch.from_numpy(val)\ntest = torch.from_numpy(test)\n\ntrain_gt_depths = train.float().to(device).requires_grad_(True)\nval_gt_depths = val.float().to(device).requires_grad_(False)\ntest_gt_depths = test.float().to(device).requires_grad_(False)\n\ntrain_gt_depths_mean = torch.mean(train_gt_depths)\ntrain_gt_depths_std = torch.std(train_gt_depths)\n\ntrain_normalized_gt_depths = (train_gt_depths-train_gt_depths_mean)/train_gt_depths_std\nval_normalized_gt_depths = (val_gt_depths-train_gt_depths_mean)/train_gt_depths_std\ntest_normalized_gt_depths = (test_gt_depths-train_gt_depths_mean)/train_gt_depths_std\nprint(\"DATA IMPORTED\")\n\nK_NUMBER = [2,1]\nfor K in K_NUMBER:\n # Construct our model by instantiating the class defined above\n # Choose from: 'sequential', 'skip_connection'\n model = CNN('sequential', K)\n if use_gpu:\n model.cuda()\n print(\"MODEL MADE\")\n # Construct our loss function and an Optimizer. The call to model.parameters()\n # in the SGD constructor will contain the learnable parameters of the two\n # nn.Linear modules which are members of the model.\n criterion = torch.nn.MSELoss(reduction='mean')\n parameters = list(model.parameters())\n parameters.append(model.ModFs)\n parameters.append(model.DemodFs)\n optimizer = optim.Adam(parameters, lr = 3e-4)\n\n with torch.autograd.detect_anomaly():\n iteration = 0\n increased = 0\n patience = 100\n train_batch_size = 64\n val_every = 100\n train_enumeration = torch.arange(train_gt_depths.shape[0])\n train_enumeration = train_enumeration.tolist()\n train_loss_history = []\n val_loss_history = []\n\n while increased <= patience:\n train_ind = random.sample(train_enumeration, train_batch_size)\n # Forward pass: Compute predicted y by passing x to the model\n train_depths_pred = model(train_gt_depths[train_ind,:,:])\n # Compute and print loss\n train_loss = criterion(train_depths_pred, train_normalized_gt_depths[train_ind])\n train_loss_history.append(train_loss.item())\n train_depths_pred_unnorm = train_depths_pred*train_gt_depths_std+train_gt_depths_mean\n train_MSE = criterion(train_depths_pred_unnorm, train_gt_depths[train_ind]) \n iteration = iteration + 1\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n train_loss.backward(retain_graph=True)\n optimizer.step()\n\n if use_gpu:\n torch.cuda.empty_cache()\n\n if iteration == 1 or iteration%val_every == 0:\n with torch.no_grad():\n val_depths_pred = model(val_gt_depths)\n val_loss = criterion(val_depths_pred, val_normalized_gt_depths)\n val_loss_history.append(val_loss.item())\n val_depths_pred_unnorm = val_depths_pred*train_gt_depths_std+train_gt_depths_mean\n val_MSE = criterion(val_depths_pred_unnorm, val_gt_depths)\n print(\"Iteration: %d, K: %d, Train Loss: %f, Val Loss: %f, Train MSE: %f, Val MSE: %f\" %(iteration, K, train_loss.item(), val_loss.item(), train_MSE, val_MSE))\n if iteration == 1 or val_loss < best_val_loss:\n best_val_loss = val_loss\n best_iteration = iteration\n best_model = model\n model_name = 'results/model_nn_K' + str(K) + '_patched_point'\n torch.save(model, model_name)\n increased = 0\n else:\n increased = increased + 1\n\n results = 
\"results/results_nn_K\" + str(K) + \"_patched_point\"\n file = open(results, \"w\")\n print(\"DONE TRAINING\")\n print(\"Best Validation Loss:\", best_val_loss.item())\n file.write(\"Best Validation Loss:\" + str(best_val_loss.item()) + \"\\n\")\n print(\"Best Iteration:\", best_iteration)\n file.write(\"Best Iteration:\" + str(best_iteration) + \"\\n\")\n\n # Plot training and validation loss over iterations\n fig, ax = plt.subplots()\n ax.plot(np.arange(len(train_loss_history)), train_loss_history, label='training loss')\n ax.plot(np.arange(0,len(val_loss_history)*val_every,val_every), val_loss_history, label='validation loss')\n ax.legend(loc='best')\n # plt.show(block=True)\n plot_name = 'results/loss_history_nn_K' + str(K) + '_patched_point.png'\n plt.savefig(plot_name)\n\n # Loss on test set\n with torch.no_grad():\n test_depths_pred = best_model(test_gt_depths)\n test_loss = criterion(test_depths_pred, test_normalized_gt_depths)\n test_depths_pred_unnorm = test_depths_pred*train_gt_depths_std+train_gt_depths_mean\n test_MSE = criterion(test_depths_pred_unnorm, test_gt_depths)\n print(\"Evaluate best model on test set:\")\n print(\"Test Loss: %f, Test MSE: %f\" %(test_loss.item(), test_MSE))\n file.write(\"Test Loss:\" + str(test_loss.item()) + \", Test MSE:\" + str(test_MSE) + \"\\n\")\n\n # Stitch test patches back together and show two example depth maps from test set\n D = 64\n row_patch_num = 7\n col_patch_num = 9\n patch_num_scene = row_patch_num * col_patch_num\n test_depths_pred_unnorm_cpu = test_depths_pred_unnorm.cpu().numpy()\n test_gt_depths_cpu = test_gt_depths.cpu().numpy()\n num = np.floor(test_depths_pred_unnorm_cpu.shape[0] / patch_num_scene)\n n1 = randint(0, num-1)\n n2 = randint(0, num-1)\n im1 = np.zeros((row_patch_num*D, col_patch_num*D))\n im1_gt = np.zeros((row_patch_num*D, col_patch_num*D))\n im2 = np.zeros((row_patch_num*D, col_patch_num*D))\n im2_gt = np.zeros((row_patch_num*D, col_patch_num*D))\n ind1 = int(patch_num_scene * n1)\n ind2 = int(patch_num_scene * n2)\n for r in range(row_patch_num):\n for c in range(col_patch_num):\n im1[r*D:(r+1)*D, c*D:(c+1)*D] = np.squeeze(test_depths_pred_unnorm_cpu[ind1, :, :])\n im1_gt[r*D:(r+1)*D, c*D:(c+1)*D] = np.squeeze(test_gt_depths_cpu[ind1, :, :])\n im2[r*D:(r+1)*D, c*D:(c+1)*D] = np.squeeze(test_depths_pred_unnorm_cpu[ind2, :, :])\n im2_gt[r*D:(r+1)*D, c*D:(c+1)*D] = np.squeeze(test_gt_depths_cpu[ind2, :, :])\n ind1 = ind1 + 1\n ind2 = ind2 + 1\n im1_max = np.amax(im1_gt)\n im1_min = np.amin(im1_gt)\n im2_max = np.amax(im2_gt)\n im2_min = np.amin(im2_gt)\n\n fig = plt.figure()\n plt.subplot(1, 3, 1)\n plt.imshow(im1_gt)\n plt.set_cmap('jet')\n plt.clim(im1_min,im1_max)\n plt.colorbar()\n plt.subplot(1, 3, 2)\n plt.imshow(im1)\n plt.set_cmap('jet')\n plt.clim(im1_min,im1_max)\n plt.colorbar()\n plt.subplot(1, 3, 3)\n plt.imshow(im1_gt-im1)\n plt.set_cmap('bwr')\n plt.clim(-500,500)\n plt.colorbar()\n # plt.show(block=True)\n plot_name = 'results/depth_plot_nn_one_K' + str(K) + '_patched_point.png'\n plt.savefig(plot_name)\n\n fig = plt.figure()\n plt.subplot(1, 3, 1)\n plt.imshow(im2_gt)\n plt.set_cmap('jet')\n plt.clim(im2_min,im2_max)\n plt.colorbar()\n plt.subplot(1, 3, 2)\n plt.imshow(im2)\n plt.set_cmap('jet')\n plt.clim(im2_min,im2_max)\n plt.colorbar()\n plt.subplot(1, 3, 3)\n plt.imshow(im2_gt-im2)\n plt.set_cmap('bwr')\n plt.clim(-500,500)\n plt.colorbar()\n # plt.show(block=True)\n plot_name = 'results/depth_plot_nn_two_K' + str(K) + '_patched_point.png'\n plt.savefig(plot_name)\n\n\n # Save coding 
functions\n ModFs_scaled = Utils.ScaleMod(model.ModFs, device=device, tau=model.tauMin, pAveSource=model.pAveSourcePerPixel)\n\n UtilsPlot.PlotCodingScheme(model.ModFs,model.DemodFs,device)\n ModFs_np = model.ModFs.cpu().detach().numpy()\n DemodFs_np = model.DemodFs.cpu().detach().numpy()\n CorrFs = Utils.GetCorrelationFunctions(model.ModFs,model.DemodFs,device=device)\n CorrFs_np = CorrFs.cpu().detach().numpy()\n name = 'results/coding_functions_nn_K' + str(K) + '_patched_point.npz'\n np.savez(name, ModFs=ModFs_np, DemodFs=DemodFs_np, CorrFs=CorrFs_np)\n file.close()\n\n","sub_path":"Code/script_nn_final_patched_point.py","file_name":"script_nn_final_patched_point.py","file_ext":"py","file_size_in_byte":16189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"1052240","text":"\"\"\"\nInsert media item at random time stamps, up to N times (duplicate positions are dropped).\n\nCopyright 2021, Francesco Roberto Dani\nwww.francesco-dani.com\nf.r.d@hotmail.it\n\n\"\"\"\n\n##############################\n### CHANGE PARAMETERS HERE ###\n##############################\n\n# Path to media item\n#MEDIA_FILE_PATH = \"/Users/admin/Desktop/LABA - Sound Design - Video Files/Horror_lights_01/audio/Light.wav\"\nMEDIA_FILE_PATH = \"/Users/admin/Documents/Samples/SoundLibrary/Instruments/Lo-Fi/drum/drum_101/77.wav\"\n\n# Number of insertions\nN = 15\n\n# Start time (s)\nSTART_TIME = 0.0\n\n# End time (s)\nEND_TIME = 36.0\n\n# Step (s)\nSTEP = 0.25\n\n\n\n#########################\n### END OF PARAMETERS ###\n#########################\n\nimport os\nimport sys\nimport random\n# draw N random positions, snap them to the STEP grid and drop duplicates,\n# then insert the media item at each remaining position\ntimes = [round(random.uniform(START_TIME, END_TIME) / STEP) * STEP for i in range(N)]\ntimes = list(dict.fromkeys(times))\nfor index, time in enumerate(times):\n\tRPR_SetEditCurPos(time, True, True)\n\tif index == 0:\n\t\tmedia = RPR_InsertMedia(MEDIA_FILE_PATH, 1) # 1=add new track\n\telse:\n\t\tmedia = RPR_InsertMedia(MEDIA_FILE_PATH, 0) # 0=add to current track\n\n\n\n\n\n\n\n","sub_path":"ReaScript/insert_item_randomly.py","file_name":"insert_item_randomly.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"281905470","text":"import requests\nimport re\n\n\nclass Api:\n def __init__(self, host: str):\n self.host = host\n self.url = f\"http://{self.host}\"\n self.api_url = f\"{self.url}/api\"\n self.session = requests.Session()\n\n def get_middleware_token(self, html: str) -> str:\n return html.split('csrfmiddlewaretoken\" value=\"')[1].split('\"')[0]\n\n def sing_up(self, username: str, password: str, fname: str, lname:str ) -> requests.Response:\n r = self.session.get(f\"{self.url}/signup/\")\n\n r = self.session.post(f\"{self.url}/signup/\", data={\n \"csrfmiddlewaretoken\": self.get_middleware_token(r.text),\n \"username\": username, \n \"password\": password, \n \"fname\": fname, \n \"lname\": lname\n })\n return r\n\n def login(self, username: str, password: str) -> requests.Response:\n r = self.session.get(f\"{self.url}/login/\")\n \n r = self.session.post(f\"{self.url}/login/\", data={\n \"csrfmiddlewaretoken\": self.get_middleware_token(r.text),\n \"username\": username, \n \"password\": password\n })\n return r\n\n def get_user_barks(self, username: str) -> requests.Response:\n r = self.session.get(f\"{self.url}/{username}/\")\n if r.status_code == 200:\n return r\n\n def add_bark(self, username: str, text: str, is_private: bool = False) -> requests.Response:\n r 
= self.session.get(f\"{self.url}/{username}/\")\n\n data = {\n \"csrfmiddlewaretoken\": self.get_middleware_token(r.text),\n \"bark_text\": text\n }\n\n if is_private:\n data[\"is_private\"] = 1\n\n r = self.session.post(f\"{self.url}/add_bark/\", data=data)\n return r\n\n def get_bark(self, bark_id: int) -> requests.Response:\n r = self.session.get(f\"{self.url}/get_bark/{bark_id}/\")\n if r.status_code == 200:\n return r\n\n def comment_bark(self, bark_id: int, text: str, is_private: bool = False) -> requests.Response:\n r = self.session.get(f\"{self.url}/get_bark/{bark_id}/\")\n \n data = {\n \"csrfmiddlewaretoken\": self.get_middleware_token(r.text),\n \"comment_text\": text\n }\n\n if is_private:\n data[\"is_private\"] = 1\n \n r = self.session.post(f\"{self.url}/leave_comment/{bark_id}/\", data=data)\n return r\n \n def add_friend(self, username: str) -> requests.Response:\n r = self.session.get(f\"{self.url}/add_friend/{username}/\")\n return r\n\n def confirm_friend(self, username: str) -> requests.Response:\n r = self.session.get(f\"{self.url}/confirm_friend/{username}/\")\n return r\n\n def generate_token(self) -> str:\n r = self.session.get(f\"{self.url}/generate_token/\")\n if r.status_code == 200:\n tokens = re.findall(r\"[\\da-f]{32}\", r.text)\n if tokens:\n return tokens[0]\n \n def api_index(self, token) -> dict:\n r = requests.get(f\"{self.api_url}/\", headers={\"Token\": token})\n if r.status_code == 200:\n return r.json()\n\n def api_barks(self, token, username) -> list:\n r = requests.get(f\"{self.api_url}/barks/{username}/\", headers={\"Token\": token})\n if r.status_code == 200:\n return r.json()\n\n def api_comments(self, token, bark_id) -> list:\n r = requests.get(f\"{self.api_url}/comments/{bark_id}/\", headers={\"Token\": token})\n if r.status_code == 200:\n return r.json()\n\n def api_user_info(self, token, username) -> dict:\n r = requests.get(f\"{self.api_url}/user/{username}\", headers={\"Token\": token})\n if r.status_code == 200:\n return r.json()\n\n def api_get_users(self, token, page_n) -> list:\n r = requests.get(f\"{self.api_url}/users/{page_n}\", headers={\"Token\": token})\n if r.status_code == 200:\n return r.json()\n\n def api_get_last_barks(self, token, page_n) -> list:\n r = requests.get(f\"{self.api_url}/last_barks/{page_n}/\", headers={\"Token\": token})\n if r.status_code == 200:\n return r.json()\n\n def logout(self):\n self.session.get(f\"{self.url}/logout/\")\n","sub_path":"sploits/barker/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"618134554","text":"#!/usr/bin/env python\n# coding=gb2312\nimport sys\nsys.path.append(\"../public\")\nimport time\nimport datetime\nfrom tm_config import DBConf\nfrom tm_config import logger\nfrom tm_NSqlDB import NSqlDB\nfrom send_message import email\nfrom send_message import gsmsend\n\ndef change_monitor():\n table_name = \"monitor\"\n try:\n cursor = NSqlDB(\"task_manager\")\n cursor.useDictCursor()\n except Exception as e:\n logger.critical(\"Exception: Can not connect to task_manager_db: %s \" % (e))\n \n ###取得现在值班人的ID,并更新其状态为4\n sql = \"select monitor_id, name, email, mobile from %s where type = 3\"%(table_name)\n cursor.execute(sql)\n row = cursor.fetchone()\n current_monitor_id = row['monitor_id']\n current_name = row['name']\n sql = \"update %s set type = 4 where monitor_id = %s\"%(table_name, current_monitor_id)\n cursor.execute(sql)\n\n ###取得将要值班人的ID,并更新其状态为3\n sql = 
\"select monitor_id, name, email, mobile from %s where type = 2 order by monitor_id\"%(table_name)\n cursor.execute(sql)\n row = cursor.fetchone()\n if not row:\n sql = \"update %s set type = 2 where type = 4\"%(table_name)\n cursor.execute(sql)\n sql = \"select monitor_id, name, email, mobile from %s where type = 2 order by monitor_id\"%(table_name)\n cursor.execute(sql)\n row = cursor.fetchone()\n to_monitor_id = row['monitor_id']\n to_name = row['name']\n to_email = row['email']\n to_mobile = row['mobile']\n sql = \"update %s set type = 3 where monitor_id = %s\"%(table_name, to_monitor_id)\n cursor.execute(sql)\n\n email([to_email,],\"恭喜!任务监控平台本周你值班!\",\"你的值班时间是本周五15:00至下周五15:00,祝你好运!\")\n gsmsend([to_mobile,], \"恭喜!任务监控平台本周你值班!你的值班时间是本五15:00到下周五15:00,祝你好运!\")\n \n email([\"ubi@baidu.com\"], \"本周任务监控平台的值班人\", \"Hi, All, 本周任务监控平台的值班人为%s (%s),\\\n 你的值班时间是本周五15:00至下周五15:00,祝你好运\"%(to_name, to_mobile))\n cursor.close()\n logger.debug(\"change monitor from %s to %s\" % (current_name, to_name))\n\nif __name__ == \"__main__\":\n change_monitor()\n","sub_path":"bin/tm/monitor/change_monitor.py","file_name":"change_monitor.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"234923577","text":"from zope.component import getAdapters, queryMultiAdapter\nfrom zope.component import getSiteManager\nfrom zope.contentprovider.interfaces import IContentProvider\nfrom zope.interface import implements\nfrom zope.viewlet.interfaces import IViewlet\n\nfrom plone.app.upgrade.tests.base import MigrationTest\nfrom plone.app.upgrade.utils import loadMigrationProfile\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.interfaces import INonInstallable\nfrom Products.CMFPlone.utils import getFSVersionTuple\nfrom Products.GenericSetup import profile_registry\nfrom Products.GenericSetup.interfaces import EXTENSION\n\nimport alphas\n\nPLONE5 = getFSVersionTuple()[0] >= 5\n\n\nclass TestMigrations_v4_3alpha1(MigrationTest):\n\n profile = 'profile-plone.app.upgrade.v43:to43alpha1'\n\n def testProfile(self):\n # This tests the whole upgrade profile can be loaded\n loadMigrationProfile(self.portal, self.profile)\n self.assertTrue(True)\n\n def testAddDisplayPublicationDateInBylineProperty(self):\n if PLONE5:\n return\n pprop = getToolByName(self.portal, 'portal_properties')\n self.assertEqual(\n pprop.site_properties.getProperty('displayPublicationDateInByline'),\n False)\n\n def testUpgradeToI18NCaseNormalizer(self):\n from Products.CMFPlone.UnicodeSplitter.splitter import Splitter, CaseNormalizer\n ctool = self.portal.portal_catalog\n ctool.plone_lexicon._pipeline[1] == (Splitter(), CaseNormalizer())\n alphas.upgradeToI18NCaseNormalizer(self.portal.portal_setup)\n self.assertEqual(ctool.plone_lexicon._pipeline[1].__class__.__name__, 'I18NNormalizer')\n\n def testUpgradeTinyMCE(self):\n # skip test in new Plones that don't install tinymce to begin with\n if 'portal_tinymce' not in self.portal:\n return\n\n alphas.upgradeTinyMCE(self.portal.portal_setup)\n jstool = getToolByName(self.portal, 'portal_javascripts')\n jsresourceids = jstool.getResourceIds()\n\n self.assertTrue('jquery.tinymce.js' in jsresourceids)\n for ne in ['tiny_mce.js', 'tiny_mce_init.js']:\n self.assertFalse(ne in jsresourceids)\n\n ksstool = getToolByName(self.portal, 'portal_kss', None)\n if ksstool is not None:\n kssresourceids = ksstool.getResourceIds()\n self.assertFalse(\n '++resource++tinymce.kss/tinymce.kss' in 
kssresourceids)\n\n request = self.app.REQUEST\n plone_view = queryMultiAdapter((self.portal, request), name=\"plone\")\n manager = queryMultiAdapter(\n (self.portal, request, plone_view), IContentProvider, 'plone.htmlhead')\n viewlets = getAdapters(\n (manager.context, manager.request, manager.__parent__, manager), IViewlet)\n self.assertFalse(u'tinymce.configuration' in dict(viewlets))\n\n def testInstallThemingNotPreviouslyInstalled(self):\n from plone.app.theming.interfaces import IThemeSettings\n from plone.registry.interfaces import IRegistry\n from zope.component import getUtility\n\n alphas.upgradePloneAppTheming(self.portal.portal_setup)\n\n registry = getUtility(IRegistry)\n if not PLONE5:\n self.assertRaises(KeyError, registry.forInterface, IThemeSettings)\n\n def testInstallThemingPreviouslyInstalled(self):\n from plone.app.theming.interfaces import IThemeSettings\n from plone.registry.interfaces import IRegistry\n from zope.component import getUtility\n\n self.portal.portal_setup.runAllImportStepsFromProfile('profile-plone.app.theming:default')\n alphas.upgradePloneAppTheming(self.portal.portal_setup)\n\n registry = getUtility(IRegistry)\n\n try:\n registry.forInterface(IThemeSettings)\n except KeyError:\n self.fail(\"plone.app.theming not installed\")\n\n def testReindexNumericalTitle(self):\n from Products.CMFCore.utils import getToolByName\n\n # Create 2 pages, one with a numerical title\n portal = self.portal\n self.setRoles([\"Manager\"])\n catalog = getToolByName(portal, 'portal_catalog')\n portal.invokeFactory(\n id='num-title', type_name='Document',\n title='10 green bottles, hanging on the wall',\n )\n portal.invokeFactory(\n id='accidentally-fall', type_name='Document',\n title='And if one green bottle should accidentally fall',\n )\n\n # Change title of both, shouldn't be reindexed yet\n portal['accidentally-fall'].title = 'fell'\n portal['num-title'].title = '9 green bottles, hanging on the wall'\n self.assertEqual(\n catalog(id=portal['num-title'].id)[0].Title,\n '10 green bottles, hanging on the wall',\n )\n self.assertEqual(\n catalog(id=portal['accidentally-fall'].id)[0].Title,\n 'And if one green bottle should accidentally fall',\n )\n\n # Only the numerical title got reindexed\n portal.portal_setup.runAllImportStepsFromProfile('profile-plone.app.theming:default')\n alphas.reindex_sortable_title(portal.portal_setup)\n self.assertEqual(\n catalog(id=portal['num-title'].id)[0].Title,\n '9 green bottles, hanging on the wall'\n )\n self.assertEqual(\n catalog(id=portal['accidentally-fall'].id)[0].Title,\n 'And if one green bottle should accidentally fall',\n )\n\n\nclass TestMigrations_v4_3final_to4308(MigrationTest):\n\n def testAddDefaultPlonePasswordPolicy(self):\n # this add the 'Default Plone Password Policy' to Plone's acl_users\n portal = self.portal\n # make sure the 'Default Plone Password Policy' does not exist in acl_users\n portal.acl_users.manage_delObjects(ids=['password_policy', ])\n self.assertFalse('password_policy' in portal.acl_users.objectIds())\n # find the relevant upgrade step and execute it\n from Products.GenericSetup.upgrade import listUpgradeSteps\n relevantStep = [step for step in listUpgradeSteps(\n portal.portal_setup, 'Products.CMFPlone:plone', '4307')[0] if\n step['title'] == u'Add default Plone password policy'][0]\n # execute the step\n relevantStep['step'].handler(portal)\n # now it has been added...\n self.assertTrue('password_policy' in portal.acl_users.objectIds())\n\n\nclass TestFakeKupuMigration(MigrationTest):\n\n def 
afterSetUp(self):\n from plone.app.upgrade.kupu_bbb import PloneKupuLibraryTool\n portal = self.portal\n self.csstool = getToolByName(portal, 'portal_css')\n self.jstool = getToolByName(portal, 'portal_javascripts')\n self.control_panel = getToolByName(portal, 'portal_controlpanel')\n pprops = getToolByName(portal, 'portal_properties')\n self.site_properties = pprops.site_properties\n bad_expr = ('python:portal.kupu_library_tool.isKupuEnabled'\n '(REQUEST=request)')\n allowed_expr = 'python:\"kupu_library_tool\" not in portal'\n # Setup a fake kupu with resources and settings\n self.kupu_id = 'kupu_library_tool'\n portal._setObject(self.kupu_id, PloneKupuLibraryTool(id=self.kupu_id))\n self.csstool.registerStylesheet('somekupu.css', expression=bad_expr)\n self.csstool.registerStylesheet('nokupu.css', expression=allowed_expr)\n self.jstool.registerScript('somekupu.js', expression=bad_expr)\n self.jstool.registerScript('nokupu.js', expression=allowed_expr)\n self.control_panel.registerConfiglet('kupu', 'kupu', '')\n if self.site_properties.hasProperty('available_editors'):\n self.site_properties._updateProperty(\n 'available_editors', ('TinyMCE', 'Kupu', ''))\n else:\n self.site_properties._setProperty(\n 'available_editors', ('TinyMCE', 'Kupu', ''))\n if self.site_properties.hasProperty('default_editor'):\n self.site_properties._updateProperty('default_editor', 'Kupu')\n else:\n self.site_properties._setProperty('default_editor', 'Kupu')\n self.member_data = getToolByName(portal, 'portal_memberdata')\n if self.member_data.hasProperty('wysiwyg_editor'):\n self.member_data._updateProperty('wysiwyg_editor', 'Kupu')\n else:\n self.member_data._setProperty('wysiwyg_editor', 'Kupu')\n\n def testBeforeRemoveFakeKupu(self):\n # Test that our test setup has worked.\n self.assertTrue(self.kupu_id in self.portal)\n self.assertTrue(self.csstool.getResource('somekupu.css') is not None)\n self.assertTrue(self.csstool.getResource('nokupu.css') is not None)\n self.assertTrue(self.jstool.getResource('somekupu.js') is not None)\n self.assertTrue(self.jstool.getResource('nokupu.js') is not None)\n self.assertTrue(\n self.control_panel.getActionObject('Plone/kupu') is not None)\n self.assertTrue(\n 'Kupu' in self.site_properties.getProperty('available_editors'))\n self.assertEqual(\n self.site_properties.getProperty('default_editor'), 'Kupu')\n self.assertEqual(\n self.member_data.getProperty('wysiwyg_editor'), 'Kupu')\n\n def testRemoveFakeKupu(self):\n from plone.app.upgrade.v43.final import removeFakeKupu\n # Call the upgrade\n setup = getToolByName(self.portal, 'portal_setup')\n removeFakeKupu(setup)\n # Test that the tool is gone\n self.assertTrue(self.kupu_id not in self.portal)\n # Assert that the bad resources are gone and the allowed ones\n # are still there.\n self.assertTrue(self.csstool.getResource('somekupu.css') is None)\n self.assertTrue(self.csstool.getResource('nokupu.css') is not None)\n self.assertTrue(self.jstool.getResource('somekupu.js') is None)\n self.assertTrue(self.jstool.getResource('nokupu.js') is not None)\n self.assertTrue(\n self.control_panel.getActionObject('Plone/kupu') is None)\n self.assertTrue(\n 'Kupu' not in\n self.site_properties.getProperty('available_editors'))\n self.assertNotEqual(\n self.site_properties.getProperty('default_editor'), 'Kupu')\n self.assertNotEqual(\n self.member_data.getProperty('wysiwyg_editor'), 'Kupu')\n\n def testNoRemoveFakeKupu(self):\n # Test that we do nothing when the tool is there and is not an\n # instance of the fake class\n from 
OFS.SimpleItem import SimpleItem\n self.portal._delObject(self.kupu_id)\n self.portal._setObject(self.kupu_id, SimpleItem(id=self.kupu_id))\n from plone.app.upgrade.v43.final import removeFakeKupu\n # Call the upgrade\n setup = getToolByName(self.portal, 'portal_setup')\n removeFakeKupu(setup)\n self.assertTrue(self.kupu_id in self.portal)\n self.assertTrue(self.csstool.getResource('somekupu.css') is not None)\n self.assertTrue(self.csstool.getResource('nokupu.css') is not None)\n self.assertTrue(self.jstool.getResource('somekupu.js') is not None)\n self.assertTrue(self.jstool.getResource('nokupu.js') is not None)\n self.assertTrue(\n self.control_panel.getActionObject('Plone/kupu') is not None)\n self.assertTrue(\n 'Kupu' in self.site_properties.getProperty('available_editors'))\n self.assertEqual(\n self.site_properties.getProperty('default_editor'), 'Kupu')\n self.assertEqual(\n self.member_data.getProperty('wysiwyg_editor'), 'Kupu')\n\n\nclass TestQIandGS(MigrationTest):\n\n def testUnmarkUnavailableProfiles(self):\n from plone.app.upgrade.v43.final import unmarkUnavailableProfiles\n setup = getToolByName(self.portal, 'portal_setup')\n profile_id = 'dummyxyz:default'\n # Pretend that this profile was installed at some point.\n setup.setLastVersionForProfile(profile_id, '1.0')\n self.assertTrue(profile_id in setup._profile_upgrade_versions)\n # The profile is not known to portal_setup: it is not\n # registered in zcml. So our cleanup function gets rid of it.\n unmarkUnavailableProfiles(setup)\n self.assertFalse(profile_id in setup._profile_upgrade_versions)\n\n def testMarkProductsInstalledForUninstallableProfiles(self):\n from plone.app.upgrade.v43.final import \\\n markProductsInstalledForUninstallableProfiles\n\n # Register a profile.\n product_id = 'my.test.package'\n profile_id = '{0}:default'.format(product_id)\n profile_registry.registerProfile(\n 'default', 'title', 'description', '/my/path',\n product=product_id, profile_type=EXTENSION)\n\n # Hide the profile.\n class HiddenProfiles(object):\n implements(INonInstallable)\n\n def getNonInstallableProfiles(self):\n return [profile_id]\n\n sm = getSiteManager()\n sm.registerUtility(factory=HiddenProfiles, name='my.test.package')\n\n # Check that nothing is installed at first.\n setup = getToolByName(self.portal, 'portal_setup')\n self.assertEqual(\n setup.getLastVersionForProfile(profile_id), 'unknown')\n qi = getToolByName(self.portal, 'portal_quickinstaller')\n self.assertFalse(qi.isProductInstalled(product_id))\n\n # Call our upgrade function. This should have no effect,\n # because the profile is not installed.\n markProductsInstalledForUninstallableProfiles(setup)\n self.assertEqual(\n setup.getLastVersionForProfile(profile_id), 'unknown')\n self.assertFalse(qi.isProductInstalled(product_id))\n\n # Now fake that the profile is installed and try again.\n setup.setLastVersionForProfile(profile_id, '1.0')\n markProductsInstalledForUninstallableProfiles(setup)\n self.assertEqual(\n setup.getLastVersionForProfile(profile_id), ('1', '0'))\n self.assertTrue(qi.isProductInstalled(product_id))\n\n # Cleanup test.\n profile_registry.unregisterProfile('default', product_id)\n\n def testCleanupUninstalledProducts(self):\n from plone.app.upgrade.v43.final import cleanupUninstalledProducts\n qi = getToolByName(self.portal, 'portal_quickinstaller')\n setup = getToolByName(self.portal, 'portal_setup')\n # Register three profiles. 
I wanted to take 'new' as product\n # id, but there is already a python module 'new', so that goes\n # wrong.\n profile_registry.registerProfile(\n 'default', '', '', '/my/path',\n product='newproduct', profile_type=EXTENSION)\n profile_registry.registerProfile(\n 'default', '', '', '/my/path',\n product='installed', profile_type=EXTENSION)\n profile_registry.registerProfile(\n 'default', '', '', '/my/path',\n product='uninstalled', profile_type=EXTENSION)\n # Mark as installed.\n setup.setLastVersionForProfile('newproduct:default', '1')\n setup.setLastVersionForProfile('installed:default', '1')\n setup.setLastVersionForProfile('uninstalled:default', '1')\n # Notify of installation of three products.\n qi.notifyInstalled('newproduct', status='new')\n qi.notifyInstalled('installed', status='installed')\n qi.notifyInstalled('uninstalled', status='uninstalled')\n # The status differs, so QI does not think all are actually\n # installed.\n self.assertFalse(qi.isProductInstalled('newproduct'))\n self.assertTrue(qi.isProductInstalled('installed'))\n self.assertFalse(qi.isProductInstalled('uninstalled'))\n # But all three have an object in the QI.\n self.assertTrue('newproduct' in qi)\n self.assertTrue('installed' in qi)\n self.assertTrue('uninstalled' in qi)\n # And all three have a version in GS.\n self.assertEqual(\n setup.getLastVersionForProfile('newproduct:default'), ('1',))\n self.assertEqual(\n setup.getLastVersionForProfile('installed:default'), ('1',))\n self.assertEqual(\n setup.getLastVersionForProfile('uninstalled:default'), ('1',))\n # Call our cleanup function.\n cleanupUninstalledProducts(setup)\n # Same results for isProductInstalled.\n self.assertFalse(qi.isProductInstalled('newproduct'))\n self.assertTrue(qi.isProductInstalled('installed'))\n self.assertFalse(qi.isProductInstalled('uninstalled'))\n # The two not installed items are removed.\n self.assertFalse('newproduct' in qi)\n self.assertTrue('installed' in qi)\n self.assertFalse('uninstalled' in qi)\n # Those twee are unknown in GS.\n self.assertEqual(\n setup.getLastVersionForProfile('newproduct:default'), 'unknown')\n self.assertEqual(\n setup.getLastVersionForProfile('installed:default'), ('1',))\n self.assertEqual(\n setup.getLastVersionForProfile('uninstalled:default'), 'unknown')\n # Cleanup test.\n profile_registry.unregisterProfile('default', 'newproduct')\n profile_registry.unregisterProfile('default', 'installed')\n profile_registry.unregisterProfile('default', 'uninstalled')\n","sub_path":"plone/app/upgrade/v43/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":17323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"64414929","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.template.loader import render_to_string\nfrom django.core.mail import send_mail\nfrom core.forms import ContactForm\nfrom core import mail\nfrom core.models import NewsletterRecipient\nfrom django.core.validators import EmailValidator\nfrom django.core.exceptions import ValidationError\nfrom django.db import IntegrityError\nimport logging\n\ndef index(request):\n\tform = ContactForm()\n\tcontext = RequestContext(request)\n\tcontext_dic = {\"form\": form}\n\n\treturn render_to_response('core/index.html', context_dic, context)\n\ndef static(request, site, content_type=\"text/html\"):\n\treturn 
render_to_response(\n\t\t'core/{0}'.format(site),\n\t\t{},\n\t\tRequestContext(request),\n\t\tcontent_type=content_type\n\t)\n\ndef send_contactmail(request):\n\t'''\n\tCalled via ajax.\n\t'''\n\tcontext = RequestContext(request)\n\tstatus = False\n\n\tif request.method == 'POST':\n\t\tform = ContactForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tstatus = True\n\telse:\n\t\tform = ContactForm()\n\n\treturn render_to_response('core/contact_form.html', {\n\t\t\t'form': form,\n\t\t\t'success': status\n\t\t}, context)\n\ndef subscribeToNewsletter(request):\n\t'''\n\tCalled via ajax.\n\t'''\n\tcontext = RequestContext(request)\n\tcontext_dic = {}\n\n\tif request.method == 'POST':\n\t\ttry:\n\t\t\temail = request.POST['email'] if 'email' in request.POST else ''\n\n\t\t\tvalidator = EmailValidator(message='Bitte gib eine gültige E-Mail-Adresse an.')\n\t\t\tvalidator(email)\n\n\t\t\trecp = NewsletterRecipient(email=email)\n\t\t\trecp.save()\n\n\t\t\ttext = render_to_string('core/mail/subscribe.md', {'subscribe_id' : recp.confirm_id})\n\t\t\tmail.sendMail('info@zuks.org', [recp], text, 'ZUKS Newsletter Registrierung', display_unsubscribe=False)\n\n\t\t\tcontext_dic['success'] = True\n\t\texcept ValidationError as e:\n\t\t\tcontext_dic['error'] = e.message\n\t\texcept IntegrityError:\n\t\t\tcontext_dic['error'] = \"Für diese E-Mail-Adresse wurde bereits ein Newsletter angefordert.\"\n\t\texcept:\n\t\t\tlogging.exception(\"Newsletter subscribtion failed\")\n\t\t\tcontext_dic['error'] = \"Die Anfrage konnte leider nicht bearbeitet werden. Versuche es später erneut.\"\n\n\t\t\ttry:\n\t\t\t\t# Cleanup mail adress from database\n\t\t\t\trecp.delete()\n\t\t\texcept:\n\t\t\t\tpass\n\n\n\treturn render_to_response('core/subscribe_form.html', context_dic, context)\n\ndef confirmNewsletter(request, id):\n\tcontext = RequestContext(request)\n\tstatus = 'success'\n\n\ttry:\n\t\trecp = NewsletterRecipient.objects.get(confirm_id=id)\n\t\trecp.confirm()\n\t\trecp.save()\n\texcept NewsletterRecipient.DoesNotExist:\n\t\tstatus = 'expired'\n\n\treturn render_to_response('core/confirm.html', {'status' : status}, context)\n\ndef unsubscribeFromNewsletter(request, id):\n\tcontext = RequestContext(request)\n\n\ttry:\n\t\trecp = NewsletterRecipient.objects.get(confirm_id=id)\n\t\trecp.delete()\n\texcept NewsletterRecipient.DoesNotExist:\n\t\t# Is already unsubscribed, nothing to do\n\t\tpass\n\n\treturn render_to_response('core/unsubscribe.html', {}, context)\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"24570318","text":"import sys\n\nOUTPUT_TYPE = 'Output'\nANSWER_TYPE = 'Answer'\nGOTO_TYPE = 'Goto'\nCONCLUSION_TYPE = 'Conclusion'\n\nHOPPER_MESSAGE_PREFIX = 'Hopper: '\nUSER_MESSAGE_PREFIX = 'User: '\nCONCLUSION_MESSAGE_PREFIX = 'Conclusion: '\n\nclass Output:\n def __init__(self, text, label=None):\n self.text = text\n self.label = label\n self.type = OUTPUT_TYPE\n def __str__(self):\n return 'Output({}, label={})'.format(self.text, self.label)\n \n\nclass Answer:\n def __init__(self, text):\n self.text = text\n self.type = ANSWER_TYPE\n def __str__(self):\n return 'Answer({})'.format(self.text)\n\n \nclass Goto:\n def __init__(self, label):\n self.label = label\n self.type = GOTO_TYPE\n def __str__(self):\n return 'Goto({})'.format(self.label)\n\n \nclass Conclusion:\n def __init__(self, text):\n self.text = text\n self.type = CONCLUSION_TYPE\n 
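# debug-friendly representation, matching the other node classes above\n    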
def __str__(self):\n return 'Conclusion({})'.format(self.text)\n \n \nclass IndentationAndInputObject:\n def __init__(self, indentation, input_object):\n self.indentation = indentation\n self.input_object = input_object\n \n def __str__(self):\n return f\"Ind: {self.indentation}, \" + str(self.input_object)\n\n def __repr__(self):\n return f\"Ind: {self.indentation}, \" + str(self.input_object)\n\ndef print_conversation(flat_tree, user_answers):\n # Write your solution below\n # Some example code is provided to show you how to access our data structures, feel free to modify/delete\n \n n = len(flat_tree)\n \n label_map = {} # label to line number\n branching_ind = {} # key: line num, {key: answer text , value: line num of answer }\n recency_q = {} # key: indentation level, value: most recent line number with this indentation\n \n \n for i, row in enumerate(flat_tree):\n ind = row.indentation\n if (\n row.input_object.type == OUTPUT_TYPE \n and row.input_object.label != None\n ):\n label_map[row.input_object.label] = i\n \n elif row.input_object.type == ANSWER_TYPE:\n \n assert ind - 1 in recency_q\n \n output_line_num = recency_q[ind -1]\n if output_line_num not in branching_ind:\n branching_ind[output_line_num] = {}\n \n branching_ind[output_line_num][row.input_object.text] = i\n\n recency_q[ind] = i # most recent line of indentation level\n \n \n # print(flat_tree, user_answers)\n # print(branching_ind)\n \n if flat_tree[0].input_object.type == ANSWER_TYPE:\n line_num = -1 \n else:\n line_num = 0 \n \n \n answer_index = 0\n last_type = None\n while last_type != CONCLUSION_TYPE:\n # print(line_num)\n if line_num != -1:\n row = flat_tree[line_num]\n if row.input_object.type == OUTPUT_TYPE:\n print(HOPPER_MESSAGE_PREFIX + row.input_object.text)\n elif row.input_object.type == ANSWER_TYPE:\n print(USER_MESSAGE_PREFIX + row.input_object.text)\n elif row.input_object.type == CONCLUSION_TYPE:\n print(CONCLUSION_MESSAGE_PREFIX + row.input_object.text)\n elif row.input_object.type == GOTO_TYPE:\n line_num = label_map[row.input_object.label] -1 # minus 1 bc we want to stay at this position\n \n last_type = row.input_object.type\n\n if line_num in branching_ind:\n \n user_answer = user_answers[answer_index]\n answer_index += 1\n \n \n # print(user_answers)\n while user_answer not in branching_ind[line_num]:\n print(USER_MESSAGE_PREFIX + user_answer)\n print(\"Hopper: Invalid input\")\n user_answer = user_answers[answer_index]\n answer_index += 1\n \n\n line_num = branching_ind[line_num][user_answer]-1 # minus 1 bc we want to stay at this position\n \n line_num += 1\n\ndef parse_line(line):\n def parse_spaces_and_line(line):\n for i, c in enumerate(line):\n if c != ' ':\n return len(line[:i]), line[i:]\n raise RuntimeError(\"Found all whitespace line\")\n \n def parse_label_and_output(line_content):\n for i, c in enumerate(line_content):\n if not c.isdigit():\n if c == ':' and i > 0:\n return int(line_content[:i]), line_content[i+1:]\n else:\n return None, line_content\n return None, line_content\n \n indentation, line_content = parse_spaces_and_line(line)\n if line_content.startswith('-'):\n return IndentationAndInputObject(indentation, Answer(line_content[1:]))\n elif line_content.startswith('='):\n return IndentationAndInputObject(indentation, Conclusion(line_content[1:]))\n elif line_content.startswith('>'):\n return IndentationAndInputObject(indentation, Goto(int(line_content[1:])))\n else:\n label, output = parse_label_and_output(line_content)\n return IndentationAndInputObject(indentation, 
Output(output, label))\n\n \ndef read_input():\n tree = []\n answers = []\n reading_examples = False\n for line in sys.stdin.readlines():\n line = line.rstrip()\n if line == '---':\n reading_examples = True\n elif not reading_examples:\n tree.append(parse_line(line))\n else:\n answers.append(line)\n return tree, answers\n \n \nflat_tree, user_answers = read_input()\nprint_conversation(flat_tree, user_answers)","sub_path":"company_questions/hopper/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":5712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"38020589","text":"# Problem: https://www.hackerrank.com/challenges/breaking-best-and-worst-records/problem\n\n#!/bin/python3\n\ndef breakingRecords(scores):\n minscore, maxscore = scores[0], scores[0]\n mincount, maxcount = 0, 0 \n for i in range(1, len(scores)):\n if scores[i] > maxscore:\n maxscore = scores[i]\n maxcount += 1\n if scores[i] < minscore:\n minscore = scores[i]\n mincount += 1\n print(maxcount, mincount)\n\n\nn = int(input())\nscores = list(map(int, input().rstrip().split()))\nresult = breakingRecords(scores)\n\n","sub_path":"Hackerrank/Algorithms/breakingTheRecords.py","file_name":"breakingTheRecords.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"453624936","text":"#!/usr/bin/python3\n\n\"\"\"\nClass that define a Student\n\"\"\"\n\n\nclass Student():\n \"\"\"\n Define a Student\n \"\"\"\n def __init__(self, first_name, last_name, age):\n \"\"\"Initialization Student\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\"\n retrieves a dictionary representation of a Student\n \"\"\"\n\n if attrs is None:\n return (self.__dict__)\n my_dict = {}\n for attr in attrs:\n if attr in self.__dict__.keys():\n my_dict[attr] = self.__dict__[attr]\n return (my_dict)\n","sub_path":"0x0B-python-input_output/12-student.py","file_name":"12-student.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"330716804","text":"#!/usr/bin/env python\n# encoding: utf-8\n#pylint: disable=no-member, no-init, too-many-public-methods\n#pylint: disable=attribute-defined-outside-init\n# This disable is because the tests need to be name such that\n# you can understand what the test is doing from the method name.\n#pylint: disable=missing-docstring\n\"\"\"\ntests.py\n\n\"\"\"\n\nimport datetime\n\nfrom test_base import APIBaseTestCase, unittest #pylint: disable=relative-import\n\nfrom google.appengine.ext import ndb\n\nfrom app import models\nfrom app.constants import ADMIN_ROLE\n\nfrom ddt import ddt, data, unpack\n\ndef make_fake_course(creator):\n return models.Course(\n name=\"cs61a\",\n institution=\"UC Soumya\",\n term=\"Fall\",\n year=\"2014\",\n creator=creator.key,\n staff=[],\n active=True)\n\ndef make_fake_assignment(course, creator):\n return models.Assignment(\n name='hw1',\n points=3,\n display_name=\"CS 61A\",\n templates=\"[]\",\n course=course.key,\n creator=creator.key,\n max_group_size=4,\n due_date=datetime.datetime.now())\n\n\n@ddt\nclass APITest(object): #pylint: disable=no-init\n \"\"\"\n Simple test case for the API\n \"\"\"\n @classmethod\n def get_basic_instance(cls, mutate=False):\n \"\"\"\n Gets a basic instance of the model class.\n \"\"\"\n raise NotImplementedError()\n\n def setUp(self): #pylint: 
disable=super-on-old-class, invalid-name\n \"\"\"Set up the API Test.\n\n Sets up the authenticator stub, and logs you in as an admin.\"\"\"\n super(APITest, self).setUp()\n self.login('dummy_admin')\n\n def get_accounts(self):\n return {\n \"dummy_admin\": models.User(\n key=ndb.Key(\"User\", \"dummy@admin.com\"),\n email=\"dummy@admin.com\",\n first_name=\"Admin\",\n last_name=\"Jones\",\n login=\"albert\",\n role=ADMIN_ROLE\n ),\n \"dummy_student\": models.User(\n key=ndb.Key(\"User\", \"dummy@student.com\"),\n email=\"dummy@student.com\",\n first_name=\"Student\",\n last_name=\"Jones\",\n login=\"billy\",\n )\n }\n\n ## INDEX ##\n\n def test_index_empty(self):\n \"\"\"Tests there are no entities when the db is created.\"\"\"\n self.get_index()\n self.assertJson([])\n\n def test_index_one_added(self):\n \"\"\"Tests that the index method gives the added entity.\"\"\"\n inst = self.get_basic_instance()\n inst.put()\n self.get_index()\n self.assertJson([inst.to_json()])\n\n def test_index_one_removed(self):\n \"\"\"Tests that removing an entity makes it disappear from the index.\"\"\"\n inst = self.get_basic_instance()\n inst.put()\n self.get_index()\n self.assertJson([inst.to_json()])\n\n inst.key.delete()\n self.get_index()\n self.assertJson([])\n\n def test_index_one_removed_from_two(self):\n \"\"\"\n Tests that removing one item out of two in the DB makes sure\n the other item is still found.\n \"\"\"\n inst = self.get_basic_instance()\n inst.put()\n inst2 = self.get_basic_instance(mutate=True)\n inst2.put()\n self.get_index()\n self.assertTrue(inst.to_json() in self.response_json,\n self.response_json)\n self.assertTrue(inst2.to_json() in self.response_json,\n self.response_json)\n\n inst2.key.delete()\n self.get_index()\n self.assertTrue(inst.to_json() in self.response_json,\n self.response_json)\n self.assertTrue(inst2.to_json() not in self.response_json,\n self.response_json)\n\n pagination_tests = [\n (3, 2),\n (10, 3),\n (2, 3),\n (2, 2)\n ]\n\n @data(*pagination_tests)\n @unpack\n def test_pagination(self, total_objects, num_page):\n \"\"\"\n Tests pagination by creating a specified number of entities, and\n checking if the number of entities retrieved are less than the\n specified max per page.\n\n total_objects - the number of entities to be created\n num_page - the maximum number of entities per page\n\n To create more copies of this test, just add a tuple to the\n pagination_tests list.\n @unpack allows the ddt package to work with the `pagination_tests`\n list of tuples.\n \"\"\"\n for _ in range(total_objects):\n inst = self.get_basic_instance(mutate=True)\n inst.put()\n while total_objects > 0:\n if hasattr(self, 'page'):\n self.get_index(page=self.page, num_page=num_page)\n else:\n self.get_index(num_page=num_page)\n\n num_instances = len(self.response_json)\n if self.name == 'user' and num_instances < num_page:\n total_objects += 1 # To take care of the dummy already there\n\n self.assertTrue(\n num_instances <= num_page,\n \"There are too many instances returned. There are \" +\n str(num_instances) + \" instances\")\n self.assertTrue(num_instances == min(total_objects, num_page),\n \"Not right number returned: \" + str(total_objects) +\n \" vs. 
\" +str(num_instances) + str(self.response_json))\n total_objects -= num_page\n self.page += 1\n\n\n ## ENTITY GET ##\n\n def test_get_basic(self):\n \"\"\"Tests that a basic get works.\"\"\"\n inst = self.get_basic_instance()\n inst.put()\n self.get_entity(inst)\n self.assertJson(inst.to_json())\n\n def test_get_with_two_entities(self):\n \"\"\"\n Tests that getting one entity with two in the DB gives the right one.\n \"\"\"\n inst = self.get_basic_instance()\n inst.put()\n inst2 = self.get_basic_instance()\n inst2.put()\n\n self.get_entity(inst2)\n self.assertJson(inst2.to_json())\n\n def test_get_invalid_id_errors(self):\n \"\"\"Tests that a get on an invalid ID errors.\"\"\"\n self.get('/{}/1'.format(self.name))\n self.assertStatusCode(404)\n\n ## ENTITY POST ##\n\n def test_entity_create_basic(self):\n \"\"\"Tests creating an empty entity.\"\"\"\n inst = self.get_basic_instance(mutate=True)\n self.post_entity(inst)\n\n gotten = self.model.get_by_id(self.response_json['key'])\n self.assertEqual(gotten.key, inst.key)\n\n def test_create_two_entities(self):\n inst = self.get_basic_instance(mutate=True)\n self.post_entity(inst)\n self.assertStatusCode(201)\n gotten = self.model.get_by_id(self.response_json['key'])\n\n inst2 = self.get_basic_instance(mutate=True)\n self.post_entity(inst2)\n self.assertStatusCode(201)\n gotten2 = self.model.get_by_id(self.response_json['key'])\n\n self.assertEqual(gotten.key, inst.key)\n self.assertEqual(gotten2.key, inst2.key)\n\n ## ENTITY PUT ##\n\n ## ENTITY DELETE ##\n\n\nclass AssignmentAPITest(APITest, APIBaseTestCase):\n model = models.Assignment\n name = 'assignment'\n num = 1\n access_token = 'dummy_admin'\n\n def setUp(self):\n super(AssignmentAPITest, self).setUp()\n\n def get_basic_instance(self, mutate=True):\n name = 'proj'\n if mutate:\n name += str(self.num)\n self.num += 1\n\n self._course = make_fake_course(self.user)\n self._course.put()\n self._assignment = rval = make_fake_assignment(self._course, self.user)\n rval.name = name\n return rval\n\n def post_entity(self, inst, *args, **kwds):\n \"\"\"Posts an entity to the server.\"\"\"\n data = inst.to_json()\n data['course'] = data['course']['id']\n\n self.post_json('/{}'.format(self.name),\n data=data, *args, **kwds)\n if self.response_json and 'key' in self.response_json:\n if inst.key:\n self.assertEqual(inst.key.id(), self.response_json['key'])\n else:\n inst.key = models.ndb.Key(self.model,\n self.response_json.get('key'))\n\n\nclass SubmissionAPITest(APITest, APIBaseTestCase):\n model = models.Submission\n name = 'submission'\n access_token = \"submitter\"\n\n num = 1\n def setUp(self):\n super(SubmissionAPITest, self).setUp()\n self.assignment_name = u'test assignment'\n self._course = make_fake_course(self.user)\n self._course.put()\n self._assign = make_fake_assignment(self._course, self.user)\n self._assign.name = self.assignment_name\n self._assign.put()\n\n self._submitter = self.accounts['dummy_student']\n self._submitter.put()\n self.logout()\n self.login('dummy_student')\n\n def get_basic_instance(self, mutate=True):\n rval = models.Submission(\n submitter=self._submitter.key,\n assignment=self._assign.key)\n return rval\n\n def post_entity(self, inst, *args, **kwds):\n \"\"\"Posts an entity to the server.\"\"\"\n data = inst.to_json()\n data['assignment'] = self.assignment_name\n data['submitter'] = data['submitter']['id']\n\n self.post_json('/{}'.format(self.name),\n data=data, *args, **kwds)\n if self.response_json and 'key' in self.response_json:\n if inst.key:\n 
self.assertEqual(inst.key.id(), self.response_json['key'])\n else:\n inst.key = models.ndb.Key(self.model,\n self.response_json.get('key'))\n\n def test_invalid_assignment_name(self):\n self.assignment_name = 'assignment'\n inst = self.get_basic_instance()\n\n self.post_entity(inst)\n self.assertStatusCode(400)\n\n def test_sorting(self):\n time = datetime.datetime.now()\n delta = datetime.timedelta(days=1)\n changed_time = time - delta\n\n inst = self.get_basic_instance()\n inst.created = changed_time\n inst.put()\n\n inst2 = self.get_basic_instance(mutate=True)\n inst2.created = time\n inst2.put()\n\n self.get_index(created='>|%s' % str(changed_time - datetime.timedelta(hours=7)))\n self.assertJson([inst2.to_json()])\n\n self.get_index(created='<|%s' % str(time - datetime.timedelta(hours=7)))\n self.assertJson([inst.to_json()])\n\n\n\n\nclass CourseAPITest(APITest, APIBaseTestCase):\n model = models.Course\n name = 'course'\n num = 1\n access_token = 'dummy_admin'\n\n def get_basic_instance(self, mutate=True):\n name = 'testcourse'\n if mutate:\n name += str(self.num)\n self.num += 1\n rval = make_fake_course(self.user)\n rval.name = name\n return rval\n\nclass VersionAPITest(APITest, APIBaseTestCase):\n model = models.Version\n name = 'version'\n num = 1\n access_token = 'dummy_admin'\n\n def get_basic_instance(self, mutate=True):\n name = 'testversion'\n if mutate:\n name += str(self.num)\n self.num += 1\n return self.model(key=ndb.Key('Version', name),\n name=name, versions=['1.0.0', '1.1.0'], base_url=\"https://www.baseurl.com\")\n\nclass GroupAPITest(APITest, APIBaseTestCase):\n model = models.Group\n name = 'group'\n num = 1\n access_token = 'dummy_admin'\n\n def setUp(self):\n super(GroupAPITest, self).setUp()\n self.course = make_fake_course(self.user)\n self.course.put()\n self.assignment = make_fake_assignment(self.course, self.user)\n self.assignment.put()\n\n def get_basic_instance(self, mutate=True):\n name = 'testversion'\n if mutate:\n name += str(self.num)\n self.num += 1\n return self.model(assignment=self.assignment.key)\n\n def test_add_member(self):\n members = [self.accounts['dummy_student'].key]\n inst = self.get_basic_instance()\n inst.put()\n\n self.post_json(\n '/{}/{}/add_member'.format(self.name, inst.key.id()),\n data={'member': members[0].id()},\n method='PUT')\n\n inst = self.model.get_by_id(inst.key.id())\n self.assertEqual(inst.invited_members, members)\n\n def test_remove_member(self):\n members = [self.accounts['dummy_student'].key]\n inst = self.get_basic_instance()\n inst.members = members\n inst.put()\n\n self.post_json(\n '/{}/{}/remove_member'.format(self.name, inst.key.id()),\n data={'member': members[0].id()},\n method='PUT')\n\n self.assertEquals(None, self.model.get_by_id(inst.key.id()))\n\n def test_entity_create_basic(self):\n # No entity create for Groups\n pass\n\n def test_create_two_entities(self):\n # No entity create for Groups\n pass\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"server/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":12960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"354079850","text":"import numpy as np\nimport random\n\nclass game:\n def __init__(self):\n self.state_width = 10\n self.state_height = 10\n self.replay = []\n self.reset()\n self.game_speed = 4\n self.min_speed = 2\n self.moves = [\"left\", \"right\", \"down\", \"rotate_c\", \"rotate_a\"]\n self.letters = [\"l\", \"r\", \"d\", \"c\", \"a\"]\n 
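# goal state: a board whose bottom four rows are completely filled\n        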
self.make_goal_state()\n self.num_actions = len(self.moves)\n\n def reset(self):\n self.state = np.zeros((self.state_width, self.state_height))\n self.board = np.zeros((self.state_width, self.state_height))\n self.get_new_piece()\n self.position = [random.randint(0,self.state_width-len(self.piece)), 0]\n self.timestep = 0\n self.set_state()\n\n def make_goal_state(self):\n self.goal_state = np.zeros((self.state_width, self.state_height))\n for x in range(self.state_width):\n for y in range(self.state_height-4, self.state_height):\n self.goal_state[x][y] = 1\n\n def print_state(self):\n return self.get_printable(self.state)\n\n def print_goal_state(self):\n return self.get_printable(self.goal_state)\n\n def get_printable(self, s):\n printable = \"\"\n for index, row in enumerate(s.T):\n printable += \" |\" + \"\".join([\"*\" if x==1 else \" \" for x in row]) + \"| \"\n printable += \"\".join([str(int(x)) for x in row]) + \"\\n\"\n printable += \" \" + \"-\"*(self.state_width-2) + \"\\n\"\n return printable\n\n def set_state(self):\n self.state = np.copy(self.board)\n s_x = len(self.piece)\n s_y = len(self.piece[0])\n x = self.position[0]\n y = self.position[1]\n for i in range(s_x):\n for j in range(s_y):\n if self.piece[i][j] == 1:\n self.state[x+i][y+j] = self.piece[i][j]\n\n def get_new_piece(self):\n pieces = [ [[1], [1], [1], [1]],\n [[1, 0], [1, 0], [1, 1]],\n [[0, 1], [0, 1], [1, 1]],\n [[0, 1], [1, 1], [1, 0]],\n [[1, 1], [1, 1]] ]\n self.piece = np.array(random.choice(pieces))\n\n def is_game_over(self):\n if not self.can_move_down() and self.position[1] == 0:\n return True\n return False\n\n def settle_piece(self):\n s_x = len(self.piece)\n s_y = len(self.piece[0])\n x = self.position[0]\n y = self.position[1]\n height_reached = y+s_y\n for i in range(s_x):\n for j in range(s_y):\n if self.piece[i][j] == 1:\n self.board[x+i][y+j] = self.piece[i][j]\n return height_reached\n\n def remove_rows(self):\n rows_removed = 0\n temp = self.board.tolist()\n for index, row in enumerate(temp):\n if np.sum(row) == self.state_width:\n del temp[index]\n temp.insert(0, np.zeros((self.state_width)))\n rows_removed += 1\n if rows_removed > 0:\n self.board = np.array(temp)\n return rows_removed\n\n def overlap_check(self, pos, piece):\n x = pos[0]\n y = pos[1]\n s_x = len(piece)\n s_y = len(piece[0])\n clipping = self.board[x:x+s_x,y:y+s_y]\n if piece.shape != clipping.shape:\n return False\n for xp in range(s_x):\n for yp in range(s_y):\n if piece[xp][yp] == 1 and clipping[xp][yp] == 1:\n return False\n return True\n\n def move_left(self):\n new_x = self.position[0]\n if new_x > 0:\n new_x -= 1\n return [new_x, self.position[1]]\n\n def move_right(self):\n new_x = self.position[0]\n if new_x <= self.state_width - len(self.piece[0]):\n new_x += 1\n return [new_x, self.position[1]]\n\n def move_down(self):\n new_y = self.position[1]\n if new_y <= self.state_height - len(self.piece[0]):\n new_y += 1\n return [self.position[0], new_y]\n\n def rotate(self, n):\n return np.rot90(self.piece, n)\n\n def rotate_c(self):\n return np.rot90(self.piece)\n\n def rotate_a(self):\n return np.rot90(self.piece, 3)\n\n def can_move_left(self):\n return self.overlap_check(self.move_left(), self.piece)\n\n def can_move_right(self):\n if self.position[0] < 1:\n return False\n return self.overlap_check(self.move_right(), self.piece)\n\n def can_move_down(self):\n height = len(self.piece[0])\n if self.position[1] + height >= self.state_height:\n return False\n return self.overlap_check(self.move_down(), self.piece)\n\n 
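# a rotation is only legal if the rotated footprint stays on the board and does not overlap settled blocks\n    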
def can_rotate_a(self):\n new_shape = self.rotate_a()\n new_width = len(new_shape)\n if self.position[0] + new_width > self.state_width:\n return False\n new_height = len(new_shape[0])\n if self.position[1] + new_height > self.state_height:\n return False\n return self.overlap_check(self.position, self.rotate_a())\n\n def can_rotate_c(self):\n new_shape = self.rotate_c()\n new_width = len(new_shape)\n if self.position[0] + new_width > self.state_width:\n return False\n new_height = len(new_shape[0])\n if self.position[1] + new_height > self.state_height:\n return False\n return self.overlap_check(self.position, self.rotate_c())\n\n def sample_random_action(self):\n return random.randint(0, len(self.moves)-1)\n \n # This is a work in progress that I haven't had time to finish.\n # It does't do anything meaningful right now\n def move_ai(self):\n scan_x = 0\n scan_y = self.state_height\n found_fit = False\n desired_rotation = 0\n while found_fit == False:\n rotations = []\n upper_pos = []\n for n in range(0, 4):\n rot = self.rotate(n)\n y_pos = scan_y -len(rot)-1\n upper_pos.append(y_pos)\n rotations.append(rot)\n fit_rotations = []\n for index, r in enumerate(rotations):\n overlap = self.overlap_check([scan_x, upper_pos[index]], r)\n if overlap == True:\n fit_rotations.append(index)\n if len(fit_rotations) > 0:\n sbcounts = []\n for f in fit_rotations:\n bottom_row = np.copy(self.board[scan_y-1])\n block = np.copy(rotations[f])\n bottom_block = block[len(block)-1]\n for p, b in enumerate(bottom_block):\n if b == 1:\n bottom_row[p] = 1\n sbcounts.append(np.sum(bottom_row))\n if len(sbcounts) > 0:\n desired_rotation = fit_rotations[np.argmax(sbcounts)]\n found_fit = True\n if found_fit == False:\n scan_x += 1\n if scan_x + len(self.piece) > self.state_width:\n scan_x = 0\n scan_y -= 1\n if scan_y < 1:\n return random.randint(0, len(self.moves)-1)\n if desired_rotation > 0:\n return 3\n elif self.position[0] < scan_x:\n return 1\n elif self.position[0] > scan_x:\n return 0\n elif self.position[0] == scan_x and self.position[0] < self.state_height-1:\n return 2\n\n def start_new_piece(self):\n best_height = self.settle_piece()\n rows_removed = self.remove_rows()\n self.get_new_piece()\n self.position = [random.randint(0,self.state_width-len(self.piece)), 0]\n return rows_removed, best_height\n\n def step(self, action):\n info = \"\"\n info += \"\\n Action: \" + self.moves[action]\n info += \"\\n Piece height: \" + str(len(self.piece[0]))\n info += \"\\n Piece width: \" + str(len(self.piece))\n if self.is_game_over():\n return self.state, -1, True, \"Game over\"\n self.timestep += 1\n invalid_action = False\n rows_removed = 0\n best_height = 0\n info += \"\\n Old position: \" + str(self.position)\n if action == self.moves.index(\"left\"):\n if self.can_move_left():\n self.position = self.move_left()\n else:\n invalid_action = True\n elif action == self.moves.index(\"right\"):\n if self.can_move_right():\n self.position = self.move_right()\n else:\n invalid_action = True\n elif action == self.moves.index(\"down\"):\n if self.can_move_down():\n self.position = self.move_down()\n else:\n info += \"\\n Settled piece\"\n rows_removed, best_height = self.start_new_piece()\n elif action == self.moves.index(\"rotate_a\"):\n if self.can_rotate_a():\n self.piece = self.rotate_a()\n else:\n invalid_action = True\n elif action == self.moves.index(\"rotate_c\"):\n if self.can_rotate_c():\n self.piece = self.rotate_c()\n else:\n invalid_action = True\n info += \"\\n New position: \" + str(self.position)\n\n 
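# gravity: every game_speed steps the piece is forced down one row; if it cannot fall, it settles and a new piece spawns\n        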
if self.timestep % self.game_speed == 0 and self.timestep > 0:\n if self.can_move_down():\n self.position = self.move_down()\n else:\n rows_removed, best_height = self.start_new_piece()\n reward = 0\n if invalid_action == True:\n info += \"\\n Invalid action\"\n #reward = -0.04\n #if best_height > 0:\n #reward = best_height/20\n if rows_removed > 0:\n reward = rows_removed * rows_removed\n info += \"\\n Rows removed: \" + str(rows_removed)\n info += \"\\n Reward: \" + str(reward)\n self.set_state()\n return self.state, reward, False, info\n\n","sub_path":"pytorch_drqn_tetris/tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":9996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"333828170","text":"#!/usr/bin/env python\n#-# Copyright 2012-2016 Karlsruhe Institute of Technology\n#-#\n#-# Licensed under the Apache License, Version 2.0 (the \"License\");\n#-# you may not use this file except in compliance with the License.\n#-# You may obtain a copy of the License at\n#-#\n#-# http://www.apache.org/licenses/LICENSE-2.0\n#-#\n#-# Unless required by applicable law or agreed to in writing, software\n#-# distributed under the License is distributed on an \"AS IS\" BASIS,\n#-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#-# See the License for the specific language governing permissions and\n#-# limitations under the License.\n\nimport os, sys, random, optparse\nfrom gcSupport import getConfig, parseOptions, utils\nfrom grid_control.datasets import DataSplitter\nfrom grid_control.parameters import DataParameterSource, ParameterFactory, ParameterInfo, ParameterMetadata, ParameterSource\n\nrandom.seed(0)\n\nusage = '%s [OPTIONS] ' % sys.argv[0]\nparser = optparse.OptionParser(usage=usage)\nparser.add_option('-l', '--list-parameters', dest='listparams', default=False, action='store_true',\n\thelp='')\nparser.add_option('-M', '--manager', dest='manager', default=None,\n\thelp='Select parameter source manager')\nparser.add_option('-p', '--parameter', dest='parameters', default=[], action='append',\n\thelp='Specify parameters')\nparser.add_option('-o', '--output', dest='output', default='',\n\thelp='Show only specified parameters')\nparser.add_option('-s', '--static', dest='static', default=False, action='store_true',\n\thelp='Assume a static parameterset')\nparser.add_option('-a', '--active', dest='active', default=False, action='store_true',\n\thelp='Show activity state')\nparser.add_option('-d', '--disabled', dest='inactive', default=False, action='store_true',\n\thelp='Show disabled parameter sets')\nparser.add_option('-t', '--untracked', dest='untracked', default=False, action='store_true',\n\thelp='Display untracked variables')\nparser.add_option('-c', '--collapse', dest='collapse', default=0, action='count',\n\thelp='Do not collapse dataset infos in display')\nparser.add_option('-I', '--intervention', dest='intervention', default=False, action='store_true',\n\thelp='Display intervention tasks')\nparser.add_option('-f', '--force-intervention', dest='forceiv', default=False, action='store_true',\n\thelp='Simulate dataset intervention')\nparser.add_option('-D', '--dataset', dest='dataset', default='',\n\thelp='Add dataset splitting (use \"True\" to simulate a dataset)')\nparser.add_option('-i', '--reinit', dest='init', default=False, action='store_true',\n\thelp='Trigger re-init')\nparser.add_option('-r', '--resync', dest='resync', default=False, action='store_true',\n\thelp='Trigger 
re-sync')\nparser.add_option('-V', '--visible', dest='visible', default='',\n\thelp='Set visible variables')\nparser.add_option('-S', '--save', dest='save',\n\thelp='Saves information to specified file')\n(opts, args) = parseOptions(parser)\n\nif len(args) != 1:\n\tutils.exitWithUsage(usage)\n\ndef main():\n\t# Set config based on settings from config file or command line\n\tconfigFile = None\n\tif os.path.exists(args[0]):\n\t\tconfigFile = args[0]\n\tconfig = getConfig(configFile, section = 'global')\n\tconfig.changeView(setSections = ['jobs']).set('nseeds', '1', '?=')\n\tconfigParameters = config.changeView(setSections = ['parameters'])\n\tif opts.parameters:\n\t\tutils.vprint('Provided options:')\n\t\tfor p in opts.parameters:\n\t\t\tk, v = p.split('=', 1)\n\t\t\tconfigParameters.set(k.strip(), v.strip().replace('\\\\n', '\\n'), '=')\n\t\t\tutils.vprint('\\t%s: %s' % (k.strip(), v.strip()))\n\t\tutils.vprint('')\n\tif not os.path.exists(args[0]):\n\t\tconfigParameters.set('parameters', str.join(' ', args).replace('\\\\n', '\\n'))\n\tif opts.dataset:\n\t\tconfigParameters.set('default lookup', 'DATASETNICK')\n#\tconfigParameters.set('parameter adapter', 'BasicParameterAdapter', '=') # Don't track parameter changes\n\tif opts.verbosity > 2:\n\t\tconfig.changeView(setSections = None).write(sys.stdout)\n\n\t# Initialize ParameterFactory\n\tconfigTask = config.changeView(setSections = [config.get(['task', 'module'], 'DummyTask')])\n\tpm = config.getPlugin('parameter factory', 'SimpleParameterFactory', cls = ParameterFactory).getInstance()\n\n\t# Create dataset parameter source\n\tclass DummySplitter:\n\t\tdef getMaxJobs(self):\n\t\t\treturn 3\n\t\tdef getSplitInfo(self, pNum):\n\t\t\tmkEntry = lambda ds, fl, n, nick: { DataSplitter.Dataset: ds, DataSplitter.Nickname: nick,\n\t\t\t\tDataSplitter.FileList: fl, DataSplitter.NEntries: n }\n\t\t\trndStr = lambda: md5(str(random.random())).hexdigest()[:10]\n\t\t\ttmp = [ mkEntry('ds1', ['a', 'b'], 23, 'data_1'), mkEntry('ds1', ['1'], 42, 'data_1'),\n\t\t\t\tmkEntry('ds2', ['m', 'n'], 123, 'data_2'), mkEntry('ds2', ['x', 'y', 'z'], 987, 'data_3') ]\n\t\t\treturn tmp[pNum]\n\n\tclass DataSplitProcessorTest:\n\t\tdef getKeys(self):\n\t\t\treturn map(lambda k: ParameterMetadata(k, untracked=True),\n\t\t\t\t['DATASETINFO', 'DATASETID', 'DATASETPATH', 'DATASETBLOCK', 'DATASETNICK'])\n\n\t\tdef process(self, pNum, splitInfo, result):\n\t\t\tresult.update({\n\t\t\t\t'DATASETINFO': '',\n\t\t\t\t'DATASETID': splitInfo.get(DataSplitter.DatasetID, None),\n\t\t\t\t'DATASETPATH': splitInfo.get(DataSplitter.Dataset, None),\n\t\t\t\t'DATASETBLOCK': splitInfo.get(DataSplitter.BlockName, None),\n\t\t\t\t'DATASETNICK': splitInfo.get(DataSplitter.Nickname, None),\n\t\t\t\t'DATASETSPLIT': pNum,\n\t\t\t})\n\n\tif opts.dataset.lower() == 'true':\n\t\tutils.vprint('Registering dummy data provider data')\n\t\tdataSplitter = DummySplitter()\n\telif opts.dataset:\n\t\tdataSplitter = DataSplitter.loadState(opts.dataset)\n\n\tif opts.dataset:\n\t\tDataParameterSource.datasetsAvailable['data'] = DataParameterSource(\n\t\t\tconfig.getWorkPath(), 'data', None, dataSplitter, DataSplitProcessorTest())\n\n\tpsource = pm.getSource(config)\n\n\tif opts.forceiv:\n\t\tfor dp in DataParameterSource.datasetSources:\n\t\t\tdp.intervention = (set([1]), set([0]), True)\n\n\tif opts.listparams:\n\t\tresult = []\n\t\tneedGCParam = False\n\t\tif psource.getMaxJobs() != None:\n\t\t\tcountActive = 0\n\t\t\tfor jobNum in range(psource.getMaxJobs()):\n\t\t\t\tinfo = 
psource.getJobInfo(jobNum)\n\t\t\t\tif info[ParameterInfo.ACTIVE]:\n\t\t\t\t\tcountActive += 1\n\t\t\t\tif opts.inactive or info[ParameterInfo.ACTIVE]:\n\t\t\t\t\tif not info[ParameterInfo.ACTIVE]:\n\t\t\t\t\t\tinfo['GC_PARAM'] = 'N/A'\n\t\t\t\t\tif str(info['GC_PARAM']) != str(jobNum):\n\t\t\t\t\t\tneedGCParam = True\n\t\t\t\t\tresult.append(info)\n\t\t\tif opts.displaymode == 'parseable':\n\t\t\t\tutils.vprint('Count,%d,%d' % (countActive, psource.getMaxJobs()))\n\t\t\telse:\n\t\t\t\tutils.vprint('Number of parameter points: %d' % psource.getMaxJobs())\n\t\t\t\tif countActive != psource.getMaxJobs():\n\t\t\t\t\tutils.vprint('Number of active parameter points: %d' % countActive)\n\t\telse:\n\t\t\tresult.append(psource.getJobInfo(123))\n\t\tenabledOutput = opts.output.split(',')\n\t\toutput = filter(lambda k: not opts.output or k in enabledOutput, psource.getJobKeys())\n\t\tstored = filter(lambda k: k.untracked == False, output)\n\t\tuntracked = filter(lambda k: k.untracked == True, output)\n\n\t\tif opts.collapse > 0:\n\t\t\tresult_old = result\n\t\t\tresult = {}\n\t\t\tresult_nicks = {}\n\t\t\thead = [('COLLATE_JOBS', '# of jobs')]\n\t\t\tif 'DATASETSPLIT' in stored:\n\t\t\t\tstored.remove('DATASETSPLIT')\n\t\t\t\tif (opts.collapse == 1):\n\t\t\t\t\tstored.append('DATASETNICK')\n\t\t\t\t\thead.append(('DATASETNICK', 'DATASETNICK'))\n\t\t\t\telif opts.collapse == 2:\n\t\t\t\t\thead.append(('COLLATE_NICK', '# of nicks'))\n\t\t\tfor pset in result_old:\n\t\t\t\tif ('DATASETSPLIT' in pset) and (opts.collapse == 1):\n\t\t\t\t\tpset.pop('DATASETSPLIT')\n\t\t\t\tnickname = None\n\t\t\t\tif ('DATASETNICK' in pset) and (opts.collapse == 2):\n\t\t\t\t\tnickname = pset.pop('DATASETNICK')\n\t\t\t\th = md5(repr(map(lambda key: pset.get(key), stored))).hexdigest()\n\t\t\t\tresult.setdefault(h, []).append(pset)\n\t\t\t\tresult_nicks.setdefault(h, set()).add(nickname)\n\n\t\t\tdef doCollate(h):\n\t\t\t\ttmp = result[h][0]\n\t\t\t\ttmp['COLLATE_JOBS'] = len(result[h])\n\t\t\t\ttmp['COLLATE_NICK'] = len(result_nicks[h])\n\t\t\t\treturn tmp\n\t\t\tresult = map(doCollate, result)\n\t\telse:\n\t\t\thead = [('GC_JOB_ID', '#')]\n\t\t\tif needGCParam:\n\t\t\t\thead.append(('GC_PARAM', 'GC_PARAM'))\n\t\tif opts.active:\n\t\t\thead.append((ParameterInfo.ACTIVE, 'ACTIVE'))\n\t\tif opts.visible:\n\t\t\tstored = opts.visible.split(',')\n\t\thead.extend(sorted(zip(stored, stored)))\n\t\tif opts.untracked:\n\t\t\thead.extend(sorted(map(lambda n: (n, '(%s)' % n), filter(lambda n: n not in ['GC_PARAM', 'GC_JOB_ID'], untracked))))\n\t\tutils.vprint('')\n\t\tutils.printTabular(head, result)\n\n\tif opts.save:\n\t\tutils.vprint('')\n\t\tParameterSource.getClass('GCDumpParameterSource').write(opts.save, psource)\n\t\tutils.vprint('Parameter information saved to ./%s' % opts.save)\n\n\tif opts.intervention:\n\t\tutils.vprint('')\n\t\ttmp = psource.getJobIntervention()\n\t\tif tmp:\n\t\t\tif opts.displaymode == 'parseable':\n\t\t\t\tutils.vprint('R: %s' % str.join(',', map(str, tmp[0])))\n\t\t\t\tutils.vprint('D: %s' % str.join(',', map(str, tmp[1])))\n\t\t\telse:\n\t\t\t\tutils.vprint(' Redo: %r' % tmp[0])\n\t\t\t\tutils.vprint('Disable: %r' % tmp[1])\n\t\telse:\n\t\t\tif opts.displaymode == 'parseable':\n\t\t\t\tutils.vprint('NOINT')\n\t\t\telse:\n\t\t\t\tutils.vprint('No intervention')\n\nif __name__ == 
'__main__':\n\tsys.exit(main())\n","sub_path":"scripts/parameterList.py","file_name":"parameterList.py","file_ext":"py","file_size_in_byte":9165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"5784945","text":"# Head first Python\n# Page 108\n\nman = []\nother = []\ntry:\n data = open('~/Desktop/data/sketch.txt')\n for line in data:\n try:\n (role, lineSpoken) = line.split(':', 1)\n lineSpoken = lineSpoken.strip()\n if role == 'Man':\n man.append(lineSpoken)\n elif role == 'Other Man':\n other.append(lineSpoken)\n except ValueError:\n pass\n data.close()\nexcept IOError:\n print('The data file is missing!')\n\ntry:\n manOut = open('~/Desktop/data/manOut.txt', 'w')\n otherOut = open('~/Desktop/data/otherOut.txt', 'w')\n print(man, file = manOut)\n print(other, file = otherOut)\n manOut.close()\n otherOut.close()\nexcept IOError:\n print('File error')\n","sub_path":"19_writeData.py","file_name":"19_writeData.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"497862174","text":"import os\r\n\r\nfrom PySide2.QtCore import *\r\nfrom PySide2.QtGui import *\r\nfrom PySide2.QtWidgets import *\r\n\r\nfrom pymxs import runtime as rt\r\n\r\n\r\nclass CustomFileDialog(QFileDialog):\r\n def __init__(self, *args, **kwards):\r\n QFileDialog.__init__(self, *args, **kwards)\r\n\r\n\r\nclass ScrollMessageBox(QMessageBox):\r\n def __init__(self, text, title=\"\", *args, **kwargs):\r\n lineCount = text.count(\"\\n\")\r\n print(lineCount)\r\n self.minHeight = 150 + lineCount * 10\r\n if self.minHeight > 400:\r\n self.minHeight = 400\r\n QMessageBox.__init__(self, *args, **kwargs)\r\n scroll = QScrollArea(self)\r\n scroll.setWidgetResizable(True)\r\n self.content = QWidget()\r\n self.setWindowTitle(title)\r\n scroll.setWidget(self.content)\r\n lay = QVBoxLayout(self.content)\r\n self.content.setSizePolicy(\r\n QSizePolicy.Expanding, QSizePolicy.Expanding)\r\n txt = QLabel(text, self)\r\n txt.setWordWrap(True)\r\n lay.addWidget(txt)\r\n self.layout().addWidget(scroll, 0, 0, 1, self.layout().columnCount())\r\n self.setSizeGripEnabled(True)\r\n\r\n def event(self, e):\r\n result = QMessageBox.event(self, e)\r\n if(self.minHeight is not None):\r\n self.setMinimumHeight(self.minHeight)\r\n self.setMaximumHeight(16777215)\r\n self.setMinimumWidth(400)\r\n self.setMaximumWidth(16777215)\r\n self.setSizePolicy(QSizePolicy.MinimumExpanding,\r\n QSizePolicy.MinimumExpanding)\r\n return result\r\n\r\n\r\ndef truncateStringFromLeft(string, maxLength):\r\n stringLen = len(string)\r\n truncString = string[max(stringLen-maxLength, 0):stringLen]\r\n if (stringLen > maxLength):\r\n truncString = \"...\" + truncString\r\n return truncString\r\n# add checkbox to a widget.\r\n# x=0 y=0 represent the top left corner of the widget\r\n\r\n\r\ndef createCheckBox(qtWidget, x=14, y=2, w=17, h=17):\r\n headerGeom = qtWidget.geometry()\r\n frameTopLeft = headerGeom.topLeft()\r\n fx = frameTopLeft.x()\r\n fy = frameTopLeft.y()\r\n posX = x + fx\r\n posY = y + fy\r\n topLeft = QPoint(posX, posY)\r\n botRight = QPoint(posX + w, posY + h)\r\n geom = QRect(topLeft, botRight)\r\n checkBox = QCheckBox(qtWidget)\r\n checkBox.setGeometry(geom)\r\n return checkBox\r\n\r\n\r\ndef openSaveFileNameDialog(parent=None, caption=\"\", _dir=\"\", _filter=\"\", forcedExtension=\".gltf\"):\r\n dialog = QFileDialog(parent, caption, _dir, _filter)\r\n dialog.setDirectory(_dir)\r\n if dialog.exec_() == 1:\r\n 
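# QDialog.exec_() returns 1 (QDialog.Accepted) when the user confirms the dialog\r\n        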
filePath = dialog.selectedFiles()\r\n if len(filePath) > 0:\r\n ext = os.path.splitext(filePath[0])\r\n if forcedExtension is not None:\r\n if (ext[1] != forcedExtension):\r\n filePath[0] = ext[0] + forcedExtension\r\n if (filePath[0] != \"\"):\r\n return filePath[0]\r\n return None\r\n\r\n\r\ndef validateFloatLineEdit(x):\r\n try:\r\n return float(x)\r\n except:\r\n try:\r\n # turns comma into period and try again\r\n return float(x.replace(\",\", \".\"))\r\n except:\r\n print(\"Invalid float entered\")\r\n return None\r\n\r\n\r\ndef getNewMessageBox(text, title=\"\"):\r\n msgBox = QMessageBox()\r\n msgBox.setWindowTitle(title)\r\n msgBox.setMaximumHeight(500)\r\n msgBox.setMaximumSize\r\n msgBox.setText(text)\r\n return msgBox\r\n\r\n\r\ndef popup(text, title=\"\"):\r\n msgBox = getNewMessageBox(text, title)\r\n msgBox.exec_()\r\n\r\n\r\ndef popup_detail(text, title=\"\", detail=\"\"):\r\n msgBox = getNewMessageBox(text, title)\r\n msgBox.setDetailedText(detail)\r\n msgBox.exec_()\r\n\r\n\r\ndef popup_scroll(text, title=\"\"):\r\n msgBox = ScrollMessageBox(text=text, title=title)\r\n msgBox.exec_()\r\n\r\n\r\ndef popup_Yes_No(text, title=\"\"):\r\n msgBox = getNewMessageBox(text, title)\r\n buttons = QMessageBox.Yes | QMessageBox.No\r\n msgBox.setStandardButtons(buttons)\r\n msgBox.setButtonText(0, \"lol\")\r\n output = msgBox.exec_()\r\n if(output == QMessageBox.Yes):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef popup_Yes_YesToAll_No(text, title=\"\"):\r\n msgBox = getNewMessageBox(text, title)\r\n buttons = QMessageBox.Yes | QMessageBox.YesToAll | QMessageBox.No\r\n msgBox.setStandardButtons(buttons)\r\n output = msgBox.exec_()\r\n if(output == QMessageBox.Yes):\r\n return 2\r\n if(output == QMessageBox.YesToAll):\r\n return 1\r\n else:\r\n return 0\r\n\r\n\r\ndef popup_Yes_YesToAll_No_NoToAll(text, title=\"\"):\r\n msgBox = getNewMessageBox(text, title)\r\n buttons = QMessageBox.Yes | QMessageBox.YesToAll | QMessageBox.No | QMessageBox.NoToAll\r\n msgBox.setStandardButtons(buttons)\r\n output = msgBox.exec_()\r\n if(output == QMessageBox.Yes):\r\n return 3\r\n if(output == QMessageBox.YesToAll):\r\n return 2\r\n if(output == QMessageBox.No):\r\n return 1\r\n if(output == QMessageBox.NoToAll):\r\n return 0\r\n else: # default\r\n print(\"Default\")\r\n return 1\r\n","sub_path":"scripts/maxsdk/qtUtils.py","file_name":"qtUtils.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"413067492","text":"\nimport numpy as np\n\ndef remove(vol_goe):\n result = np.copy(vol_goe)\n num = 10\n try_num = int(len(vol_goe[:,0])/num)\n for i in range(try_num):\n for ch in range(16):\n me = np.mean(vol_goe[num*i:num*(i+1),ch])\n std = np.std(vol_goe[num*i:num*(i+1),ch])\n for j in range(num):\n if vol_goe[num*i+j,ch] < (me -5* std) or vol_goe[num*i+j,ch] > (me + 5*std):\n print(ch,num*i+j)\n result[num*i+j,ch] = None#1/constant\n return result\n","sub_path":"analysis/process/remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"330430424","text":"# usage: python data_fuser.py [mac1] [mac2] ... 
[mac(n)]\nfrom __future__ import print_function\nfrom ctypes import c_void_p, cast, POINTER\nfrom mbientlab.metawear import MetaWear, libmetawear, parse_value, cbindings\nfrom time import sleep\nfrom threading import Event\nfrom sys import argv\n\nstates = []\n\nclass State:\n def __init__(self, device):\n self.device = device\n self.callback = cbindings.FnVoid_VoidP_DataP(self.data_handler)\n self.processor = None\n\n def data_handler(self, ctx, data):\n values = parse_value(data, n_elem = 2)\n print(\"acc: (%.4f,%.4f,%.4f), gyro: (%.4f,%.4f,%.4f)\" % (values[0].x, values[0].y, values[0].z, values[1].x, values[1].y, values[1].z))\n\n def setup(self):\n libmetawear.mbl_mw_settings_set_connection_parameters(self.device.board, 7.5, 7.5, 0, 6000)\n sleep(1.5)\n\n e = Event()\n\n def processor_created(context, pointer):\n self.processor = pointer\n e.set()\n fn_wrapper = cbindings.FnVoid_VoidP_VoidP(processor_created)\n \n acc = libmetawear.mbl_mw_acc_get_acceleration_data_signal(self.device.board)\n gyro = libmetawear.mbl_mw_gyro_bmi160_get_rotation_data_signal(self.device.board)\n\n signals = (c_void_p * 1)()\n signals[0] = gyro\n libmetawear.mbl_mw_dataprocessor_fuser_create(acc, signals, 1, None, fn_wrapper)\n e.wait()\n\n libmetawear.mbl_mw_datasignal_subscribe(self.processor, None, self.callback)\n\n def start(self):\n \n libmetawear.mbl_mw_gyro_bmi160_enable_rotation_sampling(self.device.board)\n libmetawear.mbl_mw_acc_enable_acceleration_sampling(self.device.board)\n\n libmetawear.mbl_mw_gyro_bmi160_start(self.device.board)\n libmetawear.mbl_mw_acc_start(self.device.board)\n \n# for i in range(len(argv) - 1):\nd = MetaWear('F7:83:98:15:21:07')\nd.connect()\nprint(\"Connected to \" + d.address)\nstates.append(State(d))\n\nfor s in states:\n print(\"Configuring %s\" % (s.device.address))\n s.setup()\n\nfor s in states:\n s.start()\n\nsleep(10.0)\n\nprint(\"Resetting devices\")\nevents = []\nfor s in states:\n e = Event()\n events.append(e)\n\n s.device.on_disconnect = lambda s: e.set()\n libmetawear.mbl_mw_debug_reset(s.device.board)\n\nfor e in events:\n e.wait()","sub_path":"Metamotion/Mobitrack_scripts/data_fuser_test.py","file_name":"data_fuser_test.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"570424011","text":"# Smart Calculator\n# Author: Jason Tolbert (https://github.com/jasonalantolbert)\n# Python Version: 3.8\n\n\n# BEGINNING OF PROGRAM\n\nimport re\nfrom numexpr import evaluate\n\n \nclass Variable: # contains variable creation method, existing variable dictionary, and variable format validation regex\n vars_dict = {}\n\n sym_format = re.compile(\"^[A-Za-z]+$\")\n val_format = re.compile(\"^[0-9]+$\")\n\n @staticmethod\n def create_var(declaration):\n try:\n symbol, value = (declaration.replace(\" \", \"\")).split(sep=\"=\") # tries to split declaration on equals sign\n except ValueError: # multiple equals signs will throw a ValueError exception\n print(\"Invalid assignment\\n\")\n return\n else:\n if not re.match(Variable.sym_format, symbol): # validates variable name\n print(\"Invalid identifier\\n\")\n return\n elif not re.match(Variable.val_format, value): # validates variable value\n try:\n # if the variable value is not properly formatted, the program checks if the user is trying to\n # set the value of the variable to the value of an existing variable, and if so, adds the new\n # variable to the dictionary with the value of the existing variable\n 
Variable.vars_dict[symbol] = Variable.vars_dict[value]\n except KeyError:\n # a KeyError exception is raised if the value the user is\n # trying to assign to the variable is invalid\n print(\"Invalid assignment\\n\")\n return\n else:\n Variable.vars_dict[symbol] = value # adds variable to dictionary if declaration is properly formatted\n\n\ndef variable_resolution(expr): # resolves variables\n if re.match(\"[A-Za-z] *[0-9]|[0-9] *[A-Za-z]\", expr):\n # checks for invalid variable identifiers (i.e. alphanumeric strings, such as \"a2a\" or \"n22\")\n print(\"Invalid identifier\\n\")\n return \"skip\"\n\n expr_split = re.findall(\"[\\w]+|[+-/*]|[()]+\", expr) # splits expression into list expr_split\n\n for key, value in Variable.vars_dict.items(): # replaces known variables in expr_split with their values\n for index, element in enumerate(expr_split):\n if key == element:\n expr_split.insert(index, value)\n expr_split.pop(index + 1)\n\n expr = \"\".join(expr_split) # rejoins expression as expr\n\n if re.search(\"[A-Za-z]\", expr): # any alphabetic characters still in the expression are unkown variables\n print(\"Unknown variable\\n\")\n return \"skip\"\n\n return expr\n\n\ndef operator_resolution(expr): # handles occurrences of *, /, and ^ operators\n if re.match(\"\\*\\*+|//+\", expr): # sequences of * or / are invalid\n return \"invalid\"\n else:\n expr = re.sub(\"/\", \"//\", expr) # replaces / with // for integer division\n expr = re.sub(\"\\^\", \"**\", expr) # replaces ^ with ** for exponents\n return expr\n\n\ndef calculate(expression): # evaluates mathematical expressions\n if re.search(\"[A-Za-z]\", expression): # resolves any variables if present\n expression = variable_resolution(expression)\n\n if re.search(\"[/*^]\", expression): # resolves any *, /, or ^ if present\n expression = operator_resolution(expression)\n\n if expression and expression != \"skip\": # runs if expression is not empty or skip key\n try: # attempts to evaluate the expression\n print(f\"{evaluate(expression)}\\n\")\n except Exception: # if any exception is raised, the expression is assumed to be invalid\n print(\"Invalid expression\\n\")\n\n\ndef commands(command): # contains instructions for commands (e.g. /help)\n if command == \"/help\":\n print(\"\\n\"\n \"This program evaluates mathematical expressions.\\n\"\n \"Expressions are combinations of numbers (e.g. 1, 2, 3) and operators (e.g. +, -, *, /). \"\n \"For example: 1 + 2 - 3 * 4.\\n\"\n \"Supported operators include:\\n\"\n \"+: addition\\n\"\n \"-: subtraction\\n\"\n \"*: multiplication\\n\"\n \"/: integer division\\n\"\n \"^: power\\n\"\n \"To evaluate an expression, simply type it and press the Enter or Return key on your keyboard.\\n\"\n \"The program also supports variables. To create a variable, type \"\n \"[variable name] = [variable value] (without brackets). 
\"\n \"Once created, variables can be used in any expression.\\n\")\n return\n\n if command == \"/exit\":\n print(\"Bye!\")\n exit()\n\n # both of the above commands will eventually leave commands(), so if the program gets to this point, the command\n # entered is invalid.\n print(\"Unknown command\\n\")\n return\n\n\ndef main(): # acts as master control for the rest of the program\n print(\"\\nEnter an expression to calculate it.\\n\"\n \"To exit, enter /exit.\\n\"\n \"For help, enter /help.\\n\")\n \n while True:\n selection = input()\n if selection.startswith(\"/\"): # evaluates the input as a command if the input begins with a forward slash\n commands(selection)\n elif \"=\" in selection: # evaluates the input as a variable assignment if the input contains an equals sign\n Variable.create_var(selection)\n else: # otherwise, evaluates the expression\n calculate(selection)\n\n\nmain() # runs main()\n\n# END OF PROGRAM\n","sub_path":"smartCalculator/task/calculator/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"551358239","text":"from django.core.exceptions import SuspiciousOperation\nfrom django.db.models import Q\nfrom django.conf import settings\nfrom rest_framework.exceptions import APIException\nfrom usermanagement.models import OnlineTeam\n\nimport json, urllib\n\nclass TemporarilyUnavailable(APIException):\n status_code = 503\n default_detail = 'Service temporarily unavailable, try again later.'\n default_code = 'service_unavailable'\n\ndef check_uniqueness(contestant_list):\n meta = contestant_list[0]._meta\n model = meta.model\n model_fields = meta.get_fields()\n\n unique_field_names = [ field.name for field in model_fields if field.unique ]\n eachother_check_list = {name: [] for name in unique_field_names}\n for contestant in contestant_list:\n q = Q()\n for field_name in unique_field_names:\n # Check if equivalent fields are present in the database\n try:\n attribute = getattr(contestant, field_name)\n if attribute:\n query_dict = {field_name: attribute}\n q |= Q(**query_dict)\n\n # Check if contestants are unique from eachother\n attribute_list = eachother_check_list[field_name]\n if str(attribute) in attribute_list:\n raise SuspiciousOperation(\"Similar contestants in one team!\")\n else:\n eachother_check_list[field_name].append(str(attribute))\n except AttributeError:\n print(\"Contestant has no attribute %s\" % field_name)\n \n\n if len(model.objects.filter(q)) != 0:\n raise SuspiciousOperation(\"Similar contestant already present in the database!\")\n\n\ndef validate_uniqueness(validated_data, contestantType, team, contestants_data, main_contestant_data):\n main_contestant = contestantType(team=team, **main_contestant_data)\n main_contestant.is_primary = True\n contestants = [main_contestant,]\n for contestant_data in contestants_data[1:]:\n contestant = contestantType(team=team, **contestant_data)\n contestants.append(contestant)\n \n check_uniqueness(contestants)\n return contestants\n\ndef create_contestants(validated_data, TeamType, ContestantType):\n \n contestants_data = validated_data.pop('contestants')\n main_contestant_data = contestants_data[0]\n team = TeamType(**validated_data)\n team.email = main_contestant_data['email']\n team.emails = [data['email'] for data in contestants_data]\n\n contestants = validate_uniqueness(validated_data, ContestantType, team, contestants_data, main_contestant_data)\n try:\n team.sendNewMail 
= True\n team.save()\n except:\n raise\n # contestants = validate_uniqueness(validated_data, ContestantType, team, contestants_data, main_contestant_data)\n for contestant in contestants:\n contestant.team = team\n contestant.save()\n return team\n\n\ndef validate_recaptcha(request):\n recaptcha_response = request.data['recaptcha']\n url = 'https://www.google.com/recaptcha/api/siteverify'\n values = {\n 'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,\n 'response': recaptcha_response\n }\n data = urllib.parse.urlencode(values).encode()\n req = urllib.request.Request(url, data=data)\n response = urllib.request.urlopen(req)\n result = json.loads(response.read().decode())\n\n #TODO: Find out what was wrong with recaptcha validation.\n print(result)\n if result['success']:\n return True\n return False\n\n\ndef validate_contestants(contestant_serializer, contestant_list):\n if len(contestant_list) == 3:\n result = True\n for contestant in contestant_list:\n serializer = contestant_serializer(data=contestant)\n result &= serializer.is_valid()\n return result\n return False\n","sub_path":"usermanagement/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"4250604","text":"import unittest\nfrom review.slack import get_commit_text, get_commit_payload, \\\n get_mrkdwn_payload, get_first_payload\n\n\nclass SlackCase(unittest.TestCase):\n\n def fake_commit(self):\n return {\n \"author_name\": 'author_name',\n \"title\": \"title\",\n \"branch\": \"master\",\n \"commit_url\": \"http://gitlab.com/commitid\"\n }\n\n def test_get_commit_text(self):\n self.assertTrue(\n \"v:\n count+=1\n sum1=A[i]\n else:\n sum1+=A[i]\n return count\n# @param A : list of integers\n# @param B : integer\n# @return an integer\ndef books(A, B):\n s=max(A)\n e=sum(A)\n if (int(len(A))0:\n \n td = np.sqrt(div_coarse**2+shr_coarse**2)\n ls = np.sqrt(area_coarse)\n \n td_list.append(td)\n ls_list.append(ls)\n \n \n bx = fig2.add_subplot(3,3,idx)\n \n m = pr.plot.area_def2basemap(area_def)\n m.drawmeridians(np.arange(0.,360.,5.),latmax=90.,labels=[0,0,0,1,])\n m.drawparallels(np.arange(79.,90.,1),labels=[1,0,0,0])\n \n patches_all = []\n for k in range(div_coarse.shape[0]):\n patch = Polygon(tripts_coarse[k,:,:])\n patches_all.append(patch)\n\n #plot filled triangles\n p = PatchCollection(patches_all, cmap=plt.cm.bwr, alpha=1)\n p.set_array(div_coarse*1e6)\n p.set_clim(interval)\n bx.add_collection(p)\n \n idx=idx+1\n \n multiplot = 'multiplot_nofilter'+reg+'_'+date+'_'+file_name_end+'_1000.png'\n fig2.savefig(outpath+multiplot)\n\nfig1.savefig(outpath+powerlaw)\n\n#flatten this list of lists\ntd_list = np.concatenate(td_list).ravel()\nls_list = np.concatenate(ls_list).ravel()\n\n#write data for scatter plots\nprint('Storing data for the scatter plots')\ntt = [ls_list,td_list]\ntable = list(zip(*tt))\n\nprint(outname_td)\nwith open(outname_td, 'wb') as f:\n #header\n f.write(b'lenght scale, total deformation\\n')\n np.savetxt(f, table, fmt=\"%s\", delimiter=\",\")\n\n","sub_path":"sid_scale.py","file_name":"sid_scale.py","file_ext":"py","file_size_in_byte":6205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"153035089","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nNavigation properties\n---------------------\n\nThe entity can define properties that link to other entities. These are known\nas navigation properties and are supported in this library.\n\n.. 
code-block:: python\n\n >>> order = Service.query(Order).first()\n >>> order.Shipper\n \n >>> order.Shipper.CompanyName\n 'Federal Shipping'\n\nWhen creating new instances, relationships can be assigned via navigation\nproperties:\n\n.. code-block:: python\n\n # query a shipper instance, just for this example\n Shipper = Service.entities['Shipper']\n my_shipper = Service.query(Shipper).first()\n\n # assign for the new Order\n order.Shipper = my_shipper\n Service.save(order)\n\"\"\"\n\ntry:\n # noinspection PyUnresolvedReferences\n from urllib.parse import urljoin\nexcept ImportError:\n # noinspection PyUnresolvedReferences\n from urlparse import urljoin\n\n\nclass NavigationProperty(object):\n \"\"\"\n A Property-like object for marking relationships between entities, but does\n not inherit from PropertyBase.\n \"\"\"\n def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False):\n from odata.property import PropertyBase\n self.name = name\n self.entitycls = entitycls\n self.is_collection = collection\n self.is_containment = containment\n if isinstance(foreign_key, PropertyBase):\n self.foreign_key = foreign_key.name\n else:\n self.foreign_key = foreign_key\n\n def __repr__(self):\n return u''.format(self.entitycls)\n\n def instances_from_data(self, raw_data, connection):\n if self.is_collection:\n return [self.instance_from_data(d, connection) for d in raw_data['value']] if raw_data['value'] else []\n else:\n return self.instance_from_data(raw_data, connection) if raw_data else None\n\n def instance_from_data(self, raw_data, connection): # mwa: this needs to be seperated form navproperty\n entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type'))\n e = entitycls.__new__(entitycls, from_data=raw_data)\n es = e.__odata__\n es.connection = connection\n return e\n \n def _getClass_by_response_type(self, matched_class, odata_type):\n if not odata_type: return matched_class\n for subclass in matched_class.__subclasses__():\n if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type)\n return matched_class\n \n def _get_parent_cache(self, instance):\n es = instance.__odata__\n ic = es.nav_cache\n if self.name not in ic:\n cache = {}\n ic[self.name] = cache\n else:\n cache = ic[self.name]\n return cache\n\n def _get_instances_from_server(self, instance):\n es = instance.__odata__\n connection = es.connection\n parent_url = es.instance_url\n parent_url += '/'\n url = urljoin(parent_url, self.name)\n raw_data = connection.execute_get(url)\n instances = self.instances_from_data(raw_data, connection)\n while '@odata.nextLink' in raw_data:\n url = raw_data.get('@odata.nextLink')\n raw_data = connection.execute_get(url)\n instances.extend(self.instances_from_data(raw_data, connection))\n return instances\n\n def __set__(self, instance, value):\n \"\"\"\n :type instance: odata.entity.EntityBase\n \"\"\"\n cache = self._get_parent_cache(instance)\n if self.is_collection:\n cache['collection'] = value\n else:\n cache['single'] = value\n instance.__odata__.set_property_dirty(self)\n\n def __get__(self, instance, owner):\n \"\"\"\n :type instance: odata.entity.EntityBase\n \"\"\"\n if instance is None:\n return self\n\n es = instance.__odata__\n cache = self._get_parent_cache(instance)\n\n if es.instance_url is None:\n if self.is_collection:\n return cache.get('collection', [])\n return cache.get('single', None)\n\n cache_type = 'collection' if self.is_collection else 'single'\n\n try:\n return 
cache[cache_type]\n        except KeyError:\n            cache[cache_type] = self._get_instances_from_server(instance)\n            return cache[cache_type]\n","sub_path":"odata/navproperty.py","file_name":"navproperty.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"221052474","text":"# Read the number of nodes and the number of edges (union operations)\r\nv, e = map(int, input().split())\r\nparent = [0] * (v + 1) # initialize the parent table\r\n\r\n# In the parent table, initialize each node's parent to itself\r\nfor i in range(v + 1):\r\n    parent[i] = i\r\n\r\n# Find the set that a given element belongs to\r\ndef find_parent(parent, i):\r\n    # If this is not the root node, recurse until the root node is found\r\n    if parent[i] != i:\r\n        return find_parent(parent, parent[i])\r\n    return i\r\n\r\n# Merge the sets that two elements belong to\r\ndef union_parent(parent, a, b):\r\n    a = find_parent(parent, a)\r\n    b = find_parent(parent, b)\r\n\r\n    if a < b: \r\n        parent[b] = a\r\n    else:\r\n        parent[a] = b\r\n\r\n# Perform each union operation\r\nfor i in range(e):\r\n    a, b = map(int, input().split())\r\n    union_parent(parent, a, b)\r\n\r\n# Print the set (root node) that each element belongs to\r\nprint(\"Set each element belongs to: \", end = \"\")\r\nfor i in range(1, v + 1):\r\n    u = find_parent(parent, i)\r\n    print(u, end = \" \")\r\n\r\nprint()\r\n\r\n# Print the contents of the parent table\r\nprint(\"Parent table: \", end = \"\")\r\nfor i in range(1, v + 1):\r\n    print(parent[i], end = \" \")\r\n\r\n","sub_path":"p.273 기본적인 서로소 집합.py","file_name":"p.273 기본적인 서로소 집합.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"229740486","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython import display\n\n\ndef display_rgb_env(env, step, total_reward):\n    plt.figure(3)\n    plt.clf()\n    plt.imshow(env.render(mode='rgb_array'))\n    plt.title(\"Step: {}, Total Reward: {}\".format(step, total_reward))\n    plt.axis('off')\n\n    display.display(plt.gcf())\n    display.clear_output(wait=True)\n\n\ndef display_simple_env(env, step, total_reward):\n    env.render()\n    display.clear_output(wait=True)\n\n\ndef dont_display(env, step, total_reward):\n    pass\n\n\ndef test_agent(env, agent, display):\n    display_func = {\n        'simple': display_simple_env,\n        'rgb': display_rgb_env,\n        None: dont_display\n    }[display]\n\n    state = env.reset()\n    step = 0\n    total_reward = 0\n    display_func(env, 0, total_reward)\n    while True:\n        state, reward, done, _ = env.step(agent.select_best_action(env, state))\n        total_reward += reward\n        step += 1\n        display_func(env, step, total_reward)\n        if done:\n            print('reward: ' + str(total_reward) + ', step: ' + str(step))\n            total_reward = 0\n            step = 0\n            state = env.reset()\n            # break\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"513559782","text":"# this script runs the validation against the feist data. 
it takes precalculated data from \"point_23_4_function\" and\n# just does some filtering to properly match with Feist data\n\nimport pandas, re\n\nin_filename ='pH72_I01_edited'\n\ncomp_contr = pandas.DataFrame.from_csv('../examples/' + in_filename + \".txt\", sep='\\t',header=0)\ncomp_contr_group = pandas.DataFrame.from_csv('../examples/' + 'pH72_I01_edited_noRC' + \".txt\", sep='\\t',header=0)\n\nfeist = pandas.DataFrame.from_csv(\"../../validation/feist_all_reaction_data.tsv\", sep='\\t', header=0)\n\n\n# get all the reaction names in the SBML file\nwith open('../../validation/' + 'allreac_names' + '.txt', 'r') as fp:\n    allreac_names = fp.readlines()\nallreac_names = [x.strip() for x in allreac_names]\n\n\n# match reactions\nfeist_reac_ids = list(feist.index)\nmatching_ids = [feist_reac_ids.index(reac) for i,reac in enumerate(allreac_names) if reac in feist_reac_ids]\nmatch_ids_feist = feist.iloc[matching_ids, :]\n\n\ncomp_reac_ids = list(comp_contr.index)\nmatching_ids2 = [i for i,reac in enumerate(allreac_names) if reac in feist_reac_ids]\nmatch_ids_comp = comp_contr.iloc[matching_ids2, :]\nmatch_ids_comp_group = comp_contr_group.iloc[matching_ids2, :]\n\n#transfer over index\nmatch_ids_feist.index=list(match_ids_comp.index)\n\n\n\n# identify reactions that component contrib did not calculate for\nzeros_and_nans = (match_ids_comp['model.dG0'].isnull() | match_ids_comp['model.dG0'].isin([0]))\nzeros_and_nans_group = (match_ids_comp['model.dG0'].isnull() | match_ids_comp['model.dG0'].isin([0]) | match_ids_comp_group['model.dG0'].isnull() | match_ids_comp_group['model.dG0'].isin([0]))\n\ntolerable_error = (match_ids_comp['dG0_std'] < 50)\n#non_crazy_values = (match_ids_comp['model.dG0'] < 900 & match_ids_comp['model.dG0'] > -900)\nall_rows = (-zeros_and_nans & tolerable_error)\nall_rows_group = (-zeros_and_nans_group & tolerable_error)\n\n# match_ids_comp[,'dG0_prime']\n\n\na = pandas.concat([match_ids_comp_group.dG0_prime, match_ids_comp_group.dGm_prime,match_ids_comp.dG0_prime, match_ids_comp.dGm_prime,match_ids_feist.deltaGpH72, match_ids_feist.mMdeltaGpH72], axis=1)\n\na = a[all_rows_group]\n\n# filter out crazy values\n#a = a[(a.dG0_prime < 900) & (a.dG0_prime > -900)]\n\na.to_csv('noRC' + '_data_editing.csv')","sub_path":"examples/point_25_onlygroup.py","file_name":"point_25_onlygroup.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"394777286","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/CIDAN/GUI/Tabs/ROIExtractionTab.py\n# Compiled at: 2020-04-29 16:53:56\n# Size of source mod 2**32: 26173 bytes\nfrom PySide2 import QtCore\nimport CIDAN.GUI.Tabs.Tab as Tab\nfrom PySide2.QtWidgets import *\nfrom PySide2.QtCore import *\nimport numpy as np, pyqtgraph as pg\nimport CIDAN.GUI.Data_Interaction.ROIExtractionThread as ROIExtractionThread\nfrom CIDAN.GUI.SettingWidget.SettingsModule import roi_extraction_settings\nimport CIDAN.GUI.ListWidgets.ROIListModule as ROIListModule\nimport CIDAN.GUI.Inputs.OptionInput as OptionInput\nimport CIDAN.GUI.ListWidgets.TrialListWidget as TrialListWidget\nfrom CIDAN.LSSC.functions.roi_extraction import combine_rois\n\nclass ROIExtractionTab(Tab):\n    __doc__ = 'Class controlling the ROI Extraction tab, inherits from Tab\\n\\n\\n    Attributes\\n    ----------\\n    main_widget : MainWidget\\n        A reference to 
the main widget\\n data_handler : DataHandler\\n A reference to the main DataHandler of MainWidget\\n click_event : bool\\n A bool that keeps track of whether a click event is currently happening used\\n by roi_click_event and select_roi\\n time_plot : pg.PlotWidget\\n the plot for the time traces\\n roi_list_module : ROIListModule\\n The module the controlls the list of ROIs\\n thread : ROIExtractionThread\\n The thread that runs the roi extraction process\\n foreground_slider : QSlider\\n slider that determines intensity of background image+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\\n\\n '\n\n def __init__(self, main_widget):\n self.main_widget = main_widget\n self.data_handler = main_widget.data_handler\n self.click_event = False\n self.add_image = False\n self.outlines = True\n self.select_pixel_on = False\n self.brush_size = 0\n self.current_selected_pixels_list = []\n self.current_selected_pixels_mask = np.zeros((self.data_handler.shape[1],\n self.data_handler.shape[2]),\n dtype=bool)\n self.previous_values = {}\n self.select_pixel_color = [0, 255, 0]\n self.select_mode = 'add'\n self.image_item = self.main_widget.roi_image_view.image_view.getImageItem()\n self.image_item.mouseClickEvent = lambda x: self.roi_view_click(x)\n self.image_item.mouseDragEvent = lambda x: self.roi_view_drag(x)\n tab_selector_roi = QTabWidget()\n tab_selector_roi.setStyleSheet('QTabWidget {font-size: 20px;}')\n roi_modification_tab = QWidget()\n roi_modification_tab.setStyleSheet('margin:0px; padding: 0px;')\n roi_modification_tab_layout = QVBoxLayout()\n roi_modification_tab.setLayout(roi_modification_tab_layout)\n self.roi_list_module = ROIListModule(main_widget.data_handler, self)\n roi_modification_tab_layout.addWidget(self.roi_list_module)\n roi_modification_button_top_layout = QHBoxLayout()\n roi_modification_tab_layout.addLayout(roi_modification_button_top_layout)\n add_new_roi = QPushButton(text='New ROI from Selection')\n add_to_roi = QPushButton(text='Add to ROI')\n add_to_roi.clicked.connect(lambda x: self.modify_roi(self.roi_list_module.current_selected_roi, 'add'))\n sub_to_roi = QPushButton(text='Subtract from ROI')\n sub_to_roi.clicked.connect(lambda x: self.modify_roi(self.roi_list_module.current_selected_roi, 'subtract'))\n delete_roi = QPushButton(text='Delete ROI')\n roi_modification_button_top_layout.addWidget(add_to_roi)\n roi_modification_button_top_layout.addWidget(sub_to_roi)\n roi_modification_button_top_layout.addWidget(add_new_roi)\n roi_modification_button_top_layout.addWidget(delete_roi)\n painter_button_group = QButtonGroup()\n off_button = QRadioButton(text='Off')\n off_button.setChecked(True)\n on_button = QRadioButton(text='Add to Selection')\n sub_button = QRadioButton(text='Subtract from Selection')\n painter_button_group.addButton(off_button)\n painter_button_group.addButton(on_button)\n painter_button_group.addButton(sub_button)\n off_button.clicked.connect(lambda x: self.setSelectorBrushType('off'))\n on_button.clicked.connect(lambda x: self.setSelectorBrushType('add'))\n sub_button.clicked.connect(lambda x: self.setSelectorBrushType('subtract'))\n painter_layout = QHBoxLayout()\n painter_layout.addWidget(QLabel(text='Selector Brush: '))\n painter_layout.addWidget(off_button)\n painter_layout.addWidget(on_button)\n painter_layout.addWidget(sub_button)\n roi_modification_tab_layout.addLayout(painter_layout)\n clear_from_selection = QPushButton(text='Clear Selection')\n 
clear_from_selection.clicked.connect(lambda x: self.clearPixelSelection())\n roi_modification_tab_layout.addWidget(clear_from_selection)\n brush_size_options = OptionInput('Brush Size:', '', lambda x, y: self.setBrushSize(y), 0, 'Sets the brush size', ['1', '3', '5', '7', '9',\n '11', '15', '21', '27',\n '35'])\n roi_modification_tab_layout.addWidget(brush_size_options)\n process_button = QPushButton()\n process_button.setText('Apply Settings')\n self.thread = ROIExtractionThread(main_widget, process_button, self.roi_list_module, self)\n self.main_widget.thread_list.append(self.thread)\n process_button.clicked.connect(lambda : self.thread.runThread())\n self.roi_settings = QWidget()\n self.roi_settings_layout = QVBoxLayout()\n self.roi_settings.setLayout(self.roi_settings_layout)\n self.roi_settings_layout.addWidget(roi_extraction_settings(main_widget))\n self.roi_settings_layout.addWidget(process_button)\n tab_selector_roi.addTab(self.roi_settings, 'ROI Creation')\n tab_selector_roi.addTab(roi_modification_tab, 'ROI Modification')\n self.current_foreground_intensity = 1\n self.set_background('', 'Max Image', update_image=False)\n if self.main_widget.data_handler.rois_loaded:\n self.thread.endThread(True)\n display_settings_layout = QVBoxLayout()\n display_settings = QWidget()\n display_settings.setLayout(display_settings_layout)\n image_chooser = OptionInput('ROI Display type::', '', on_change_function=(self.set_image),\n default_index=0,\n tool_tip='Choose background to display',\n val_list=[\n 'Outlines', 'Blob'])\n display_settings_layout.addWidget(image_chooser)\n self.background_chooser = OptionInput('Background:', '', on_change_function=(self.set_background),\n default_index=2,\n tool_tip='Choose background to display',\n val_list=[\n 'Blank Image', 'Mean Image', 'Max Image', 'Temporal Correlation Image', 'Eigen Norm Image'])\n display_settings_layout.addWidget(self.background_chooser)\n background_slider_layout = QHBoxLayout()\n background_slider_layout.addWidget(QLabel('0'))\n self.foreground_slider = QSlider(Qt.Horizontal)\n self.foreground_slider.setMinimum(0)\n self.foreground_slider.setMaximum(100)\n self.foreground_slider.setSingleStep(1)\n self.foreground_slider.valueChanged.connect(self.intensity_slider_changed)\n try:\n self.foreground_slider.setValue(80)\n except AttributeError:\n pass\n\n background_slider_layout.addWidget(self.foreground_slider)\n background_slider_layout.addWidget(QLabel('10'))\n display_settings_layout.addWidget(QLabel('Change foreground intensity:'))\n display_settings_layout.addLayout(background_slider_layout)\n tab_selector_time_trace = QTabWidget()\n tab_selector_time_trace.setStyleSheet('QTabWidget {font-size: 20px;}')\n tab_selector_time_trace.setMaximumHeight(200)\n self.time_plot = pg.PlotWidget()\n self.time_plot.showGrid(x=True, y=True, alpha=0.3)\n tab_selector_time_trace.addTab(self.time_plot, 'Time Trace Plot')\n time_trace_settings = QWidget()\n time_trace_settings_layout = QVBoxLayout()\n time_trace_settings.setLayout(time_trace_settings_layout)\n time_trace_settings_layout.addWidget(OptionInput('Time Trace Type', '', (lambda x: x + x),\n default_index=0, tool_tip='Select way to calculate time trace',\n val_list=[\n 'Normal', 'DeltaF/F', 'More']),\n stretch=1)\n time_trace_trial_select_list = TrialListWidget()\n time_trace_trial_select_list.setItems(self.data_handler.dataset_params['dataset_path'])\n time_trace_settings_layout.addWidget(time_trace_trial_select_list, stretch=5)\n tab_selector_time_trace.addTab(time_trace_settings, 'Time 
Trace Settings')\n roi_view_tabs = QTabWidget()\n roi_view_tabs.setStyleSheet('QTabWidget {font-size: 20px;}')\n self.main_widget.roi_image_view.setStyleSheet('margin:0px; border:0px solid rgb(50, 65, 75); padding: 0px;')\n roi_view_tabs.addTab(self.main_widget.roi_image_view, 'ROI Display')\n roi_view_tabs.addTab(display_settings, 'Display Settings')\n self.column_2 = [roi_view_tabs, tab_selector_time_trace]\n super().__init__('ROI Extraction', column_1=[\n tab_selector_roi],\n column_2=(self.column_2),\n column_2_display=True)\n\n def setSelectorBrushType(self, type):\n if type == 'off':\n self.select_pixel_on = False\n else:\n self.select_pixel_on = True\n self.select_mode = type\n\n def modify_roi(self, roi_num, add_subtract='add'):\n \"\"\"\n Add/subtracts the currently selected pixels from an ROI\n Parameters\n ----------\n roi_num roi to modify starting at 1\n add_subtract either add or subtract depending on operation wanted\n\n Returns\n -------\n Nothing\n \"\"\"\n if roi_num == None:\n print('Please select an roi')\n return\n else:\n shape = self.main_widget.data_handler.edge_roi_image_flat.shape\n roi_num = roi_num - 1\n if add_subtract == 'add':\n print('Adding Selection to ROI #' + str(roi_num + 1))\n self.data_handler.clusters[roi_num] = combine_rois(self.data_handler.clusters[roi_num], self.current_selected_pixels_list)\n self.data_handler.gen_roi_display_variables()\n self.data_handler.calculate_time_trace(roi_num)\n if add_subtract == 'subtract':\n print('Subtracting Selection from ROI #' + str(roi_num + 1))\n self.data_handler.clusters[roi_num] = [x for x in self.data_handler.clusters[roi_num] if x not in self.current_selected_pixels_list]\n self.data_handler.gen_roi_display_variables()\n self.data_handler.calculate_time_trace(roi_num)\n if self.outlines:\n self.roi_image_flat = np.hstack([self.data_handler.edge_roi_image_flat,\n np.zeros(shape),\n np.zeros(shape)])\n else:\n self.roi_image_flat = self.main_widget.data_handler.pixel_with_rois_color_flat\n self.select_image_flat = np.zeros([shape[0], 3])\n self.clearPixelSelection(update_display=False)\n self.updateImageDisplay()\n\n def draw(self, pos):\n pass\n\n def intensity_slider_changed(self):\n self.current_foreground_intensity = 10 - float(self.foreground_slider.value()) / 10\n self.updateImageDisplay()\n\n def set_background(self, name, func_name, update_image=True):\n shape = self.main_widget.data_handler.shape\n if func_name == 'Mean Image':\n self.current_background = self.main_widget.data_handler.mean_image.reshape([-1, 1])\n if func_name == 'Max Image':\n self.current_background = self.main_widget.data_handler.max_image.reshape([-1, 1])\n if func_name == 'Blank Image':\n self.current_background = np.zeros([shape[1] * shape[2], 1])\n if func_name == 'Temporal Correlation Image':\n self.current_background = self.data_handler.temporal_correlation_image.reshape([-1, 1])\n if func_name == 'Eigen Norm Image':\n self.current_background = self.data_handler.eigen_norm_image.reshape([-1, 1])\n if update_image:\n self.updateImageDisplay()\n\n def set_image(self, name, func_name, update_image=True):\n shape = self.main_widget.data_handler.edge_roi_image_flat.shape\n if func_name == 'Outlines':\n self.outlines = True\n self.roi_image_flat = np.hstack([self.data_handler.edge_roi_image_flat,\n np.zeros(shape),\n np.zeros(shape)])\n if func_name == 'Blob':\n self.outlines = False\n self.roi_image_flat = self.main_widget.data_handler.pixel_with_rois_color_flat\n if update_image:\n self.updateImageDisplay()\n\n def 
updateImageDisplay(self, new=False):\n try:\n shape = self.main_widget.data_handler.dataset_filtered.shape\n if not hasattr(self, 'select_image_flat'):\n self.select_image_flat = np.zeros([shape[1] * shape[2], 3])\n else:\n range_list = self.main_widget.roi_image_view.image_view.view.viewRange()\n shape = self.main_widget.data_handler.dataset_filtered.shape\n background_max = self.current_background.max()\n background_image_scaled = self.current_foreground_intensity * 255 / (background_max if background_max != 0 else 1) * self.current_background\n background_image_scaled_3_channel = np.hstack([background_image_scaled, background_image_scaled, background_image_scaled])\n if new:\n combined = self.roi_image_flat + background_image_scaled_3_channel + self.select_image_flat\n combined_reshaped = combined.reshape((shape[1], shape[2], 3))\n self.main_widget.roi_image_view.setImage(combined_reshaped)\n self.clearPixelSelection(update_display=False)\n else:\n self.image_item.image = background_image_scaled_3_channel.reshape((shape[1], shape[2], 3))\n self.image_item.updateImage(autoLevels=True)\n combined = (self.roi_image_flat + self.select_image_flat).reshape((\n shape[1], shape[2], 3))\n self.image_item.image += combined\n self.image_item.image[self.current_selected_pixels_mask] += self.select_pixel_color\n self.image_item.updateImage(autoLevels=False)\n except AttributeError:\n pass\n\n def selectRoi(self, num):\n try:\n color_select = (245, 249, 22)\n color_roi = self.main_widget.data_handler.color_list[((num - 1) % len(self.main_widget.data_handler.color_list))]\n shape = self.main_widget.data_handler.dataset_filtered.shape\n self.select_image_flat[self.main_widget.data_handler.clusters[(num - 1)]] = color_select\n self.updateImageDisplay()\n except AttributeError:\n pass\n\n def deselectRoi(self, num):\n color = self.main_widget.data_handler.color_list[((num - 1) % len(self.main_widget.data_handler.color_list))]\n shape = self.main_widget.data_handler.dataset_filtered.shape\n shape_flat = self.data_handler.edge_roi_image_flat.shape\n self.select_image_flat[self.main_widget.data_handler.clusters[(num - 1)]] = color if not self.outlines else np.hstack([self.data_handler.edge_roi_image_flat,\n np.zeros(shape_flat),\n np.zeros(shape_flat)])[self.main_widget.data_handler.clusters[(num - 1)]]\n self.updateImageDisplay()\n\n def selectRoiTime(self, num):\n try:\n color_select = (245, 249, 22)\n color_roi = self.main_widget.data_handler.color_list[((num - 1) % len(self.main_widget.data_handler.color_list))]\n shape = self.main_widget.data_handler.dataset_filtered.shape\n if self.roi_list_module.roi_time_check_list[(num - 1)]:\n pen = pg.mkPen(color=color_roi, width=3)\n self.time_plot.plot((self.main_widget.data_handler.get_time_trace(num)), pen=pen)\n self.time_plot.enableAutoRange(axis=0)\n except AttributeError:\n pass\n\n def deselectRoiTime(self, num):\n color = self.main_widget.data_handler.color_list[((num - 1) % len(self.main_widget.data_handler.color_list))]\n shape = self.main_widget.data_handler.dataset_filtered.shape\n shape_flat = self.data_handler.edge_roi_image_flat.shape\n self.time_plot.clear()\n self.time_plot.enableAutoRange(axis=0)\n for num2, x in zip(range(1, len(self.roi_list_module.roi_time_check_list)), self.roi_list_module.roi_time_check_list):\n if x:\n color_roi = self.main_widget.data_handler.color_list[((num2 - 1) % len(self.main_widget.data_handler.color_list))]\n pen = pg.mkPen(color=color_roi, width=3)\n 
self.time_plot.plot((self.main_widget.data_handler.get_time_trace(num2)),\n pen=pen)\n\n def zoomRoi(self, num):\n \"\"\"\n Zooms in to a certain roi\n Parameters\n ----------\n num : int\n roi num starts at 1\n\n Returns\n -------\n Nothing\n \"\"\"\n num = num - 1\n max_cord = self.main_widget.data_handler.cluster_max_cord_list[num] + 15\n min_cord = self.main_widget.data_handler.cluster_min_cord_list[num] - 15\n self.main_widget.roi_image_view.image_view.getView().setXRange(min_cord[1], max_cord[1])\n self.main_widget.roi_image_view.image_view.getView().setYRange(min_cord[0], max_cord[0])\n\n def roi_view_click(self, event):\n if event.button() == QtCore.Qt.RightButton:\n if self.image_item.raiseContextMenu(event):\n event.accept()\n else:\n event.accept()\n pos = event.pos()\n x = int(pos.x())\n y = int(pos.y())\n if self.select_pixel_on:\n self.pixel_paint(x, y)\n else:\n self.click_event = True\n pixel_with_rois_flat = self.main_widget.data_handler.pixel_with_rois_flat\n shape = self.main_widget.data_handler.dataset_filtered.shape\n roi_num = int(pixel_with_rois_flat[(shape[2] * x + y)])\n if roi_num != 0:\n self.roi_list_module.set_current_select(roi_num)\n\n def roi_view_drag(self, event):\n event.accept()\n pos = event.pos()\n x = int(pos.x())\n y = int(pos.y())\n if self.select_pixel_on:\n self.pixel_paint(x, y)\n\n def pixel_paint(self, x, y):\n try:\n if self.select_mode == 'add':\n shape = self.main_widget.data_handler.dataset_filtered.shape\n for x_dif in range(self.brush_size * 2 + 1):\n for y_dif in range(self.brush_size * 2 + 1):\n x_new = x - self.brush_size - 1 + x_dif\n y_new = y - self.brush_size - 1 + y_dif\n if shape[2] * x_new + y_new not in self.current_selected_pixels_list:\n self.image_item.image[(x_new, y_new)] += [0, 255, 0]\n self.current_selected_pixels_list.append(shape[2] * x_new + y_new)\n self.current_selected_pixels_mask[(x_new, y_new)] = True\n\n if self.select_mode == 'subtract':\n shape = self.main_widget.data_handler.dataset_filtered.shape\n for x_dif in range(self.brush_size * 2 + 1):\n for y_dif in range(self.brush_size * 2 + 1):\n x_new = x - self.brush_size - 1 + x_dif\n y_new = y - self.brush_size - 1 + y_dif\n if shape[2] * x_new + y_new in self.current_selected_pixels_list:\n self.image_item.image[(x_new, y_new)] -= [0, 255, 0]\n self.current_selected_pixels_list.remove(shape[2] * x_new + y_new)\n self.current_selected_pixels_mask[(x_new, y_new)] = False\n\n self.image_item.updateImage()\n except IndexError:\n pass\n\n def clearPixelSelection(self, update_display=True):\n shape = self.main_widget.data_handler.dataset_filtered.shape\n self.current_selected_pixels_mask = np.zeros([shape[1], shape[2]], dtype=bool)\n self.current_selected_pixels_list = []\n if update_display:\n self.updateImageDisplay()\n\n def check_pos_in_image(self, x, y):\n pass\n\n def setBrushSize(self, size):\n \"\"\"\n Sets the brush size\n\n self.brush_size is the additional size on all dimensions in addition to middle\n point\n Parameters\n ----------\n size from option input\n\n Returns\n -------\n nothing\n \"\"\"\n self.brush_size = int((int(size) - 1) / 2)","sub_path":"pycfiles/CIDAN-0.1.7-py3.7/ROIExtractionTab.cpython-37.py","file_name":"ROIExtractionTab.cpython-37.py","file_ext":"py","file_size_in_byte":21416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"226432602","text":"import argparse\nimport os\nimport subprocess\n\n\ndef start_fceux(lua_test):\n '''Starts fceux with the appropriate lua script'''\n if 
lua_test:\n        script_path = os.path.join('test.lua')\n    else:\n        script_path = os.path.join('main.lua')\n\n    save_path = 'save_states/level_0.fcs'\n\n    start_cmd = 'fceux -lua ' + script_path + ' -loadstate ' + save_path + ' game_roms/Tetris.nes'\n    subprocess.run(start_cmd, shell = True)\n\n\nif __name__ == '__main__':\n    # program description\n    description = \"This is a genetic algorithm for Tetris 1989 NES\"\n\n    # initiate parser\n    parser = argparse.ArgumentParser(description = description)\n\n    # add arguments to parser\n    parser.add_argument('--tl', '-test_lua', help = 'runs test.lua')\n\n    args = parser.parse_args()\n\n    if args.tl:\n        test = True\n    else:\n        test = False\n\n    start_fceux(test)\n\n","sub_path":"TetrisGenetic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"404682788","text":"# -*- coding:utf-8 -*-\nimport os\n# proto file\nopenFile = r\"D:\\svn\\workspace\\branch_jinzhong\\Common\\platform_jar\\proto\\club.proto\"\n# proto messages we want to extract\ngainProto = [\"CCLModifyConfirmedGameplaysOrderREQ\",\"CLCModifyConfirmedGameplaysOrderRES\"]\n# service that uses these messages\nserviceName = \"ClubManagerService\"\n\n\n\n# output file\nfw = open(\"protoInfo.lua\", \"w\") \n\nprotoInfo = []\nfor i in range(len(gainProto)):\n\tprotoInfo.append([])\n\nopenFileName = openFile.replace(os.path.dirname(openFile),\"\").replace(\"\\\\\",\"\").replace(\".proto\",\"\")\t\n\t\n# check whether this line enters one of the messages we want to extract\ndef isProto(str):\n\tfor i in range(len(gainProto)):\n\t\tif str.find(\"message \" + gainProto[i]) != -1:\n\t\t\treturn i\n\treturn -1\n\n\n# build the string of the message's fields joined together\ndef combineProtoInfo(pInfo):\n\tstr = \"\"\n\tfor i in range(len(pInfo)):\n\t\tif i + 1 == len(pInfo):\n\t\t\tstr = str + pInfo[i]\n\t\telse:\n\t\t\tstr = str + pInfo[i] + \", \"\n\t\t\t\n\treturn str\n\nf = open(openFile) \nline = f.readline() \nprotoNameId = -1\n\n\nwhile line: \n\tline = line.lstrip()\n\tif line.find(\"{\") != -1:\n\t\tline = f.readline() \n\t\tcontinue\n\t\n\tif protoNameId >= 0:\n\t\tprotoName = gainProto[protoNameId]\n\t\t# we are inside a message we want to extract, so build it up\n\t\ttempData = line.split(\" \")\n\t\tif len(tempData) > 3 :\n\t\t\tif protoName.find(\"REQ\") != -1:\n\t\t\t\tprotoInfo[protoNameId].append(tempData[2]) \n\t\telse:\n\t\t\tprotoNameId = -1\n\telse:\n\t\tprotoNameId = isProto(line)\n\n\tline = f.readline() \n \n\n \nfor i in range(len(gainProto)):\n\tprotoName = gainProto[i]\n\tfw.write(\"--\\n\")\n\tfw.write(\"local %s = class(\\\"%s\\\", ProtocolBase)\\n\" % (protoName,protoName))\n\tfw.write(\"ns.%s = %s\\n\\n\" % (protoName,protoName))\n\tfw.write(\"%s.OP_CODE = net.ProtocolCode.?????\\n\" % (protoName))\n\tfw.write(\"%s.CLZ_CODE = \\\"com.kodgames.message.proto.%s.%s\\\"\\n\\n\" % (protoName,openFileName,protoName))\n\tfw.write(\"function %s:ctor(serverId, callback)\\n\" % (protoName))\n\tfw.write(\"\tself.super.ctor(self, %s.OP_CODE, serverId, callback)\\n\" % (protoName))\n\tfw.write(\"end\\n\\n\")\n\n\t#setData\n\tif protoName.find(\"REQ\") != 
-1:\n\t\tfw.write(\"--\\n\")\n\t\tfw.write(\"function %s:send%s(%s)\\n\" % (serviceName,protoName,combineProtoInfo(protoInfo[i])))\n\t\tfw.write(\"\tlocal request = net.NetworkRequest.new(net.protocol.%s, self.?????:getClubServiceId())\\n\" % (protoName))\n\t\tfw.write(\"\trequest:getProtocol():setData(%s)\\n\" % (combineProtoInfo(protoInfo[i])))\n\t\tfw.write(\"\tgame.util.RequestHelper.request(request)\\n\")\n\t\tfw.write(\"end\\n\\n\")\n\t\tfw.write(\"===================================\\n\\n\")\t\n\telse:\n\t\tfw.write(\"--\\n\")\n\t\tfw.write(\"function %s:_on%s(response)\\n\" % (serviceName,protoName))\n\t\tfw.write(\"\tlocal protocol = response:getProtocol():getProtocolBuf()\\n\\n\\n\")\n\t\tfw.write(\"end\\n\\n\")\n\t\tfw.write(\"===================================\\n\\n\")\t\n\t\tfw.write(\"requestManager:registerResponseHandler(net.protocol.%s.OP_CODE, self, self._on%s)\\n\\n\" % (protoName,protoName))\n\t\tfw.write(\"===================================\\n\\n\")\t\n \n \nf.close() ","sub_path":"01 小工具/protoTools.py","file_name":"protoTools.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"50993334","text":"from nodes import *\nfrom network import *\nimport logging\n\nlog = logging.getLogger(\"mcmc\")\n\nlogging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(module)s %(funcName)s(): %(message)s')\nlogging.getLogger().setLevel(logging.ERROR)\n# logging.getLogger(\"nodes\").setLevel(logging.ERROR)\n# logging.getLogger(\"network\").setLevel(logging.ERROR)\n# logging.getLogger(\"mcmc\").setLevel(logging.ERROR)\n\n\nb = BernoulliNode(name='B', prob=[.9], value=True)\ni = BernoulliNode(name='I', prob=[.9, .5, .5, .1], value=True)\nm = BernoulliNode(name='M', prob=[.1], value=True)\ng = BernoulliNode(name='G', prob=[.9, .8, .0, .0, .2, .1, .0, .0], value=False)\nj = BernoulliNode(name='J', prob=[0.9, 0.0], value=False)\n\nb.children = [g, i]\ni.children = [g]\ni.parents = [b, m]\nm.children = [i, g]\ng.children = [j]\ng.parents = [b, i, m]\nj.parents = [g]\n\nb.is_observed = True\ni.is_observed = True\nm.is_observed = True\n\nnetwork = Network(nodes=[b, i, m, g, j])\nsamples = network.collect_samples(burn=3000, n=100000)\nlog.info(\"Totals: \" + str(samples.totals()))\nprint(\"P(J=True | B=True, I=True, M=True) = \" + str(samples.p({j: True}, {b: True, i: True, m: True})))\n\n#samples.plot_mixing(\"P(J=True | B=True, I=True, M=True)\", {j: True}, {b: True, i: True, m: True})\n","sub_path":"Nathan/MCMC Part 1/mcmc1/code/network_jail.py","file_name":"network_jail.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"328995560","text":"from __future__ import absolute_import\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db.models import Q\nfrom typing import Any, Dict, KeysView, List, Tuple, Union\n\nfrom gdpr.enums import LegalReasonState\nfrom gdpr.fields import Fields\nfrom gdpr.loading import anonymizer_register, purpose_register\n\nFieldList = Union[List[unicode], Tuple, KeysView[unicode]] # List, tuple or return of dict keys() method.\nFieldMatrix = Union[unicode, Tuple[Any, ...]]\nRelatedMatrix = Dict[unicode, FieldMatrix]\n\n\nclass PurposeMetaclass(type):\n\n def __new__(mcs, name, bases, attrs):\n from gdpr.loading import purpose_register\n\n new_class = super(PurposeMetaclass, mcs).__new__(mcs, name, bases, attrs)\n if hasattr(new_class, u'slug') and 
new_class.slug:\n            if new_class.slug in purpose_register:\n                raise ImproperlyConfigured(u'More anonymization purposes with slug {}'.format(new_class.slug))\n\n            purpose_register.register(new_class.slug, new_class)\n        return new_class\n\n    def __str__(self):\n        return unicode(self.name)\n\n\nclass AbstractPurpose(object):\n    __metaclass__ = PurposeMetaclass\n    u\"\"\"\n\n    :param anonymize_legal_reason_related_object_only: If True anonymize only related objects which have links which\n    have LegalReasonRelatedObject records.\n    \"\"\"\n\n    name = None\n    slug = None\n    fields = None\n    expiration_timedelta = None\n    anonymize_legal_reason_related_objects_only = None\n\n    def get_parsed_fields(self, model):\n        return Fields(self.fields or (), model)\n\n    def deanonymize_obj(self, obj, fields = None):\n        fields = fields or self.fields or ()\n        if len(fields) == 0:\n            # If there are no fields to deanonymize do nothing.\n            return\n        obj_model = obj.__class__\n        anonymizer = anonymizer_register[obj_model]()\n        anonymizer.deanonymize_obj(obj, fields)\n\n    def anonymize_obj(self, obj, legal_reason = None,\n                      fields = None):\n        fields = fields or self.fields or ()\n        if len(fields) == 0:\n            # If there are no fields to anonymize do nothing.\n            return\n        from gdpr.models import LegalReason  # noqa\n\n        obj_model = obj.__class__\n        anonymizer = anonymizer_register[obj_model]()\n\n        # MultiLegalReason\n        other_legal_reasons = LegalReason.objects.filter_source_instance(obj).filter(state=LegalReasonState.ACTIVE)\n        if legal_reason:\n            other_legal_reasons = other_legal_reasons.filter(~Q(pk=legal_reason.pk))\n        if other_legal_reasons.count() == 0:\n            anonymizer.anonymize_obj(obj, legal_reason, self, fields)\n            return\n\n        from gdpr.loading import purpose_register\n\n        parsed_fields = self.get_parsed_fields(obj_model)\n\n        # Transform legal_reasons to fields\n        for allowed_fields in [purpose_register[slug]().get_parsed_fields(obj_model) for slug in\n                               set([i.purpose_slug for i in other_legal_reasons])]:\n            parsed_fields -= allowed_fields\n\n        if len(parsed_fields) == 0:\n            # If there are no fields to anonymize do nothing.\n            return\n\n        anonymizer.anonymize_obj(obj, legal_reason, self, parsed_fields)\n\n\npurposes_map = purpose_register  # Backwards compatibility\n","sub_path":"gdpr/purposes/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"74924049","text":"# coding: utf-8\n# Antonio Bertino / UFCG\n# antonio.bertino.neto@ccc.ufcg.edu.br\n# List Intersection\n# Programming 1 / 2018.1\n\ndef meu_in(lista, elemento):\n    for e in lista:\n        if e == elemento:\n            return True\n\n    return False\n\ndef intersecao_listas(l1, l2):\n    for i in range(len(l1) -1,-1,-1):\n        if not meu_in(l2, l1[i]):\n            l1.pop(i)\n\n    return l1\n","sub_path":"unidade07/intersecao_listas/intersecao.py","file_name":"intersecao.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"409404018","text":"\nfrom __future__ import division\nimport random\n\nfile = open('results.txt', 'w')\n\ncount = 0\n\nwhile count < 2000:\n    x = random.random()\n    y = 0\n    if x > .5:\n        y = 1\n\n    file.write(str(y) + \"\\n\")\n    count += 1\n\n\nfile.close()\n","sub_path":"SparkExamples/rand.py","file_name":"rand.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"637072536","text":"import webapp2\nimport 
logging\nimport json\nimport urllib2\nimport datetime\n\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import mail\nfrom google.appengine.ext import ndb\n\nfrom user.models import User\nfrom models import GithubWebhook\nfrom lib import tools, conf\n\nGITHUB_SECRET = conf.config['github']['secret_key']\n\nclass MonsterMain(webapp2.RequestHandler):\n\n    def get(self):\n        self.response.headers['Content-Type'] = 'application/json'\n        self.response.write('')\n\nclass WebhookRegister(webapp2.RequestHandler):\n    def post(self):\n        logging.debug('GitHub Request headers: %s' % self.request.headers)\n        logging.debug('GitHub Request body: %s' % json.loads(self.request.body))\n        logging.debug('Github Request: %s' % str(self.request))\n        webhook = GithubWebhook()\n        webhook.event = self.request.headers['X-Github-Event']\n        webhook.content = self.request.body\n        webhook.put()\n\nclass WebhookFetch(webapp2.RequestHandler):\n    def get(self):\n        number = self.request.get('number')\n        event = self.request.get('event')\n        if len(number) == 0:\n            number = 5\n        else:\n            number = int(number)\n        if len(event) == 0:\n            event = 'all'\n        webhooks = GithubWebhook.fetch_by_event(event, number)\n        hooklist = []\n        for webhook in webhooks:\n            hooklist.append(tools.serialize_dict(webhook.to_dict()))\n        self.response.headers['Content-Type'] = 'application/json'\n        response = {\n            'status': 0,\n            'message': 'Successfully fetching GitHub Webhooks.',\n            'number': number,\n            'webhooks': hooklist,\n        }\n        self.response.write(json.dumps(response))\n\ndef latestChanges(number=5):\n    event = 'push'\n    url = 'http://%s/api/github/webhook/fetch?event=%s&number=%s' % (tools.APP_HOSTNAME, event, number)\n    result = urlfetch.fetch(url)\n    github_hooks = json.loads(result.content)\n    changelist = []\n    for hook in github_hooks['webhooks']:\n        logging.debug('Hook: %s' % hook)\n        changelist.append({\n            'repository': hook['content']['repository']['clone_url'],\n            'author': hook['content']['head_commit']['author']['name'],\n            'commit': hook['content']['head_commit']['message'],\n            'url': hook['content']['head_commit']['url'],\n            'date': hook['date'],\n        })\n\n    return changelist","sub_path":"github/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"68548404","text":"import requests\n\n\nTOKEN = \"AQAAAAAJ2GhsAAT-oojqytr3qUIRrN4vkclmjuQ\"\n\n\nclass MetrikaError(Exception):\n    pass\n\n\nclass YaMetrikaInfo:\n\n    METRIKS = {\n        \"visits\": \"ym:s:visits\",\n        \"pageviews\": \"ym:s:pageviews\",\n        \"users\": \"ym:s:users\"\n    }\n\n    def __init__(self, token):\n        \"\"\"\n        Initialize an instance of the Yandex Metrika informer class\n        :param token: Yandex Metrika token\n        \"\"\"\n        self.token = token\n\n    @property\n    def available_counters(self):\n        \"\"\"\n        Get a dictionary of available counters in the format [counter name]: [counter id]\n        :return: dictionary of available counters\n        \"\"\"\n        # Result dictionary\n        ans = {}\n\n        # Request URL\n        URL = \"https://api-metrika.yandex.ru/management/v1/counters\"\n\n        # Parameters: just the token\n        params = {\n            \"oauth_token\": TOKEN\n        }\n\n        # Fetch the available counters and fill the result dictionary\n        req = requests.get(URL, params=params).json()\n        for counter in req[\"counters\"]:\n            ans[counter[\"name\"]] = counter[\"id\"]\n\n        return ans\n\n    def get_data(self, *args):\n        \"\"\"\n        Get information about visits (pass \"visits\"), page views (pass \"pageviews\") and users (pass \"users\").\n        :param args: 
the metric(s) of interest (\"visits\" and/or \"pageviews\" and/or \"users\")\n        :return: dictionary in the format [counter name]: [counter data]\n            [counter data]: dictionary in the format [date]: [metric info]\n            [metric info]: dictionary in the format [metric]: [metric value]\n        \"\"\"\n\n        # Result dictionary\n        res = {}\n\n        # Request URL\n        URL = \"https://api-metrika.yandex.ru/stat/v1/data\"\n\n        # Build the metrics string\n        try:\n            metrics = \", \".join(list(map(lambda x: self.METRIKS[x], args)))\n        except KeyError:\n            raise MetrikaError(\"Error in metric string:\", *args)\n\n        # Get the dictionary of available counters\n        counters = self.available_counters\n\n        # For each counter, fetch the specified metrics grouped by date\n        for name, id in counters.items():\n            params = {\n                \"oauth_token\": TOKEN,\n                \"ids\": id,\n                \"metrics\": metrics,\n                \"dimensions\": \"ym:s:date\"\n            }\n            req = requests.get(URL, params=params).json()\n            res_data = {}\n\n            # Build the result dictionary for this counter and add it to the overall results\n            for data in req[\"data\"]:\n                res_metrics = {}\n                for index, metric in enumerate(args):\n                    res_metrics[metric] = int(data[\"metrics\"][index])\n                res_data[data[\"dimensions\"][0][\"name\"]] = res_metrics\n            res[\"Counter \\\"{}\\\"\".format(name)] = res_data\n\n        return res\n\n\n# Usage example\nif __name__ == \"__main__\":\n    ym = YaMetrikaInfo(TOKEN)\n    import pprint\n    pprint.pprint(ym.get_data(\"visits\", \"pageviews\", \"users\"))","sub_path":"les3.4/ya.py","file_name":"ya.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"601046254","text":"import Factoria \n\nif __name__ == '__main__': \n\tmi_factoria = Factoria.Factoria() \t\n\n\t#Factory, create an instrument! \n\tinstrumentoMusical1 = mi_factoria.get_instrumentoMusical('Percusión', 'Plata', 'DrumWorkShop', '2500\tDlls', '2m','8') \n\t#an instrumentoMusical of type Batería (drum kit) has been created \n\tprint (instrumentoMusical1) \t\n\t# print instrumentoMusical.get_tipo() \n\t# print instrumentoMusical.get_marca()\n\n\t#Factory, create an instrument! \n\tinstrumentoMusical2 = mi_factoria.get_instrumentoMusical('Cuerdas', 'Café', 'Fender', '1500 Dlls', '135cm','6') \n\t#an instrumentoMusical of type Guitarra (guitar) has been created\n\tprint (instrumentoMusical2) \t\n\n\t#Factory, create an instrument! \n\tinstrumentoMusical3 = mi_factoria.get_instrumentoMusical('Cuerdas', 'Negro con Blanco', 'Gibson', '1350 Dlls','129cm','6') \n\t#an instrumentoMusical of type Guitarra (guitar) has been created\n\tprint (instrumentoMusical3) \n\n\t#Factory, create an instrument! \n\tinstrumentoMusical4 = mi_factoria.get_instrumentoMusical('Cuerdas', 'Rojo con Blanco', 'Fender', '1800 Dlls', '142cm','6') \n\t#an instrumentoMusical of type Guitarra (guitar) has been created\n\tprint (instrumentoMusical4) \n\n\t#Factory, create an instrument! 
\n\tinstrumentoMusical5 = mi_factoria.get_instrumentoMusical('Percusión', 'Azul', 'Ludwig', '2335 Dlls', '145cm','12') \n\t#an instrumentoMusical of type Drum Kit has been created \n\tprint (instrumentoMusical5)\n\n\tinstrumentoMusical1.afinarB()\n\tinstrumentoMusical1.tocarB()\n\n\tinstrumentoMusical2.afinarP()\n\tinstrumentoMusical2.tocarP()\n\n\tinstrumentoMusical3.afinarP()\n\tinstrumentoMusical3.tocarP()\n\n\tinstrumentoMusical4.afinarP()\n\tinstrumentoMusical4.tocarP()\n\n\tinstrumentoMusical5.afinarB()\n\tinstrumentoMusical5.tocarB()","sub_path":"Ago-Dic-2018/Claudia Seca/PrimerParcial/Main_MU.py","file_name":"Main_MU.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"441214610","text":"'''\nModify the program from knock 45 so that it also outputs the arguments (the clauses\nthemselves that modify the predicate), tab-separated, after the predicate and its case pattern.\nIn addition to the knock 45 specification, satisfy the following:\n\n- An argument is the word sequence of a clause that modifies the predicate (the trailing particle does not need to be removed)\n- When several clauses modify the predicate, arrange them space-separated, with the same criterion and order as the particles\n\nConsider the example sentence 「ジョン・マッカーシーはAIに関する最初の会議で人工知能\nという用語を作り出した。」. \nThis sentence contains one verb, 「作り出す」,\nand if the clauses modifying 「作り出す」 are parsed as 「ジョン・マッカーシーは」, 「会議で」 and 「用語を」,\nthe output should be as follows.\n\n作り出す\tで は を\t会議で ジョンマッカーシーは 用語を\n\n'''\n\nfrom knock41 import get_chunk_sentences\nfrom collections import defaultdict\n\nsentences = get_chunk_sentences()\nf = open(\"knock46_output.txt\", \"w\") # dropped 'with open' here because I wanted one less level of indentation\nfor sentence in sentences:\n for chunk in sentence:\n verbs = [a_mor.base for a_mor in chunk.morphs if a_mor.pos == \"動詞\"]\n if not verbs:\n continue\n pps_candidate = [int(a_srcs) for a_srcs in chunk.srcs]\n pps_chunks_list = []\n for i in pps_candidate:\n list1 = [a_mor.base for a_mor in sentence[i].morphs if a_mor.pos == \"助詞\"]\n if not list1:\n continue\n for pp in list1:\n pps_chunks_list += [(pp, \"\".join([a_mor.surface for a_mor in sentence[i].morphs if a_mor.pos != \"記号\"]))]\n pps_chunks_list.sort()\n if pps_chunks_list:\n str1 = \" \".join([i[0] for i in pps_chunks_list])\n str2 = \" \".join([i[1] for i in pps_chunks_list])\n f.write(f\"{verbs[0]}\\t{str1}\\t{str2}\\n\")\nf.close()","sub_path":"kazuma/chapter05/knock46.py","file_name":"knock46.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"114199910","text":"import logging\nimport timeit\nimport pandas as pd\nfrom typing import *\n\nfrom sbsp_general.general import except_if_not_in_set, get_value\nfrom sbsp_io.general import write_string_to_file\n\nlogger = logging.getLogger(__name__)\n\n\nclass Timer:\n seconds_to_unit_multiplier = {\n \"ms\": 1000.0,\n \"s\": 1.0,\n \"m\": 1 / 60.0,\n \"h\": 1 / 3600.0\n }\n\n def __init__(self):\n self._timers = dict()\n\n def start(self, name):\n # type: (str) -> None\n\n if self._has_finished(name):\n logger.warning(\"Timer '{}' has already completed. Restarting...\".format(name))\n elif self._has_started(name):\n logger.warning(\"Timer '{}' has already started. Restarting...\".format(name))\n\n self._timers[name] = {\n \"begin\": timeit.default_timer(),\n \"end\": None\n }\n\n def finish(self, name, unit=\"s\"):\n # type: (str, str) -> float\n except_if_not_in_set(unit, Timer.seconds_to_unit_multiplier.keys())\n\n if not self._has_started(name):\n raise ValueError(\"Cannot finish timer '{}'. 
It hasn't started.\".format(name))\n\n self._timers[name][\"end\"] = timeit.default_timer()\n return self.elapsed_time(name, unit)\n\n def elapsed_time(self, name, unit):\n # type: (str, str) -> float\n \"\"\"Return the elapsed time of a finished timer, converted to the given unit\"\"\"\n if not self._has_started(name):\n raise ValueError(\"Cannot compute elapsed time. Timer '{}' has not started.\".format(name))\n if not self._has_finished(name):\n raise ValueError(\"Cannot compute elapsed time. Timer '{}' has not finished.\".format(name))\n\n try:\n return Timer._from_seconds_to_unit(self._timers[name][\"end\"] - self._timers[name][\"begin\"], unit)\n except ValueError as e:\n raise e\n\n def to_csv(self, pf_csv, **kwargs):\n # type: (str, Dict[str, Any]) -> None\n\n out = self.to_string(**kwargs)\n write_string_to_file(out, pf_csv)\n\n def to_string(self, **kwargs):\n # type: (Dict[str, Any]) -> str\n\n order = get_value(kwargs, \"order\", \"ascending\", valid_choices={\"ascending\", \"descending\"})\n order_by = get_value(kwargs, \"order_by\", \"start\", valid_choices={\"start\", \"end\"})\n\n df = pd.DataFrame(\n [{\n \"Name\": name, **self._timers[name]\n } for name in self._timers.keys()]\n )\n\n ascending = True if order == \"ascending\" else False\n df.sort_values(order_by, ascending=ascending, inplace=True)\n\n return df.to_string(index=False)\n\n def _has_started(self, name):\n # type: (str) -> bool\n \"\"\"Checks if a timer has already been started (whether or not it has completed)\"\"\"\n return name in self._timers\n\n def _has_finished(self, name):\n # type: (str) -> bool\n \"\"\"Checks if a timer has already been completed\"\"\"\n return name in self._timers and self._timers[name][\"end\"] is not None\n\n @staticmethod\n def _from_seconds_to_unit(seconds, unit):\n # type: (float, str) -> float\n except_if_not_in_set(unit, Timer.seconds_to_unit_multiplier.keys())\n return seconds * Timer.seconds_to_unit_multiplier[unit]\n","sub_path":"code/python/lib/sbsp_general/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"64368394","text":"\"\"\"\nHere: https://leetcode.com/problems/longest-happy-prefix/\nA string is called a happy prefix if it is a non-empty prefix which is also a suffix (excluding itself).\n\nGiven a string s, return the longest happy prefix of s. Return an empty string \"\" if no such prefix exists.\n\n\nExample 1:\n\nInput: s = \"level\"\nOutput: \"l\"\nExplanation: s contains 4 prefixes excluding itself (\"l\", \"le\", \"lev\", \"leve\"), and suffixes (\"l\", \"el\", \"vel\", \"evel\"). The largest prefix which is also a suffix is given by \"l\".\nExample 2:\n\nInput: s = \"ababab\"\nOutput: \"abab\"\nExplanation: \"abab\" is the largest prefix which is also a suffix. 
They can overlap in the original string.\nExample 3:\n\nInput: s = \"leetcodeleet\"\nOutput: \"leet\"\nExample 4:\n\nInput: s = \"a\"\nOutput: \"\"\n \n\nConstraints:\n\n1 <= s.length <= 10^5\ns contains only lowercase English letters.\n\n\"\"\"\n\n\ndef longestPrefix(s: str) -> str:\n n = len(s)\n for i in range(n - 2, -1, -1):\n prefix = s[:i + 1]\n suffix = s[n - len(prefix):]\n if prefix == suffix:\n return prefix\n return ''\n\n\nif __name__ == \"__main__\":\n # taking the input\n s = input(\"Enter word: \")\n print(longestPrefix(s))\n\n\"\"\"\nInputs\ns = \"level\"\nOutput: \"l\"\n\ns = \"ababab\"\nOutput: \"abab\"\n\ns = \"leetcodeleet\"\nOutput: \"leet\"\n\"\"\"\n","sub_path":"Python/Cp/LongestHappyPrefix.py","file_name":"LongestHappyPrefix.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"6549815","text":"from django.urls import path\nfrom app.views import ProfileView, EditView, CandidateListView\n\napp_name = 'app'\n\nurlpatterns = [\n path('', ProfileView.as_view()),\n path('profile/', ProfileView.as_view(), name='profile'),\n path('profile/edit', EditView.as_view(), name='edit'),\n path('list/', CandidateListView.as_view(), name='list'),\n]\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"341757840","text":"import torch.nn.functional as F\nfrom torch import nn\nfrom tcn import TemporalConvNet\nfrom torch.autograd import Variable\nimport torch.onnx\nimport onnx\n\nclass TCN(nn.Module):\n def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):\n super(TCN, self).__init__()\n self.tcn = TemporalConvNet(input_size, num_channels, kernel_size=kernel_size, dropout=dropout)\n self.linear = nn.Linear(num_channels[-1], output_size)\n\n def forward(self, inputs):\n \"\"\"Inputs have to have dimension (N, C_in, L_in)\"\"\"\n y1 = self.tcn(inputs) # input should have dimension (N, C, L)\n o = self.linear(y1[:, :, -1])\n return F.log_softmax(o, dim=1)\n\nif __name__ == \"__main__\":\n # onnx\n input_channels = 1\n n_classes = 10\n channel_sizes = [25, 25, 25, 25]\n kernel_size = 7\n dropout = 0.1\n dummy_input = Variable(torch.randn(10,1,784))\n model = TCN(input_channels, n_classes, channel_sizes, kernel_size, dropout)\n torch.onnx.export(model, dummy_input, \"tcn.onnx\", verbose=True)\n\n print(\"load model\")\n # #load model\n # model_new = onnx.load(\"tcn.onnx\")\n # # Check that the IR is well formed\n # onnx.checker.check_model(model_new)\n # # Print a human readable representation of the graph\n # onnx.helper.printable_graph(model_new.graph)\n\n #############\n # from onnx_tf.backend import prepare\n # import numpy as np\n # #############\n #\n # model = onnx.load(\"tcn.onnx\")\n # tf_rep = prepare(model)\n #\n # img = np.load(\"./assets/image.npz\")\n # output = tf_rep.run(img.reshape([1, 1, 28, 28]))\n\n from pytorch2keras import pytorch_to_keras\n\n import numpy as np\n input_np = np.random.uniform(0, 1, (10, 1, 784))\n input_var = Variable(torch.FloatTensor(input_np))\n\n\n k_model = pytorch_to_keras(model, input_var, [(784, 1)], verbose=True)\n\n # from pytorch2keras.converter import pytorch_to_keras\n #\n # # we should specify shape of the input tensor\n # k_model = pytorch_to_keras(model, input_var, [(None, None,)], verbose=True,change_ordering=True)\n\n # import onnx\n # from onnx2keras import 
onnx_to_keras\n #\n # # Load ONNX model\n # onnx_model = onnx.load('tcn.onnx')\n #\n # # Call the converter (input - is the main model input name, can be different for your model)\n # k_model = onnx_to_keras(onnx_model, ['input'],change_ordering=True)\n\n print('2')\n\n\n\n\n\n","sub_path":"TCN/mnist_pixel/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"572533289","text":"# Final test script (Python)\nimport json\n\n# print(dic_mcu)\n# with open(\"./data/mcu_movies.json\", \"w\", encoding=\"UTF-8\") as mcu_list:\n# json.dump(dic_mcu, mcu_list, ensure_ascii=False)\n\nwith open(\"./mcu_movies.json\", \"r\", encoding=\"UTF-8\") as mcu_list:\n dic_mcu = json.load(mcu_list)\n\n# Problem 1\n# Print the Marvel Cinematic Universe movies whose phase is Phase 2\ndef Phase(value):\n for movie in dic_mcu:\n if value == movie[\"시리즈\"]:\n print(\"{} ( {} )\".format(movie[\"영화명\"],movie[\"개봉일\"]))\n\nPhase(\"페이즈2\")\n\n# Problem 2\n# For movies with a box office of at least 450000000, get the list of director names, the total box office, and the average box office\n\n\n\ndef pro_list():\n count=0\n a=[]\n dal=0\n for movie in dic_mcu:\n if movie[\"박스오피스\"]>=450000000:\n a.append(movie[\"감독\"])\n dal+=movie[\"박스오피스\"]\n count+=1\n print(\"Director list:\\n\",list(set(a)))\n print(f\"Total box office ${dal:,d}\")\n print(f\"Average box office ${int(dal/count):,d}\")\n\npro_list()","sub_path":"base_test_app.py","file_name":"base_test_app.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"505887035","text":"from bax.env import bandit_sae_public as bsae\nfrom bax.env import bandit_ucb_public as bucb \n\nimport numpy as np\nimport pickle\nimport ray\nimport time \nfrom tqdm import tqdm\n\n# \n\nALGO = {\n \"sae\" : bsae,\n \"ucb\" : bucb\n}\n\n\ndef train_helper_for_horizon(prior=None, true_best_arm=1,\n Ts=[2500], num_runs=2,\n checkpoint_freq=5, checkpoint=None, \n alpha=0, algo='sae', ci_fn=None):\n '''\n Helper function for running multiple demonstrators on multiple runs\n \n Return:\n results : dict('gaps', 'regret', 'seed')\n '''\n \n algorithm = ALGO[algo]\n \n start_time = time.time()\n gaps_est = []\n seed_order = []\n regret = []\n tidx = 0\n seed = 88\n\n for T in tqdm(Ts):\n tidx += 1\n id_refs = []\n gaps_est_one_run = []\n num_good_demo = 0\n seed_one_run = []\n regret_one_run = []\n \n while num_good_demo < num_runs:\n demos_to_run = num_runs - num_good_demo\n for newseed in np.arange(demos_to_run):\n seed += 1\n demonstrator = algorithm.Demonstrator(prior=prior, T=T, seed=seed, ci_fn=ci_fn(alpha))\n id_ref = algorithm.run_.remote(demonstrator)\n id_refs.append(id_ref)\n\n while len(id_refs) > 0:\n ready, not_ready = ray.wait(id_refs)\n if len(ready) > 0:\n is_valid, id_gaps, obj = ray.get(ready[0])\n if is_valid:\n gaps_est_one_run.append(np.array(id_gaps))\n seed_one_run.append(obj.seed)\n regret_one_run.append(np.array(obj.regret))\n num_good_demo += 1\n id_refs = not_ready\n\n\n argsort = np.argsort(seed_one_run)\n # sort by some order to make sure the results are reproducible\n gaps_est.append(np.array(gaps_est_one_run)[argsort])\n seed_order.append(np.sort(seed_one_run))\n regret.append(np.array(regret_one_run)[argsort])\n \n if checkpoint and tidx%checkpoint_freq == 0:\n basepath='results/'\n with open('{}alpha{:.2f}_maxT{}_numrun{}_{}'.format(basepath, alpha, T, num_runs, checkpoint), 'wb') as f:\n pickle.dump(\n {\"Ts\": Ts[:tidx],\n \"gaps\": gaps_est,\n \"regret\": regret,\n \"seed_order\": seed_order}, f)\n 
gaps_est = np.array(gaps_est)\n seed_order = np.array(seed_order)\n regret = np.array(regret)\n\n return {\n \"gaps\": gaps_est,\n \"regret\": regret,\n \"seed_order\": seed_order,\n }\n","sub_path":"bax/utils/.ipynb_checkpoints/trainer_public-checkpoint.py","file_name":"trainer_public-checkpoint.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"147895414","text":"import keyword\n\n\"\"\"\n'False', 'None', 'True', 'and', 'as', 'assert', 'async', 'await', \n'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', \n'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'nonlocal', \n'not', 'or', 'pass', 'raise', 'return', 'try', 'while', 'with', 'yield'\n\n\"\"\"\nprint(123)\n\nprint(\"\"\"1\n2\n3\n4\n5\n6\n7\n\"\"\")\n\nprint(\"1\"\n \"8\"\n \"9\"\n \"0\")\n\nprint(\"ni hao\")\n\nprint(keyword.kwlist)\n\n\"\"\"\nReformat code: Ctrl + Shift + Alt + L\n\"\"\"\nname = input(\"Please enter your name:\")\nage = input(\"Please enter your age:\")\n","sub_path":"01-Python/11-lemon tree/class21_01day/class21_001.py","file_name":"class21_001.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"470594504","text":"from django.shortcuts import render\nfrom testapp.models import *\nfrom django.core.paginator import Paginator,PageNotAnInteger,EmptyPage\n\n# Create your views here.\ndef home(request):\n return render (request,'testapp/home.html')\n\ndef hybjobs1(request):\n jobs_list=hybjobs.objects.order_by('date')\n paginator=Paginator(jobs_list,10)\n page_number=request.GET.get('page')\n try:\n jobs_list=paginator.page(page_number)\n except PageNotAnInteger:\n jobs_list=paginator.page(1)\n except EmptyPage:\n jobs_list=paginator.page(paginator.num_pages)\n\n # mydict={'jobs_list':jobs_list}\n return render (request,'testapp/hybjobs.html',{'jobs_list':jobs_list})\n\ndef blorejobs1(request):\n jobs_list=blorejobs.objects.order_by('date')\n mydict={'jobs_list':jobs_list}\n return render (request,'testapp/blorejobs.html',context=mydict)\n\ndef chennaijobs1(request):\n jobs_list=chennaijobs.objects.order_by('date')\n mydict={'jobs_list':jobs_list}\n return render (request,'testapp/chennaijobs.html',context=mydict)\n\ndef punejobs1(request):\n jobs_list=punejobs.objects.order_by('date')\n mydict={'jobs_list':jobs_list}\n return render (request,'testapp/punejobs.html',context=mydict)\n","sub_path":"vjproject/testapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"572306244","text":"#!python3\n\nimport io\nimport numpy as np\nfrom scipy.sparse import coo_matrix\n\ndef argmin(t):\n res = 0\n val = t[0]\n for i in range(len(t)):\n if t[i]0:\n wordcount[i] += 1\nwordcount = wordcount + 1\n\nidfarr = np.log(numdocs/wordcount)\ntfidfarr = list()\nfor wvec in vectors:\n temp = np.multiply(wvec, idfarr)\n tfidfarr.append(temp)\n\nflag = True\nk = 10\nindex = 0\nmaxiter = 100\nmaxtheta = 0.01\nmaxdelta = 1\nclusters = [list() for i in range(k)]\nmemlist = [len(i) for i in clusters]\ncentchoice = list(np.random.choice(range(numdocs), k, replace=False))\ncenters = [tfidfarr[i] for i in centchoice]\nwhile flag:\n for wvec in tfidfarr:\n dist = list()\n for cent in centers:\n dist.append(angle(wvec, cent))\n clusters[argmin(dist)].append(wvec)\n newcenters = [get_centroid(i) for i in clusters]\n newmemlist = 
[len(i) for i in clusters]\n theta = change_cent(centers, newcenters)\n delta = change_clust(memlist, newmemlist)\n centers = [np.copy(newcenters[i]) for i in range(k)]\n memlist = newmemlist[:]\n index += 1\n print(index, theta, delta)\n if (index>maxiter) or (delta 5:\n return 5\n elif user_input < 1:\n return 1\n elif user_input % 1 != 0:\n rounding_loop = \"\"\n while rounding_loop == \"\":\n rounding = input(\"Would you like to round up or down?: \")\n if rounding == \"up\":\n user_input = math.ceil(user_input)\n rounding_loop = 1\n elif rounding == \"down\":\n user_input = math.floor(user_input)\n rounding_loop = 1\n else:\n print(\"Please enter \\\"up\\\" or \\\"down!!!\\\"\")\n else:\n return user_input\n\n input_loop = 1\n except ValueError:\n print(\"Only numbers!!!\")\n\n return user_input\n\n# Converted XLSX into CSV files so that they can be opened by python\n\ngenres = open(\"02_genres.csv\")\nadjectives = open(\"02_adjectives.csv\")\n\ncsv_genres = csv.reader(genres)\ncsv_adjectives = csv.reader(adjectives)\n\n# Asks for the rating of the book, recycled\ntitle = return_title(\"Please enter the title\")\nrating = return_input(\"Please enter the rating: \")\n\n# Creates a dictionary that retains the order of the rows from spreadsheet by assigning numbers as the key to make\n# it easy to print in the same order the entries were added since this version of python doesn't retain the order\n# of dictionaries\n\n# Dictionary contains the name of the genre and the adjective group used for the genre\n\ngenre_dict = {}\nnumbering = -1\nfor row in csv_genres:\n genre_dict[str(numbering)] = [row[1].lower(), row[0]]\n numbering += 1\n\n# Removes irrelevant lines in the spreadsheet since I didn't want to edit the spreadsheet\n\ngenre_dict.pop(\"-1\")\ngenre_dict.pop(\"0\")\n\n# Prints all options with padding zeroes for readability\n\nfor x in range(len(genre_dict)):\n print(str(x+1).zfill(2), \": \", genre_dict[str(x+1)][0])\n\n# Searches for the user input as a key in the dictionary or as the value for name\n# Converts to lower case and strips padding zeroes to increase the number of valid inputs\n\ngenre_group = \"\"\nwhile genre_group == \"\":\n selected_genre = ((input(\"Please select one of the genres above by \"\n \"typing the number or name listed: \")).lower()).lstrip(\"0\")\n\n # Sets the genre group to the entry attached to the key if the key is found\n\n if selected_genre in genre_dict.keys():\n genre_group = genre_dict[selected_genre][1]\n break\n\n if genre_group == \"\":\n\n # Cycles through the dictionary and sets the genre group attached to the genre if the input matches a genre name\n # on the list\n\n for x in range(len(genre_dict)):\n if genre_dict[str(x + 1)][0] == selected_genre:\n genre_group = genre_dict[str(x + 1)][1]\n break\n else:\n print(\"Please enter one of the options!!!\")\nadjective_list = []\n\n# Sets tags allowed for adjectives based on the rating given by user\n\nif rating == 3:\n rating_adjectives = [\"neutral\"]\nelif rating == 4:\n rating_adjectives = [\"neutral\", \"positive\"]\nelif rating == 2:\n rating_adjectives = [\"neutral\", \"negative\"]\nelif rating == 5:\n rating_adjectives = [\"positive\"]\nelse:\n rating_adjectives = [\"negative\"]\n\n# Finds all adjectives that fit either all genre groups or fit the selected genre group that have either the\n# positive, neutral, or negative tag based on the rating provided by the user\n\nfor x in csv_adjectives:\n if x[1] == \"0\" or x[1] == genre_group:\n if x[0] in rating_adjectives:\n 
adjective_list.append(x)\nfor x in adjective_list:\n print(x)\nadjective1 = random.choice(adjective_list)\nadjective2 = random.choice(adjective_list)\nif adjective1[0] != adjective2[0]:\n conjunction = \"but\"\nelse:\n conjunction = \"and\"\nsummary = input(\"summary?\")\nif summary != \"\":\n rng_number = random.randint(1, 2)\n\n if rng_number == 1:\n print(\"{} {} {}, {} is {}\".format(adjective1[2], conjunction, adjective2[2], title, summary))\n else:\n setting = input(\"Where does the book take place?\")\n print(\"Set in {}, {} is the story of {}\".format(setting, title, summary))\n\nelse:\n print(\"{} is both {} and {}\".format(title, adjective1[2], adjective2[2]))\n\n","sub_path":"component_6b_fill_random_skeleton.py","file_name":"component_6b_fill_random_skeleton.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"496438951","text":"# Copyright 2021, Yahoo\n# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms\n\nimport importlib\nimport warnings\nfrom typing import Any, Optional, Tuple\n\nfrom pydantic import validate_arguments\n\n\nclass DependencyUtils:\n \"\"\"\n DependencyUtils provides utility methods to handle and import optional dependencies\n in the YChaos package.\n \"\"\"\n\n @classmethod\n def import_module(\n cls, name: str, message: str = None, raise_error: bool = True, warn: bool = True\n ) -> Optional[Any]:\n \"\"\"\n Calling this method with a module name is similar to calling\n `import ...`. This can be used to import optional dependencies in the package.\n\n Args:\n name: Module name\n message: Error message to be printed on console when the import fails\n raise_error: Raise an error if the import fails\n warn: Raise warning if the import fails\n\n Raises:\n ImportError: when `raise_error` is True and the module is not present\n\n Returns:\n Optional Module\n \"\"\"\n try:\n module = importlib.import_module(name)\n except ImportError as import_error:\n if not message:\n message = f\"Dependency {name} is not installed.\"\n\n if warn:\n warnings.warn(message)\n\n if raise_error:\n raise ImportError(message) from None\n else:\n return None\n\n return module\n\n @classmethod\n @validate_arguments\n def import_from(\n cls,\n module_name: str,\n attrs: Tuple[str, ...],\n message: str = None,\n raise_error: bool = True,\n warn: bool = True,\n ) -> Tuple[Any, ...]:\n \"\"\"\n Calling this method with a module and an attribute is similar to calling\n `from ... import ...`. 
This can be used to import optional dependencies in the package.\n\n Examples:\n\n ```python\n from ychaos.utils.dependency import DependencyUtils\n BaseModel, Field = DependencyUtils.import_from(\"pydantic\", (\"BaseModel\", \"Field\"))\n ```\n\n The above code snippet is the same as\n ```python\n from pydantic import BaseModel, Field\n ```\n\n Args:\n module_name: Valid Python Module name\n attrs: Tuple of attribute names from the module\n message: message to be printed in case of an error\n raise_error: Raise an error if the import fails\n warn: Throw a warning if the import fails\n\n Raises:\n ImportError: when `raise_error` is True and one of the names in `attrs` cannot be imported from `module_name`\n\n Returns:\n A tuple of attributes from module_name, with None in place of each attribute that cannot be imported\n \"\"\"\n module = cls.import_module(\n name=module_name, message=message, raise_error=raise_error, warn=warn\n )\n\n if not module:\n return (None,) * len(attrs)\n else:\n _attr_list = list()\n for _attr_name in attrs:\n try:\n attr = getattr(module, _attr_name)\n _attr_list.append(attr)\n except AttributeError as attr_error:\n if not message:\n message = f\"cannot import {_attr_name} from {module_name}\"\n\n if warn:\n warnings.warn(message)\n if raise_error:\n raise ImportError(message) from None\n else:\n _attr_list.append(None)\n return tuple(_attr_list)\n","sub_path":"src/ychaos/utils/dependency.py","file_name":"dependency.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"314961367","text":"#import\n\nimport yaml\n#from coordinates_generator import CoordinatesGenerator\nimport cv2\nimport numpy as np\nfrom colors import *\n\n#from TakePoints import TakePoints\n\nimport imutils\nfrom imutils.video import FPS\nimport time\nimport os\nimport math\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\n\n#import DetectChars\n#import DetectPlates\n\n\nimport glob\n\n#global\nInput_Video = \"video/16.mp4\"\n\n#image used for background subtraction\nfirst_frame = cv2.imread(\"image/16.png\")\nfirst_frame = cv2.resize(first_frame, (1920, 1080), interpolation=cv2.INTER_CUBIC)\n\n#cut = first_frame.copy()\n#cut = first_frame[300:1020, 360:1640]\n#first_frame = cut\n\nfirst_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)\nfirst_gray = cv2.GaussianBlur(first_gray, (5, 5), 0)\n\n#map = cv2.imread(\"images/map_2.png\")\n\nBasePath = \"yolo-coco\"\nBaseConfidence = 0.3 #0.3\nBase_threshold = 0.2 #0.3\n\n#initialize a dictionary that maps strings to their corresponding\n# OpenCV object tracker implementations\nOPENCV_OBJECT_TRACKERS = {\n\t\"csrt\": cv2.TrackerCSRT_create, ##Recommended\n\t\"kcf\": cv2.TrackerKCF_create,\n\t\"boosting\": cv2.TrackerBoosting_create,\n\t\"mil\": cv2.TrackerMIL_create,\n\t\"tld\": cv2.TrackerTLD_create,\n\t\"medianflow\": cv2.TrackerMedianFlow_create, ## FAST\n\t\"mosse\": cv2.TrackerMOSSE_create\n}\n\n# grab the appropriate object tracker using our dictionary of\n# OpenCV object tracker objects\n#tracker = OPENCV_OBJECT_TRACKERS[\"csrt\"]()\n\n# initialize the bounding box coordinates of the object we are going to track\n\ndef main():\n fps = FPS().start()\n #writer = None\n\n cap = cv2.VideoCapture(Input_Video)\n\n YOLOINIT()\n\n ##=========================================================\n\n ##View 1\n f_num = 0\n Detecting_cnt_1 = 0\n RED_cnt_1 = 0\n BLUE_cnt_1 = 0\n initBB_1 = None\n tracker_1 = None\n\n Detecting_cnt_2 = 0\n RED_cnt_2 = 0\n BLUE_cnt_2 = 0\n initBB_2 = None\n tracker_2 = 
None\n\n while(cap.isOpened()):\n f_num =f_num +1\n print(\"F : \", f_num)\n\n (grabbed, frame) = cap.read()\n\n #cutImg = frame.copy()\n #cutImg = frame[300:1020, 360:1640]\n #frame = cutImg\n\n\n\n if f_num % 2== 0 and f_num>0:\n\n #======================================================\n #Background Subtraction\n #attach the tracker to the background-subtracted image rather than the original frame, so that no tracker residue is left on the background\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)\n\n difference = cv2.absdiff(first_gray, gray_frame)\n _, difference = cv2.threshold(difference, 25, 255, cv2.THRESH_BINARY)\n\n mask3 = cv2.cvtColor(difference, cv2.COLOR_GRAY2BGR) # 3 channel mask\n Substracted = cv2.bitwise_and(frame, mask3)\n #======================================================\n\n layerOutputs, start, end = YOLO_Detect(frame)\n\n # 3.YOLO_BOX_INFO(layerOutputs,BaseConfidence,Base_threshold))\n idxs, boxes, classIDs, confidences = YOLO_BOX_INFO(frame, layerOutputs, BaseConfidence, Base_threshold)\n\n # 4. Get the X,Y coordinates of the detections in the frame.\n # one coordinate set per detected vehicle\n Vehicle_x = []\n Vehicle_y = []\n Vehicle_w = []\n Vehicle_h = []\n\n #get the vehicle points\n Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h = Position(idxs, classIDs, boxes, Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h)\n\n #draw the vehicle points\n Draw_Points(frame, Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h)\n\n #Parking Zone Counter\n #view1 (detection area, Y axis +30, -30)\n\n\n\n\n #the four corner points\n vertices = [[[600, 350], [600, 650], [1250, 650], [1150, 350]]]\n\n\n tracker_1, initBB_1, RED_cnt_1, BLUE_cnt_1 = Passing_Counter_Zone(Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h, initBB_1, frame, tracker_1, Substracted,\\\n RED_cnt_1, BLUE_cnt_1, vertices)\n\n # Red_Line\n cv2.line(frame, (vertices[0][0][0], vertices[0][0][1]), (vertices[0][3][0], vertices[0][3][1]), (0, 0, 255), 2)\n cv2.putText(frame, \"IN Cnt : \" + str(RED_cnt_1), (vertices[0][0][0], vertices[0][0][1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)\n\n # Blue_Line\n #cv2.line(frame, (vertices[0][1][0], vertices[0][1][1]), (vertices[0][2][0], vertices[0][2][1]), (255, 0, 0), 2)\n #cv2.putText(frame, \"IN Cnt : \" + str(BLUE_cnt_1), (vertices[0][1][0], vertices[0][1][1] + 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)\n\n\n # Detecting Zone\n pts_1 = np.array([[vertices[0][1][0] + int(2 / 3 * (vertices[0][0][0] - vertices[0][1][0])),\n vertices[0][0][1] + int(1 / 3 * (vertices[0][1][1] - vertices[0][0][1]))], \\\n [vertices[0][1][0] + int(1 / 3 * (vertices[0][0][0] - vertices[0][1][0])),\n vertices[0][0][1] + int(2 / 3 * (vertices[0][1][1] - vertices[0][0][1]))], \\\n [vertices[0][3][0] + int(2 / 3 * (vertices[0][2][0] - vertices[0][3][0])),\n vertices[0][3][1] + int(2 / 3 * (vertices[0][2][1] - vertices[0][3][1]))], \\\n [vertices[0][3][0] + int(1 / 3 * (vertices[0][2][0] - vertices[0][3][0])),\n vertices[0][3][1] + int(1 / 3 * (vertices[0][2][1] - vertices[0][3][1]))]], \\\n np.int32)\n\n cv2.polylines(frame, [pts_1], True, (0, 255, 0), 2)\n\n\n\n #frame letterbox\n blank_image = np.zeros((64, 1920, 3), np.uint8)\n frame[0:64, 0:1920] = blank_image\n\n frame = cv2.resize(frame, (1280, 720), interpolation=cv2.INTER_CUBIC) #1920, 1080 -> 1280,720\n #Substracted = cv2.resize(Substracted , (1280, 720), interpolation=cv2.INTER_CUBIC)\n\n fps.update()\n fps.stop()\n cv2.putText(frame, \"FPS : \" + \"{:.2f}\".format(fps.fps()), (25, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,(255, 255, 255), 2)\n\n cv2.imshow(\"frame\", frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n return\n\n\n\ndef 
YOLOINIT():\n\t# load the COCO class labels our YOLO model was trained on\n\tlabelsPath = os.path.sep.join([BasePath, \"coco.names\"])\n\n\tglobal LABELS\n\tLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n\t# derive the paths to the YOLO weights and model configuration\n\tweightsPath = os.path.sep.join([BasePath, \"yolov3-tiny.weights\"])\n\tconfigPath = os.path.sep.join([BasePath, \"yolov3-tiny.cfg\"])\n\n\t# load our YOLO object detector trained on COCO dataset (80 classes)\n\tprint(\"[INFO] loading YOLO from disk...\")\n\tglobal net\n\tnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n\n\t# determine only the *output* layer names that we need from YOLO====================================================\n\tglobal ln\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n\t# initialize the video stream, pointer to output video file, and\n\t# frame dimensions\n\n\t(W, H) = (None, None)\n#end YOLOINIT()\n\n\n\ndef YOLO_Detect(frame):\n\t# construct a blob from the input frame and then perform a forward\n\t# pass of the YOLO object detector, giving us our bounding boxes\n\t# and associated probabilities\n\tblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\tstart = time.time()\n\tlayerOutputs = net.forward(ln)\n\tend = time.time()\n\n\tprint(\"[INFO] {:.6f} seconds\".format(end - start))\n\treturn layerOutputs, start, end\n#end YOLO_Detect()\n\n\ndef YOLO_BOX_INFO(frame,layerOutputs,BaseConfidence,Base_threshold):\n\n\tH, W = frame.shape[:2] ## 1920 x 1080\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\n\t# loop over each of the layer outputs\n\tfor output in layerOutputs:\n\t\t# loop over each of the detections\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability)\n\t\t\t# of the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > BaseConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to\n\t\t\t\t# the size of the image, keeping in mind that YOLO\n\t\t\t\t# actually returns the center (x, y)-coordinates of\n\t\t\t\t# the bounding box followed by the boxes' width and\n\t\t\t\t# height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates,\n\t\t\t\t# confidences, and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping\n\t# bounding boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, BaseConfidence, Base_threshold)\n\treturn idxs, boxes, classIDs, confidences\n\ndef Position(idxs,classIDs,boxes,Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h):\n\n if len(idxs) > 0:\n # loop over the indexes we are keeping\n for i in idxs.flatten():\n # extract the bounding box coordinates\n ##if the detected object is a car (2) or a truck (7)\n if classIDs[i] == 2 or classIDs[i] == 7:\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, 
h) = (boxes[i][2], boxes[i][3])\n\n # store the vehicle position and size for each detection\n\n Vehicle_x.append(x)\n Vehicle_y.append(y)\n Vehicle_w.append(w)\n Vehicle_h.append(h)\n\n return Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h\n\n\n\ndef Draw_Points(frame,Vehicle_x,Vehicle_y,Vehicle_w,Vehicle_h):\n if len(Vehicle_x) > 0:\n for i in range(0, len(Vehicle_x), 1):\n\n # draw on the color frame for display\n cv2.circle(frame, (Vehicle_x[i] + int(Vehicle_w[i] / 2), Vehicle_y[i] + Vehicle_h[i]), 5, (0, 255, 0), -1)\n#end func\n\n\ndef Passing_Counter_Zone(Vehicle_x,Vehicle_y,Vehicle_w,Vehicle_h,initBB,frame,tracker,Substracted,RED_cnt,BLUE_cnt,vertices):\n # applied to camera 1 only\n\n # Detecting Zone\n pts = np.array([[vertices[0][1][0] + int(2 / 3 * (vertices[0][0][0] - vertices[0][1][0])),\n vertices[0][0][1] + int(1 / 3 * (vertices[0][1][1] - vertices[0][0][1]))], \\\n [vertices[0][1][0] + int(1 / 3 * (vertices[0][0][0] - vertices[0][1][0])),\n vertices[0][0][1] + int(2 / 3 * (vertices[0][1][1] - vertices[0][0][1]))], \\\n [vertices[0][3][0] + int(2 / 3 * (vertices[0][2][0] - vertices[0][3][0])),\n vertices[0][3][1] + int(2 / 3 * (vertices[0][2][1] - vertices[0][3][1]))], \\\n [vertices[0][3][0] + int(1 / 3 * (vertices[0][2][0] - vertices[0][3][0])),\n vertices[0][3][1] + int(1 / 3 * (vertices[0][2][1] - vertices[0][3][1]))]], \\\n np.int32)\n\n # for each detected vehicle\n for d_num in range(0, len(Vehicle_x)):\n # P is the detection point of the vehicle within the frame\n p_x = Vehicle_x[d_num] + int(Vehicle_w[d_num] / 2)\n p_y = Vehicle_y[d_num] + int(Vehicle_h[d_num])\n\n crosses = 0 # number of intersections (even: the point is outside the area, odd: inside)\n for p in range(0, 4): # always a quadrilateral, so 4\n next_p = (p + 1) % 4\n if (pts[p][1] > p_y) != (pts[next_p][1] > p_y): ##if the Y coordinate of the detected point lies between the two polygon vertices\n\n # if atX is an intersection with the rightward ray, increment the intersection count\n atX = int((pts[next_p][0] - pts[p][0]) * (p_y - pts[p][1]) / (\n pts[next_p][1] - pts[p][1]) + pts[p][0])\n if p_x < atX:\n crosses = crosses + 1\n ##print in/out status as text\n # cv2.putText(frame, str(crosses), (atX, p_y), cv2.FONT_HERSHEY_SIMPLEX, 0.7,COLOR_GREEN, 3)\n\n if crosses % 2 == 0: # the point is outside the area\n pass\n elif crosses % 2 == 1: # the point is inside the area\n if initBB is None:\n initBB = (Vehicle_x[d_num], Vehicle_y[d_num], Vehicle_w[d_num], Vehicle_h[d_num])\n # activate the tracker\n tracker = cv2.TrackerCSRT_create()\n tracker.init(Substracted, initBB) # track on the background-subtracted image, not the original frame\n\n\n # behavior while the tracker is active\n if initBB is not None:\n # grab the new bounding box coordinates of the object\n (success, box) = tracker.update(Substracted)\n\n # check to see if the tracking was a success\n if success:\n (x, y, w, h) = [int(v) for v in box]\n\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)\n cv2.rectangle(Substracted, (x, y), (x + w, y + h), (0, 255, 255), 2)\n\n Tracking_Xp = x + int(w/2)\n Tracking_Yp = y + h\n\n # if the tracking point and a detected point are within 200 pixels of each other, match them and re-fit the tracker box\n Matched = False\n Matched_Xp = 0\n Matched_Yp = 0\n\n for i in range(0, len(Vehicle_x), 1):\n\n Vehicle_Xp = Vehicle_x[i] + int(Vehicle_w[i] / 2)\n Vehicle_Yp = Vehicle_y[i] + Vehicle_h[i]\n\n #if the distance between the tracker point and the detection point is at most 200\n if int(math.sqrt(pow(abs(Tracking_Xp-Vehicle_Xp), 2) + pow(abs(Tracking_Yp-Vehicle_Yp), 2))) < 200:\n cv2.line(frame, (Tracking_Xp, Tracking_Yp),\n (Vehicle_Xp, Vehicle_Yp), (125,255,125), 2)\n\n Matched = True\n Matched_Xp=Vehicle_Xp\n Matched_Yp=Vehicle_Yp\n\n #bounding box used to re-fit the tracker\n tempBB= (Vehicle_x[i], Vehicle_y[i], Vehicle_w[i], Vehicle_h[i])\n\n #delete and refresh the tracker\n tracker = cv2.TrackerCSRT_create()\n tracker.init(Substracted, tempBB) # track on the background-subtracted image, not the original frame\n break\n\n\n\n # if the tracker is outside the area - delete it (counting is based on the matched detected point, not on the tracker itself)\n #if (Tracking_Xp < DetectingZone[0]) or Tracking_Xp > (DetectingZone[2]) or Tracking_Yp < (DetectingZone[1]-10) or Tracking_Yp > (DetectingZone[3]+10)or \\\n #(Matched is True and (Matched_Xp < DetectingZone[0]) or Matched_Xp > (DetectingZone[2]) or Matched_Yp < (DetectingZone[1]-10) or Matched_Yp > (DetectingZone[3]+10)):\n\n #if Matched is True and the matched detected point is outside the area - delete\n if (Matched == True):\n\n\n initBB_xy = (initBB[0] + int(initBB[2] / 2), initBB[1] + initBB[3])\n\n Matched_xy =(Matched_Xp,Matched_Yp)\n\n RED_line_start_xy = (vertices[0][0][0],vertices[0][0][1])\n RED_line_end_xy = (vertices[0][3][0],vertices[0][3][1])\n\n BLUE_line_start_xy = (vertices[0][1][0],vertices[0][1][1])\n BLUE_line_end_xy =(vertices[0][2][0],vertices[0][2][1])\n\n #the commented-out block above was replaced after switching from the tracker point to the matched detected point\n\n\n if intersect(initBB_xy, Matched_xy, RED_line_start_xy, RED_line_end_xy):\n RED_cnt = RED_cnt + 1\n # reset initBB, lastBB and the tracker\n cv2.line(frame, (initBB[0] + int(initBB[2] / 2), initBB[1] + initBB[3]),\n (Matched_Xp, Matched_Yp), COLOR_RED, 2)\n initBB = None\n tracker = cv2.TrackerCSRT_create()\n\n if intersect(initBB_xy, Matched_xy, BLUE_line_start_xy, BLUE_line_end_xy):\n BLUE_cnt = BLUE_cnt + 1\n cv2.line(frame, (initBB[0] + int(initBB[2] / 2), initBB[1] + initBB[3]),\n (Matched_Xp, Matched_Yp), COLOR_RED, 2)\n # reset initBB, lastBB and the tracker\n initBB = None\n tracker = cv2.TrackerCSRT_create()\n\n\n\n return tracker, initBB,RED_cnt, BLUE_cnt\n\n\n\ndef ccw(A,B,C):\n return (C[1]-A[1])*(B[0]-A[0]) > (B[1]-A[1])*(C[0]-A[0])\n\ndef intersect(A, B, C, D):\n return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)\n\nif __name__ == '__main__':\n main()\n\n\n\"\"\"\n # Turn left\n #vertices1 = [[[0, 800], [420, 1080], [700, 460], [320, 460]]]\n vertices1 = [[[100, 1080], [500, 1080], [500, 430], [100, 430]]]\n\n tracker_2, initBB_2, RED_cnt_2, BLUE_cnt_2 = Passing_Counter_Zone(Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h, initBB_2, frame, tracker_2, Substracted,\\\n RED_cnt_2, BLUE_cnt_2, vertices1)\n\n # Red_Line\n cv2.line(frame, (vertices1[0][0][0], vertices1[0][0][1]), (vertices1[0][3][0], vertices1[0][3][1]), (0, 0, 255), 2)\n cv2.putText(frame, \"IN Cnt : \" + str(RED_cnt_2), (vertices1[0][0][0], vertices1[0][0][1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)\n\n # Detecting Zone\n pts_2 = np.array([[vertices1[0][1][0] + int(2 / 3 * (vertices1[0][0][0] - vertices1[0][1][0])),\n vertices1[0][0][1] + int(1 / 3 * (vertices1[0][1][1] - vertices1[0][0][1]))], \\\n [vertices1[0][1][0] + int(1 / 3 * (vertices1[0][0][0] - vertices1[0][1][0])),\n vertices1[0][0][1] + int(2 / 3 * (vertices1[0][1][1] - vertices1[0][0][1]))], \\\n [vertices1[0][3][0] + int(2 / 3 * (vertices1[0][2][0] - vertices1[0][3][0])),\n vertices1[0][3][1] + int(2 / 3 * (vertices1[0][2][1] - vertices1[0][3][1]))], \\\n [vertices1[0][3][0] + int(1 / 3 * (vertices1[0][2][0] - vertices1[0][3][0])),\n vertices1[0][3][1] + int(1 / 3 * (vertices1[0][2][1] - vertices1[0][3][1]))]], \\\n np.int32)\n\n cv2.polylines(frame, [pts_2], True, (0, 255, 0), 2)\n\"\"\"\n\n","sub_path":"test/3_road_16.py","file_name":"3_road_16.py","file_ext":"py","file_size_in_byte":19049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"646553807","text":"import boto3\n\nclass AWSHelper:\n def __init__(self):\n self.client = boto3.client(\n 's3', \n aws_access_key_id='XXXXXXXXXXXXXXXXXXXXX', \n 
aws_secret_access_key='XXXXXXXXXXXXXXXXXXXXXXXX',\n )\n self.BUCKET_NAME = 'XXXXXXXXXXXXXXXXXXXXXX'\n \n def uploadFile(self, filePath, fileKey):\n try:\n with open(filePath, \"rb\") as f:\n self.client.upload_fileobj(f, self.BUCKET_NAME, fileKey)\n f.close()\n except Exception as e:\n print('AWS File Upload Error:',e)\n \n def presignedUrl(self, fkey):\n try:\n response = self.client.generate_presigned_url('get_object', Params={'Bucket': self.BUCKET_NAME, 'Key': fkey}, ExpiresIn=3600, HttpMethod='GET')\n except Exception as e:\n print('AWS Url Create Error:',e)\n return None\n return response","sub_path":"cvat/apps/engine/awsHelper.py","file_name":"awsHelper.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"631322890","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n# @Time : 2021/5/20 13:48\n# @Author : 王俊\n# @File : read_excel.py\n# @Software : PyCharm\nimport os\nimport openpyxl\nfrom common import DATA_DIR\nfrom collections import namedtuple\n\n\nclass ExcelData(object):\n pass\n\n\nclass ReadExcelData(object):\n __instance = None\n\n def __new__(cls, *args, **kwargs):\n if cls.__instance is None:\n cls.__instance = super().__new__(cls)\n return cls.__instance\n\n def __init__(self, f, sheetname=None):\n self.f = os.path.join(DATA_DIR, f)\n self.sheetname = sheetname\n self.open()\n\n def open(self):\n self.workbook = openpyxl.load_workbook(self.f)\n self.sheet = self.workbook.active if self.sheetname is None else self.workbook[self.sheetname]\n\n def save_excel(self):\n self.workbook.save(self.f)\n self.workbook.close()\n\n def get_data(self) -> list:\n \"\"\"\n cases = []\n for row in rows[1:]:\n data = []\n # iterate over every row after the header row and pack it into a dict\n for r in row:\n # append each cell value to data\n data.append(r.value)\n # zip the header row with the data to build a dict\n case = dict(zip(title, data))\n # append each case to the full case list\n cases.append(case)\n # close the workbook\n self.close()\n # return the cases\n return cases\n :return:[{}]\n \"\"\"\n rows = list(self.sheet.rows)\n cases = [dict(zip([row.value for row in rows[0]], [r.value for r in row])) for row in rows[1:]]\n return cases\n\n def get_data_obj(self) -> list:\n \"\"\"\n Store cases as object --> attribute --> attribute value\n :return:\n \"\"\"\n rows = list(self.sheet.rows)\n cases = []\n for row in rows[1:]:\n case_obj = ExcelData()\n for k, v in list(zip([row.value for row in rows[0]], [r.value for r in row])):\n setattr(case_obj, k, v)\n cases.append(case_obj)\n return cases\n\n def get_name_tuple_data(self, row=None) -> list:\n self.sheet_head_tuple = tuple(self.sheet.iter_rows(min_row=1, values_only=True))[0]\n self.data_tuple = namedtuple(\"data_tuple\", self.sheet_head_tuple)\n if row is None:\n cases = [self.data_tuple(*_data) for _data in self.sheet.iter_rows(min_row=2, values_only=True)]\n return cases\n else:\n return self.get_specific_case_data(row=row)\n\n def get_specific_case_data(self, row) -> list:\n \"\"\"\n Get the test case in the given row\n :param row: row number\n :return:\n \"\"\"\n if isinstance(row, int) and (1 <= row <= self.sheet.max_row):\n return self.data_tuple(*tuple(self.sheet.iter_rows(min_row=row + 1, max_row=row + 1, values_only=True))[0])\n else:\n raise TypeError(\"The row number must be an integer and at least 1!\")\n\n def write_data(self, row, column, value):\n \"\"\"\n Write data into the Excel sheet\n :param row: row\n :param column: column\n :param value: value to write\n :return:\n \"\"\"\n self.sheet.cell(row=row, column=column, value=value)\n self.save_excel()\n\n def close(self):\n self.workbook.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, 
exc_tb):\n if exc_tb is not None or exc_val is not None:\n print(exc_tb)\n print(exc_val)\n return self.close()\n\n\nif __name__ == '__main__':\n with ReadExcelData(\"order_cases.xlsx\", \"createorder\") as r:\n # r.get_data()\n # r.get_data_obj()\n print(r.get_data())\n # print(r.get_data_obj()[0].data)\n # print(r.get_name_tuple_data())\n","sub_path":"common/read_excel.py","file_name":"read_excel.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"568692085","text":"# -*- coding: utf-8 -*-\n\n# ==============================================================================\n# SBEMimage, ver. 2.0\n# Acquisition control software for serial block-face electron microscopy\n# (c) 2018-2020 Friedrich Miescher Institute for Biomedical Research, Basel.\n# This software is licensed under the terms of the MIT License.\n# See LICENSE.txt in the project root folder.\n# ==============================================================================\n\n\"\"\"This module controls the microtome hardware (knife and motorized stage) via\n DigitalMicrograph (3View) or a serial port (katana).\n\n Microtome (base class)\n / \\\n / \\\n Microtome_3View Microtome_katana\n\"\"\"\n\nimport os\nimport json\nimport serial\n\nfrom time import sleep\n\nimport utils\n\n\nclass Microtome:\n \"\"\"Base class for microtome control. It implements minimum config/parameter\n handling. Undefined methods have to be implemented in the child class,\n otherwise NotImplementedError is raised.\n \"\"\"\n def __init__(self, config, sysconfig):\n self.cfg = config\n self.syscfg = sysconfig\n self.error_state = 0\n self.error_info = ''\n self.motor_warning = False # True when motors slower than expected\n # Load device name and other settings from sysconfig. These\n # settings overwrite the settings in config.\n recognized_devices = json.loads(self.syscfg['device']['recognized'])\n try:\n self.cfg['microtome']['device'] = (\n recognized_devices[int(self.syscfg['device']['microtome'])])\n except:\n self.cfg['microtome']['device'] = 'NOT RECOGNIZED'\n self.device_name = self.cfg['microtome']['device']\n # Get microtome stage limits from systemcfg\n # self.stage_limits: [min_x, max_x, min_y, max_y] in micrometres\n self.stage_limits = json.loads(\n self.syscfg['stage']['microtome_stage_limits'])\n # Get microtome motor speeds from syscfg\n self.motor_speed_x, self.motor_speed_y = (\n json.loads(self.syscfg['stage']['microtome_motor_speed']))\n # Knife settings in system config override the user config settings.\n self.cfg['microtome']['full_cut_duration'] = (\n self.syscfg['knife']['full_cut_duration'])\n self.cfg['microtome']['sweep_distance'] = (\n self.syscfg['knife']['sweep_distance'])\n # The following variables contain the last known (verified) position\n # of the microtome stage in X, Y, Z.\n self.last_known_x = None\n self.last_known_y = None\n self.last_known_z = None\n # self.prev_known_z stores the previous Z coordinate. 
It is used to\n # ensure that Z moves cannot be larger than 200 nm in safe mode.\n self.prev_known_z = None\n # self.stage_z_prev_session stores the last known Z coordinate at the end of\n # the previous session associated with the current user configuration.\n if self.cfg['microtome']['last_known_z'].lower() == 'none':\n self.stage_z_prev_session = None\n else:\n try:\n self.stage_z_prev_session = float(\n self.cfg['microtome']['last_known_z'])\n except Exception as e:\n self.error_state = 701\n self.error_info = str(e)\n return\n try:\n self.z_range = json.loads(\n self.syscfg['stage']['microtome_z_range'])\n except Exception as e:\n self.error_state = 701\n self.error_info = str(e)\n return\n self.simulation_mode = (\n self.cfg['sys']['simulation_mode'].lower() == 'true')\n self.use_oscillation = (\n self.cfg['microtome']['knife_oscillation'].lower() == 'true')\n\n # Catch errors that occur while reading configuration and converting\n # the string values into floats or integers\n try:\n # Knife cut speed in nm/s\n self.knife_cut_speed = int(float(\n self.cfg['microtome']['knife_cut_speed']))\n # Knife cut speed for fast cutting, typically for approach, nm/s\n self.knife_fast_speed = int(float(\n self.cfg['microtome']['knife_fast_speed']))\n # Knife retract speed in nm/s\n self.knife_retract_speed = int(float(\n self.cfg['microtome']['knife_retract_speed']))\n # Start and end position of cutting window, in nm\n self.cut_window_start = int(float(\n self.cfg['microtome']['knife_cut_start']))\n self.cut_window_end = int(float(\n self.cfg['microtome']['knife_cut_end']))\n # Knife oscillation frequency in Hz\n self.oscillation_frequency = int(float(\n self.cfg['microtome']['knife_osc_frequency']))\n # Knife oscillation amplitude in nm\n self.oscillation_amplitude = int(float(\n self.cfg['microtome']['knife_osc_amplitude']))\n # Duration of a full cut cycle in seconds\n self.full_cut_duration = float(\n self.cfg['microtome']['full_cut_duration'])\n # Sweep distance (lowering of Z position in nm before sweep)\n self.sweep_distance = int(float(\n self.cfg['microtome']['sweep_distance']))\n if (self.sweep_distance < 30) or (self.sweep_distance > 1000):\n # If outside permitted range, set to 70 nm as default\n self.sweep_distance = 70\n self.cfg['microtome']['sweep_distance'] = '70'\n # self.stage_move_wait_interval is the amount of time in seconds\n # that SBEMimage waits after a stage move before taking an image.\n self.stage_move_wait_interval = float(\n self.cfg['microtome']['stage_move_wait_interval'])\n\n except Exception as e:\n self.error_state = 701\n self.error_info = str(e)\n\n def save_to_cfg(self):\n self.cfg['microtome']['stage_move_wait_interval'] = str(\n self.stage_move_wait_interval)\n # Save stage limits in cfg and syscfg\n self.cfg['microtome']['stage_min_x'] = str(self.stage_limits[0])\n self.cfg['microtome']['stage_max_x'] = str(self.stage_limits[1])\n self.cfg['microtome']['stage_min_y'] = str(self.stage_limits[2])\n self.cfg['microtome']['stage_max_y'] = str(self.stage_limits[3])\n self.syscfg['stage']['microtome_stage_limits'] = str(self.stage_limits)\n # Save motor speeds to cfg and syscfg\n self.cfg['microtome']['motor_speed_x'] = str(self.motor_speed_x)\n self.cfg['microtome']['motor_speed_y'] = str(self.motor_speed_y)\n self.syscfg['stage']['microtome_motor_speed'] = str(\n [self.motor_speed_x, self.motor_speed_y])\n self.cfg['microtome']['knife_cut_speed'] = str(int(\n self.knife_cut_speed))\n self.cfg['microtome']['knife_retract_speed'] = str(int(\n 
self.knife_retract_speed))\n self.cfg['microtome']['knife_fast_speed'] = str(int(\n self.knife_fast_speed))\n self.cfg['microtome']['knife_cut_start'] = str(int(\n self.cut_window_start))\n self.cfg['microtome']['knife_cut_end'] = str(int(\n self.cut_window_end))\n self.cfg['microtome']['knife_oscillation'] = str(self.use_oscillation)\n self.cfg['microtome']['knife_osc_frequency'] = str(int(\n self.oscillation_frequency))\n self.cfg['microtome']['knife_osc_amplitude'] = str(int(\n self.oscillation_amplitude))\n self.cfg['microtome']['last_known_z'] = str(self.last_known_z)\n # Save full cut duration in both cfg and syscfg\n self.cfg['microtome']['full_cut_duration'] = str(self.full_cut_duration)\n self.syscfg['knife']['full_cut_duration'] = str(self.full_cut_duration)\n\n def do_full_cut(self):\n \"\"\"Perform a full cut cycle. This is the only knife control function\n used during stack acquisitions.\n \"\"\"\n raise NotImplementedError\n\n def do_full_approach_cut(self):\n \"\"\"Perform a full cut cycle under the assumption that knife is\n already neared.\"\"\"\n raise NotImplementedError\n\n def do_sweep(self, z_position):\n \"\"\"Perform a sweep by cutting slightly above the surface.\"\"\"\n if (((self.sweep_distance < 30) or (self.sweep_distance > 1000))\n and self.error_state == 0):\n self.error_state = 205\n self.error_info = 'microtome.do_sweep: sweep distance out of range'\n elif self.error_state == 0:\n raise NotImplementedError\n\n def cut(self):\n # only used for testing\n raise NotImplementedError\n\n def retract_knife(self):\n # only used for testing\n raise NotImplementedError\n\n def set_motor_speeds(self, motor_speed_x, motor_speed_y):\n self.motor_speed_x = motor_speed_x\n self.motor_speed_y = motor_speed_y\n return self.write_motor_speeds_to_script()\n\n def write_motor_speeds_to_script(self):\n raise NotImplementedError\n\n def move_stage_to_x(self, x):\n # only used for testing\n raise NotImplementedError\n\n def move_stage_to_y(self, y):\n # only used for testing\n raise NotImplementedError\n\n def rel_stage_move_duration(self, target_x, target_y):\n \"\"\"Use the last known position and the given target position\n to calculate how much time it will take for the motors to move\n to target position. Add self.stage_move_wait_interval.\n \"\"\"\n duration_x = abs(target_x - self.last_known_x) / self.motor_speed_x\n duration_y = abs(target_y - self.last_known_y) / self.motor_speed_y\n return max(duration_x, duration_y) + self.stage_move_wait_interval\n\n def stage_move_duration(self, from_x, from_y, to_x, to_y):\n duration_x = abs(to_x - from_x) / self.motor_speed_x\n duration_y = abs(to_y - from_y) / self.motor_speed_y\n return max(duration_x, duration_y) + self.stage_move_wait_interval\n\n def get_stage_xy(self, wait_interval=0.25):\n \"\"\"Get current XY coordinates from DM\"\"\"\n raise NotImplementedError\n\n def get_stage_x(self):\n return self.get_stage_xy()[0]\n\n def get_stage_y(self):\n return self.get_stage_xy()[1]\n\n def get_stage_xyz(self):\n x, y = self.get_stage_xy()\n z = self.get_stage_z()\n return x, y, z\n\n def move_stage_to_xy(self, coordinates):\n \"\"\"Move stage to coordinates X/Y. This function is called during\n acquisitions. It includes waiting times. The other move functions\n below do not.\n \"\"\"\n raise NotImplementedError\n\n def get_stage_z(self, wait_interval=0.5):\n \"\"\"Get current Z coordinate from DM\"\"\"\n raise NotImplementedError\n\n def move_stage_to_z(self, z, safe_mode=True):\n \"\"\"Move stage to new z position. 
Used during stack acquisition\n        before each cut and for sweeps.\"\"\"\n        raise NotImplementedError\n\n    def stop_script(self):\n        raise NotImplementedError\n\n    def near_knife(self):\n        raise NotImplementedError\n\n    def clear_knife(self):\n        raise NotImplementedError\n\n    def check_for_cut_cycle_error(self):\n        raise NotImplementedError\n\n    def reset_error_state(self):\n        raise NotImplementedError\n\n\nclass Microtome_3View(Microtome):\n    \"\"\"\n    This class contains the methods to control a 3View microtome via\n    DigitalMicrograph (DM).\n    The DM script SBEMimage_DMcom_GMS2.s (or SBEMimage_DMcom_GMS3.s for GMS3)\n    must be running in DM to receive commands from SBEMimage and transmit them\n    to the 3View hardware (XY stage and knife arm).\n    Communication with DM is achieved by read/write file operations.\n    The following files are used:\n        DMcom.in:  Command/parameter file. Contains a command and up to\n            two optional parameters.\n        DMcom.cmd: The file 'DMcom.in' is renamed to 'DMcom.cmd' to trigger\n            its contents to be processed by DM.\n        DMcom.out: Contains return value(s) from DM\n        DMcom.ack: Confirms that a command has been received and processed.\n        DMcom.ac2: Confirms that a full cut cycle has been completed.\n        DMcom.wng: Signals a warning (a problem occurred, but could be resolved).\n        DMcom.err: Signals that a critical error occurred.\n\n    The 3View knife parameters (knife speeds, oscillation on/off) cannot be\n    changed remotely via SBEMimage; they must be set in DM before the\n    acquisition starts. The pre-acquisition dialog box asks the user\n    to ensure these settings match the DM settings (for logging purposes).\n    \"\"\"\n\n    def __init__(self, config, sysconfig):\n        super().__init__(config, sysconfig)\n        # Paths to DM communication files\n        self.INPUT_FILE = os.path.join('..', 'dm', 'DMcom.in')\n        self.COMMAND_FILE = os.path.join('..', 'dm', 'DMcom.cmd')\n        self.OUTPUT_FILE = os.path.join('..', 'dm', 'DMcom.out')\n        self.ACK_FILE = os.path.join('..', 'dm', 'DMcom.ack')\n        self.ACK_CUT_FILE = os.path.join('..', 'dm', 'DMcom.ac2')\n        self.WARNING_FILE = os.path.join('..', 'dm', 'DMcom.wng')\n        self.ERROR_FILE = os.path.join('..', 'dm', 'DMcom.err')\n
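\n        # Command-file lifecycle (sketch of the protocol described in the class\n        # docstring): SBEMimage writes DMcom.in, renames it to DMcom.cmd to\n        # trigger the DM script, then reads the reply from DMcom.out plus\n        # DMcom.ack (or DMcom.err / DMcom.wng on failure or warning).\n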
\n        # Perform handshake and read initial X/Y/Z\n        if not self.simulation_mode and self.error_state == 0:\n            self._send_dm_command('Handshake')\n            # DM script should react to trigger file by reading command file\n            # usually within 0.1 s (default check interval)\n            sleep(1)  # Give DM plenty of time to write response into file\n            if self._dm_handshake_success():\n                # Get initial X/Y/Z with long wait intervals (1s) for responses\n                current_z = self.get_stage_z(wait_interval=1)\n                current_x, current_y = self.get_stage_xy(wait_interval=1)\n                if ((current_x is None) or (current_y is None)\n                        or (current_z is None)):\n                    self.error_state = 101\n                    self.error_info = ('microtome.__init__: could not read '\n                                       'initial stage position')\n                elif current_z < 0:\n                    self.error_state = 101\n                    self.error_info = ('microtome.__init__: stage z position '\n                                       'must not be negative.')\n                # Check if current Z coordinate matches last known Z from\n                # previous session\n                elif (self.stage_z_prev_session is not None\n                        and abs(current_z - self.stage_z_prev_session) > 0.01):\n                    self.error_state = 206\n                    self.error_info = ('microtome.__init__: stage z position '\n                                       'mismatch')\n                # Update motor speeds in DM script\n                success = self.write_motor_speeds_to_script()\n                # If update unsuccessful, set new error state unless microtome\n                # is already in an error state after reading the coordinates.\n                if not success and self.error_state == 0:\n                    self.error_state = 101\n                    self.error_info = ('microtome.__init__: could not update '\n                                       'DM script with current motor speeds')\n            else:\n                self.error_state = 101\n                self.error_info = 'microtome.__init__: handshake failed'\n\n    def _send_dm_command(self, cmd, set_values=[]):\n        \"\"\"Send a command to the DigitalMicrograph script.\"\"\"\n        # If output file exists, delete it to ensure old return values are gone\n        if os.path.isfile(self.OUTPUT_FILE):\n            os.remove(self.OUTPUT_FILE)\n        # Delete .ack and .ac2 files\n        if os.path.isfile(self.ACK_FILE):\n            os.remove(self.ACK_FILE)\n        if os.path.isfile(self.ACK_CUT_FILE):\n            os.remove(self.ACK_CUT_FILE)\n        # Try to open input file\n        success, input_file = utils.try_to_open(self.INPUT_FILE, 'w+')\n        if success:\n            input_file.write(cmd)\n            for item in set_values:\n                input_file.write('\\n' + str(item))\n            input_file.close()\n            # Trigger DM script by renaming input file to command file\n            try:\n                os.rename(self.INPUT_FILE, self.COMMAND_FILE)\n            except Exception as e:\n                if self.error_state == 0:\n                    self.error_state = 102\n                    self.error_info = ('microtome._send_dm_command: could not '\n                                       'rename input file (' + str(e) + ')')\n        elif self.error_state == 0:\n            self.error_state = 102\n            self.error_info = ('microtome._send_dm_command: could not write '\n                               'to input file')\n\n    def _read_dm_return_values(self):\n        \"\"\"Try to read output file and, if successful, return values.\"\"\"\n        return_values = []\n        success, return_file = utils.try_to_open(self.OUTPUT_FILE, 'r')\n        if success:\n            for line in return_file:\n                return_values.append(line.rstrip())\n            return_file.close()\n        elif self.error_state == 0:\n            self.error_state = 104\n            self.error_info = ('microtome._read_dm_return_values: could not '\n                               'read from output file')\n        if return_values == []:\n            return_values = [None, None]\n        return return_values\n\n    def _dm_handshake_success(self):\n        \"\"\"Verify that handshake command has worked.\"\"\"\n        read_success = True\n        return_value = None\n        try:\n            file_handle = open(self.OUTPUT_FILE, 'r')\n        except:\n            # Try once more\n            sleep(1)\n            try:\n                file_handle = open(self.OUTPUT_FILE, 'r')\n            except:\n                read_success = False\n        if read_success:\n            return_value = file_handle.readline().rstrip()\n            file_handle.close()\n            if return_value == 'OK':\n                return True\n            else:\n                # Error state assigned in self.__init__()\n                return False\n\n    def do_full_cut(self):\n        \"\"\"Perform a full cut cycle. This is the only knife control function\n        used during stack acquisitions.\n        \"\"\"\n        self._send_dm_command('MicrotomeStage_FullCut')\n        sleep(0.2)\n
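\n    # Typical cut-cycle usage from the acquisition loop (sketch, using only\n    # names defined in this module; see also do_sweep() below):\n    #     self.do_full_cut()\n    #     sleep(self.full_cut_duration)\n    #     duration_exceeded = self.check_for_cut_cycle_error()\n\n    def do_full_approach_cut(self):\n        \"\"\"Perform a full cut cycle under the assumption that the knife is\n        already neared. 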
This function is called repeatedly from the approach\n dialog (ApproachDlg) after the knife has been neared.\n \"\"\"\n self._send_dm_command('MicrotomeStage_FullApproachCut')\n sleep(0.2)\n\n def do_sweep(self, z_position):\n \"\"\"Perform a sweep by cutting slightly above the surface.\"\"\"\n if (((self.sweep_distance < 30) or (self.sweep_distance > 1000))\n and self.error_state == 0):\n self.error_state = 205\n self.error_info = 'microtome.do_sweep: sweep distance out of range'\n elif self.error_state == 0:\n # Move to new z position\n sweep_z_position = z_position - (self.sweep_distance / 1000)\n self.move_stage_to_z(sweep_z_position)\n if self.error_state > 0:\n # Try again\n self.reset_error_state()\n sleep(2)\n self.move_stage_to_z(sweep_z_position)\n if self.error_state == 0:\n # Do a cut cycle above the sample surface to clear away debris\n self.do_full_cut()\n sleep(self.full_cut_duration)\n # Check if error occurred during cut cycle.\n if os.path.isfile(self.ERROR_FILE):\n self.error_state = 205\n self.error_info = ('microtome.do_sweep: error during '\n 'cutting cycle')\n elif not os.path.isfile(self.ACK_CUT_FILE):\n # Cut cycle was not carried out\n self.error_state = 103\n self.error_info = ('microtome.do_sweep: command not '\n 'processed by DM script')\n\n # Move to previous z position (before sweep)\n self.move_stage_to_z(z_position)\n if self.error_state > 0:\n # Try again\n sleep(2)\n self.reset_error_state()\n self.move_stage_to_z(z_position)\n\n def cut(self):\n # only used for testing\n self._send_dm_command('MicrotomeStage_Cut')\n sleep(1.2/self.knife_cut_speed)\n\n def retract_knife(self):\n # only used for testing\n self._send_dm_command('MicrotomeStage_Retract')\n sleep(1.2/self.knife_retract_speed)\n\n def write_motor_speeds_to_script(self):\n self._send_dm_command('SetMotorSpeedXY',\n [self.motor_speed_x, self.motor_speed_y])\n sleep(1)\n # Check if command was processed by DM\n if os.path.isfile(self.ACK_FILE):\n success = True\n else:\n sleep(2)\n success = os.path.isfile(self.ACK_FILE)\n return success\n\n def move_stage_to_x(self, x):\n # only used for testing\n self._send_dm_command('MicrotomeStage_SetPositionX', [x])\n sleep(0.5)\n self.get_stage_xy()\n\n def move_stage_to_y(self, y):\n # only used for testing\n self._send_dm_command('MicrotomeStage_SetPositionY', [y])\n sleep(0.5)\n self.get_stage_xy()\n\n def get_stage_xy(self, wait_interval=0.25):\n \"\"\"Get current XY coordinates from DM.\"\"\"\n success = True\n self._send_dm_command('MicrotomeStage_GetPositionXY')\n sleep(wait_interval)\n answer = self._read_dm_return_values()\n try:\n x, y = float(answer[0]), float(answer[1])\n except:\n x, y = None, None\n success = False\n if success:\n self.last_known_x, self.last_known_y = x, y\n return x, y\n\n def move_stage_to_xy(self, coordinates):\n \"\"\"Move stage to coordinates (X, Y). This function is called during\n acquisitions. It includes waiting times. 
The other move functions\n below do not.\n \"\"\"\n x, y = coordinates\n self._send_dm_command('MicrotomeStage_SetPositionXY_Confirm', [x, y])\n sleep(0.2)\n # Wait for the time it takes the motors to move\n # plus stage_move_wait_interval\n move_duration = self.rel_stage_move_duration(x, y)\n sleep(move_duration + 0.1)\n # Check if the command was processed successfully\n if os.path.isfile(self.ACK_FILE):\n self.last_known_x, self.last_known_y = x, y\n else:\n # Wait for up to 2.5 additional seconds (DM script will try\n # to read coordinates again to confirm move)\n if self.stage_move_wait_interval < 2.5:\n sleep(2.5 - self.stage_move_wait_interval)\n # Check again for ACK_FILE\n if os.path.isfile(self.ACK_FILE):\n # Move was carried out, but with a delay.\n self.last_known_x, self.last_known_y = x, y\n # Check if there was a warning\n if os.path.isfile(self.WARNING_FILE):\n # There was a warning from the script - motors may have\n # moved too slowly, but they reached the target position\n # after an extra 2s delay.\n self.motor_warning = True\n elif os.path.isfile(self.ERROR_FILE) and self.error_state == 0:\n # Move was not confirmed and error file exists:\n # The motors did not reach the target position.\n self.error_state = 201\n self.error_info = ('microtome.move_stage_to_xy: did not reach '\n 'target xy position')\n # Read last known position (written into output file by DM\n # if a move fails.)\n current_xy = self._read_dm_return_values()\n if len(current_xy) == 2:\n try:\n self.last_known_x = float(current_xy[0])\n self.last_known_y = float(current_xy[1])\n except:\n pass # keep current coordinates\n elif self.error_state == 0:\n # If neither .ack nor .err exist, the command was not processed\n self.error_state = 103\n self.error_info = ('microtome.move_stage_to_xy: command not '\n 'processed by DM script')\n\n def get_stage_z(self, wait_interval=0.5):\n \"\"\"Get current Z coordinate from DM.\"\"\"\n success = True\n self._send_dm_command('MicrotomeStage_GetPositionZ')\n sleep(wait_interval)\n answer = self._read_dm_return_values()\n try:\n z = float(answer[0])\n except:\n z = None\n success = False\n if success:\n if (self.last_known_z is not None\n and abs(z - self.last_known_z) > 0.01):\n self.error_state = 206\n self.prev_known_z = self.last_known_z\n self.last_known_z = z\n return z\n\n def move_stage_to_z(self, z, safe_mode=True):\n \"\"\"Move stage to new z position. 
Used during stack acquisitions\n        before each cut and for sweeps.\n        \"\"\"\n        if (((self.last_known_z >= 0) and (abs(z - self.last_known_z) > 0.205))\n                and self.error_state == 0 and safe_mode):\n            # Z must not change more than ~200 nm during stack acquisitions!\n            self.error_state = 203\n            self.error_info = ('microtome.move_stage_to_z: Z move too '\n                               'large (> 200 nm)')\n        else:\n            self._send_dm_command('MicrotomeStage_SetPositionZ_Confirm', [z])\n            sleep(1)  # wait for command to be read and executed\n            # Check if command was processed\n            if os.path.isfile(self.ACK_FILE):\n                # Accept new position as last known position\n                self.prev_known_z = self.last_known_z\n                self.last_known_z = z\n            elif os.path.isfile(self.ERROR_FILE) and self.error_state == 0:\n                # There was an error during the move\n                self.error_state = 202\n                self.error_info = ('microtome.move_stage_to_z: did not reach '\n                                   'target z position')\n            elif self.error_state == 0:\n                # If neither .ack nor .err exist, the command was not processed\n                self.error_state = 103\n                self.error_info = ('microtome.move_stage_to_z: command not '\n                                   'processed by DM script')\n\n    def stop_script(self):\n        self._send_dm_command('StopScript')\n        sleep(0.2)\n\n    def near_knife(self):\n        # only used for testing\n        self._send_dm_command('MicrotomeStage_Near')\n        sleep(4)\n\n    def clear_knife(self):\n        # only used for testing\n        self._send_dm_command('MicrotomeStage_Clear')\n        sleep(4)\n\n    def check_for_cut_cycle_error(self):\n        duration_exceeded = False\n        # Check if an error occurred during self.do_full_cut()\n        if self.error_state == 0 and os.path.isfile(self.ERROR_FILE):\n            self.error_state = 204\n            self.error_info = ('microtome.do_full_cut: error during '\n                               'cutting cycle')\n        elif not os.path.isfile(self.ACK_CUT_FILE):\n            # Cut cycle was not carried out within the specified time limit\n            self.error_state = 103\n            self.error_info = ('microtome.do_full_cut: command not '\n                               'processed by DM script')\n            duration_exceeded = True\n            # Wait for another 10 sec maximum\n            for i in range(10):\n                sleep(1)\n                if os.path.isfile(self.ACK_CUT_FILE):\n                    self.error_state = 0\n                    self.error_info = ''\n                    break\n        return duration_exceeded\n\n    def reset_error_state(self):\n        self.error_state = 0\n        self.error_info = ''\n        self.motor_warning = False\n        if os.path.isfile(self.ERROR_FILE):\n            os.remove(self.ERROR_FILE)\n        if os.path.isfile(self.WARNING_FILE):\n            os.remove(self.WARNING_FILE)\n\n\nclass Microtome_katana(Microtome):\n    \"\"\"\n    Class for ConnectomX katana microtome. This microtome provides cutting\n    functionality and controls the Z position. X and Y are controlled by the\n    SEM stage. The microtome hardware is controlled via COM port commands.\n    \"\"\"\n
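\n    # Serial link parameters (as configured in connect() below): 115200 baud,\n    # 8 data bits, no parity, 1 stop bit, 0.5 s read timeout.\n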
\n    def __init__(self, config, sysconfig):\n        super().__init__(config, sysconfig)\n        self.selected_port = sysconfig['device']['katana_com_port']\n        self.clear_position = int(sysconfig['knife']['katana_clear_position'])\n        self.retract_clearance = int(\n            sysconfig['stage']['katana_retract_clearance'])\n        # Realtime parameters\n        self.encoder_position = None\n        self.knife_position = None\n        self.current_osc_freq = None\n        self.current_osc_amp = None\n        # Connection status:\n        self.connected = False\n        # Try to connect with current selected port\n        self.connect()\n        if self.connected:\n            # wait after opening port for arduino to initialise (won't be\n            # necessary in future when using extra usb-serial chip)\n            sleep(1)\n            # initial comm is lost when on arduino usb port. (ditto)\n            self._send_command(' ')\n            # clear any incoming data from the serial buffer (probably\n            # not necessary here)\n            self.com_port.flushInput()\n            # need to delay after opening port before sending anything.\n            # 0.2s fails. 0.25s seems to be always OK. Suggest >0.3s for\n            # reliability.\n            sleep(0.3)\n            # if this software is the first to interact with the hardware\n            # after power-on, then the motor parameters need to be set\n            # (no harm to do anyway)\n            self.initialise_motor()\n            # get the initial Z position from the encoder\n            self.last_known_z = self.get_stage_z()\n            print('Starting Z position: ' + str(self.last_known_z) + 'µm')\n\n    def save_to_cfg(self):\n        super().save_to_cfg()\n        # Save katana-specific keys in syscfg\n        self.syscfg['device']['katana_com_port'] = self.selected_port\n        self.syscfg['knife']['katana_clear_position'] = str(int(\n            self.clear_position))\n        self.syscfg['stage']['katana_retract_clearance'] = str(int(\n            self.retract_clearance))\n\n    def connect(self):\n        # Open COM port\n        if not self.simulation_mode:\n            self.com_port = serial.Serial()\n            self.com_port.port = self.selected_port\n            self.com_port.baudrate = 115200\n            self.com_port.bytesize = 8\n            self.com_port.parity = 'N'\n            self.com_port.stopbits = 1\n            # With no timeout, this code freezes if it doesn't get a response.\n            self.com_port.timeout = 0.5\n            try:\n                self.com_port.open()\n                self.connected = True\n                # print('Connection to katana successful.')\n            except Exception as e:\n                print('Connection to katana failed: ' + repr(e))\n\n    def initialise_motor(self):\n        self._send_command('XM2')\n        self._send_command('XY13,1')\n        self._send_command('XY11,300')\n        self._send_command('XY3,-3000000')\n        self._send_command('XY4,3000000')\n        self._send_command('XY2,0')\n        self._send_command('XY6,1')\n        self._send_command('XY12,0')\n\n    def _send_command(self, cmd):\n        \"\"\"Send command to katana via serial port.\"\"\"\n        self.com_port.write((cmd + '\\r').encode())\n        # always need some delay after sending command.\n        # suggest to keep 0.05 for now\n        sleep(0.05)\n\n    def _read_response(self):\n        \"\"\"Read a response from katana via the serial port.\"\"\"\n        return self.com_port.readline(13).decode()\n        # Katana returns CR character at end of line (this is how our motor\n        # controller works so it is easiest to keep it this way)\n\n    def _wait_until_knife_stopped(self):\n        print('waiting for knife to stop...')\n        # initial delay to make sure we don't check before knife has\n        # started moving!\n        sleep(0.25)\n        # knifeStatus = self._read_response()\n        self.com_port.flushInput()\n        while True:\n            self._send_command('KKP')  # KKP queries knife movement status\n            # default used in case there is no response on the next read\n            knife_status = 'KKP:1'\n            response = self._read_response()\n            if response:\n                knife_status = response.rstrip()\n            # print(\" knife status: \" + knifeStatus)\n\n            # optional to show knife position so user knows it hasn't frozen!\n            # _read_realtime_data is not as robust as other com port reads, and\n            # there is no error check, so it should only be used for display\n            # purposes. (it is very fast though, so you can use it in a loop\n            # to update the GUI)\n            self._read_realtime_data()\n            print(\"Knife status: \"\n                  + knife_status\n                  + \", \\tKnife pos: \"\n                  + str(self.knife_position)\n                  + \"µm\")\n\n            if knife_status == 'KKP:0':  # If knife is not moving\n                # print('Knife stationary')\n                return 0\n
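            # Poll round-trip sketch (responses illustrative): _send_command('KKP')\n            # is answered with 'KKP:1' while the knife is still moving and with\n            # 'KKP:0' once it has stopped, which ends this loop.\n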
            # re-check every 0.2s. Repeated queries like this shouldn't be more\n            # often than every 0.025s (risks overflowing the microtome\n            # serial buffer)\n            sleep(0.2)\n\n    def _bytes_to_num(self, val_str, start, end):\n        # little-endian unsigned decode, e.g. bytes (0x10, 0x27) with start=0,\n        # end=1 give 0x10 + 0x27*256 = 10000\n        val = 0\n        for i in range(start, end + 1):\n            val += val_str[i] * (2**(8 * (i - start)))\n        return val\n\n    def _read_realtime_data(self):\n        # _read_realtime_data gets the data as bytes rather than ascii. It is\n        # not as robust as other com port reads, and there is no error check,\n        # so it should only be used for display purposes. (it is very fast\n        # though, so you can use it in a loop to update the GUI)\n        self.com_port.flushInput()\n        self._send_command('KRT')\n        datalength = 10\n        c = self.com_port.read(datalength)\n        # Can't get arduino to send negative number in binary. Temporary\n        # solution is to add large number before sending and then subtract\n        # it here\n        self.encoder_position = self._bytes_to_num(c, 0, 3) - 10000000\n        # nice to see where the knife is whilst we wait for a slow movement:\n        self.knife_position = self._bytes_to_num(c, 4, 5)\n        # the following gets retrieved because (when I get around to\n        # implementing it) the knife will have a 'resonance mode' option. So\n        # the frequency will shift to keep the knife at max amplitude\n        self.current_osc_freq = self._bytes_to_num(c, 6, 7)\n        # measured amplitude in nm. (Arduino scales it by 100)\n        self.current_osc_amp = self._bytes_to_num(c, 8, 9) / 100\n        # print(str(katana.encoderPos)+\" \\t\"+str(katana.knifepos)\n        #       +\" \\t\"+str(katana.oscfreq)+\" \\t\"+str(katana.oscAmp))\n\n    def _reached_target(self):\n        \"\"\"Check to see if the z motor is still moving (returns 1 if target\n        reached, otherwise 0 if still moving).\"\"\"\n        self.com_port.flushInput()\n        self._send_command('XY23')\n        # XY23 passes through to the motor controller.\n        sleep(0.03)\n        response = self._read_response()\n        if response.startswith('XY23'):\n            response = response.rstrip()\n            response = response.replace('XY23:', '')\n            status = response.split(',')\n            # print(status[1])\n            return int(status[1])\n        else:\n            return 0\n\n    def do_full_cut(self):\n        \"\"\"Perform a full cut cycle.\"\"\"\n        # Move to cutting window\n        # (good practice to check the knife is not moving before starting)\n        self._wait_until_knife_stopped()\n        print('Moving to cutting position '\n              + str(self.cut_window_start) + ' ...')\n
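        # Command sketch (serial commands as used in this class): KMS<speed>\n        # sets the knife speed, KKM<pos> moves the knife arm, KO<freq> and\n        # KOA<amp> control the oscillator, KT<z>,<speed> moves the Z stage.\n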
        self._send_command('KMS' + str(self.knife_fast_speed))\n        # send required speed. The reason I'm setting it every time before\n        # moving is that I'm using two different speeds\n        # (knifeFastSpeed & knifeCutSpeed)\n        self._send_command('KKM' + str(self.cut_window_start))  # send required position\n\n        # Turn oscillator on\n        self._wait_until_knife_stopped()\n        if self.use_oscillation:  # set in Microtome.__init__ from the config\n            # turn oscillator on\n            self._send_command('KO' + str(self.oscillation_frequency))\n            self._send_command('KOA' + str(self.oscillation_amplitude))\n\n        # Cut sample\n        print('Cutting sample...')\n        self._send_command('KMS' + str(self.knife_cut_speed))\n        self._send_command('KKM' + str(self.cut_window_end))\n\n        # Turn oscillator off\n        self._wait_until_knife_stopped()\n        if self.use_oscillation:\n            self._send_command('KOA0')\n\n        # Drop sample\n        print('Dropping sample by ' + str(self.retract_clearance/1000) + 'µm...')\n        # TODO: discuss how Z is handled:\n        # drop sample before knife retract\n        # (assumption: the sample currently sits at the cutting plane, i.e. at\n        # the last known Z position)\n        desired_z_pos = self.last_known_z\n        self.move_stage_to_z(\n            desired_z_pos - self.retract_clearance, 100)\n\n        # Retract knife\n        print('Retracting knife...')\n        self._send_command('KMS' + str(self.knife_fast_speed))\n        self._send_command('KKM' + str(self.clear_position))\n\n        # Raise sample to cutting plane\n        self._wait_until_knife_stopped()\n        print('Returning sample to cutting plane...')\n        self.move_stage_to_z(desired_z_pos, 100)\n\n    def do_full_approach_cut(self):\n        \"\"\"Perform a full cut cycle under the assumption that the knife is\n        already neared.\"\"\"\n        pass\n\n    def do_sweep(self, z_position):\n        \"\"\"Perform a sweep by cutting slightly above the surface.\"\"\"\n        pass\n\n    def cut(self):\n        # only used for testing\n        pass\n\n    def retract_knife(self):\n        # only used for testing\n        pass\n\n    def get_stage_z(self, wait_interval=0.5):\n        \"\"\"Get current Z position\"\"\"\n        self.com_port.flushInput()\n        self._send_command('KE')\n        response = self._read_response()\n        # response will look like 'KE:120000' (for position of 0.12mm)\n        response = response.rstrip()\n        response = response.replace('KE:', '')\n        z = int(response)\n        return z\n\n    def get_stage_z_prev_session(self):\n        return self.stage_z_prev_session\n\n    def move_stage_to_z(self, z, speed, safe_mode=True):\n        \"\"\"Move to specified Z position, and block until it is reached.\"\"\"\n        print('Moving to Z=' + str(z) + 'µm...')\n        self._send_command('KT' + str(z) + ',' + str(speed))\n        response = self._read_response()\n        response = response.rstrip()\n        while self._reached_target() != 1:\n            # _reached_target() returns 1 when stage is at target position\n            self._read_realtime_data()\n            print('stage pos: ' + str(self.encoder_position))\n            sleep(0.05)\n        print('stage finished moving')\n\n    def near_knife(self):\n        # only used for testing\n        pass\n\n    def clear_knife(self):\n        # only used for testing\n        pass\n\n    def get_clear_position(self):\n        return self.clear_position\n\n    def set_clear_position(self, clear_position):\n        self.clear_position = int(clear_position)\n\n    def get_retract_clearance(self):\n        return self.retract_clearance\n\n    def set_retract_clearance(self, retract_clearance):\n        self.retract_clearance = int(retract_clearance)\n
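\n    # Worked example (illustrative values): with retract_clearance = 2000 nm,\n    # do_full_cut() drops the sample by 2000/1000 = 2.0 µm before the knife\n    # retracts and then raises it back to the cutting plane.\n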
\n    def check_for_cut_cycle_error(self):\n        pass\n\n    def reset_error_state(self):\n        self.error_state = 0\n        self.error_info = ''\n        self.motor_warning = False\n\n    def disconnect(self):\n        if self.connected:\n            self.com_port.close()\n            print(f'katana: Connection closed (Port {self.com_port.port}).')","sub_path":"src/microtome_control.py","file_name":"microtome_control.py","file_ext":"py","file_size_in_byte":40646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"301066331","text":"\ndef cel_to_fah(deg_in_cel):\n    deg_in_fah = deg_in_cel*9/5+32\n    return deg_in_fah\n\ntemperatures=[10,-20,-289,100]\n\nfile=open(\"celcius_deg.txt\",'w')\nfor deg in temperatures:\n    if deg > -273.15:\n        file.write(str(cel_to_fah(int(deg)))+\"\\n\")\n\nfile.close()\n","sub_path":"scripts/Section6/exercise5.py","file_name":"exercise5.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"554058269","text":"from bs4 import BeautifulSoup\nimport requests\nfrom scrapy.selector import Selector\n\nhtml = \"\"\"<div class=\"Product\">\n    <p class=\"Product-title\">\n        <a href=\"http://example.com/product-1\">text here that i need to grab</a>\n        <a href=\"http://example.com/product-2\">more text here that i would like to grab</a>\n    </p>\n</div>\n
\"\"\"\n\nsoup = BeautifulSoup(html, 'lxml')\n\nfor div in soup.findAll('p', {'class': 'Product-title'}):\n print(div.find('a')['href'])\n print(div.find('a').getText(strip=True))\n\n","sub_path":"scrapy_file/SOUP_USER_BADUKER.py","file_name":"SOUP_USER_BADUKER.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"530387495","text":"from werckercli.decorators import login_required\nfrom werckercli.client import Client\nfrom werckercli.prompt import get_value_with_default\nfrom werckercli.printer import (\n store_highest_length,\n print_hr,\n print_line,\n format_date,\n)\n\n# from werckercli.commands.target import\nfrom werckercli.cli import get_term, puts\n\nfrom werckercli.config import (\n get_value,\n VALUE_PROJECT_ID\n)\n\nfrom werckercli.commands.target import (\n # get_targets,\n # print_targets,\n pick_target\n)\n# from werckercli.paths import find_git_root\n\n\n@login_required\ndef build_list(valid_token=None, limit=5):\n\n term = get_term()\n\n if not valid_token:\n raise ValueError(\"A valid token is required!\")\n\n projectId = get_value(VALUE_PROJECT_ID, print_warnings=False)\n\n if not projectId:\n puts(\n term.red(\"Error: \") +\n \"No application found. Please create or link an application first\"\n )\n\n return\n\n builds = get_builds(valid_token, projectId)\n print_builds(builds, limit=limit)\n\n\n@login_required\ndef build_deploy(valid_token=None):\n\n if not valid_token:\n raise ValueError(\"A valid token is required!\")\n\n term = get_term()\n\n projectId = get_value(VALUE_PROJECT_ID, print_warnings=False)\n\n if not projectId:\n puts(\n term.red(\"Error: \") +\n \"No application found. Please create or link an application first\"\n )\n return\n\n builds = get_builds(valid_token, projectId)\n\n if type(builds) is not list or len(builds) == 0:\n puts(term.yellow(\"warning: \") + \"No builds found.\")\n return\n\n passed_builds = [build for build in builds if build['result'] == \"passed\"]\n if len(passed_builds) == 0:\n puts(\"No passed deploys found.\")\n print_builds(passed_builds, print_index=True)\n\n deploy_index = -1\n target_index = -1\n\n while(True):\n result = get_value_with_default(\"Select which build to deploy\", '1')\n\n valid_values = [str(i + 1) for i in range(len(passed_builds))]\n # valid_values = range(1, len(passed_builds) + 1)\n\n # print valid_values, result\n if result in valid_values:\n deploy_index = valid_values.index(result)\n break\n else:\n puts(term.red(\"warning: \") + \" invalid build selected.\")\n\n target_index = pick_target(valid_token, projectId)\n\n c = Client()\n\n code, result = c.do_deploy(\n valid_token,\n passed_builds[deploy_index]['id'],\n target_index\n )\n\n if \"success\" in result and result['success'] is True:\n puts(term.green(\"Success: \") + \"\"\"\n Build scheduled for deploy.\n\nYou can monitor the scheduled deploy in your browser using:\n{command_targets_deploy}\nOr query the queue for this application using:\n{command_queue}\"\"\".format(\n command_targets_deploy=term.white(\"wercker targets deploy\"),\n command_queue=term.white(\"wercker queue\")))\n else:\n puts(term.red(\"Error: \") + \"Unable to schedule deploy\")\n\n\ndef get_builds(valid_token, projectId):\n term = get_term()\n c = Client()\n\n puts(\"Retrieving builds from wercker...\")\n status, result = c.get_builds(valid_token, projectId)\n\n if status != 200:\n puts(\n term.yellow(\"Warning: \") +\n \"A problem occurred while retrieving builds\"\n )\n\n return 
result\n\n\ndef print_builds(builds, print_index=False, limit=5):\n\n    result = builds\n\n    header = [\n        'result',\n        'progress',\n        'branch',\n        'hash',\n        'created',\n        'message',\n    ]\n\n    props = [\n        'result',\n        'progress',\n        'branch',\n        # 'deployResult',\n        # 'deployBy',\n        # 'deployFinishedOn',\n        # 'branch',\n        'commit',\n        'creationDate',\n        # 'deployStatus',\n        'commitMessage',\n    ]\n\n    if print_index:\n        header = [''] + header\n        props = ['index'] + props\n\n    max_lengths = []\n\n    for i in range(len(header)):\n        max_lengths.append(0)\n\n    store_highest_length(max_lengths, header)\n\n    if type(result) is list:\n        index = 0\n        result = result[:limit]\n        if type(result) is list:\n            puts(\"Found %d result(s)...\\n\" % len(result))\n\n        for row in result:\n            if \"startedOn\" in row:\n                row['creationDate'] = format_date(row['creationDate'])\n\n            if \"progress\" in row:\n                row['progress'] = \"{progress:.1f}%\".format(\n                    progress=row['progress'])\n\n            if \"commit\" in row:\n                row[\"commit\"] = row['commit'][:8]\n\n            if print_index:\n                row['index'] = index + 1\n\n            store_highest_length(max_lengths, row, props)\n\n            index += 1\n\n    print_hr(max_lengths, first=True)\n    print_line(max_lengths, header)\n    print_hr(max_lengths)\n\n    for row in result:\n        print_line(max_lengths, row, props)\n        print_hr(max_lengths)\n\n    if len(result) == 0:\n        print_hr(max_lengths)\n","sub_path":"werckercli/commands/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"214780942","text":"# There is already access to the depth image and the panorama\n\nimport cv2\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport GraphCalculations\nwallLines = []  # contains all the corner lines in a room (no roof lines!), format: x,y1,y2\n\ndef getRoomCoords(panorama):  # ONLY VERTICAL LINES! the variable should be a line detection image of a depth image\n    steps = 100  # amount of steps we want\n    edgesMax = 4\n\n    global wallLines\n    imageWidth = len(panorama[0])\n    imageHeight = len(panorama)\n    threshold = 233\n    # we split the image into parts, the stepwidth..\n\n    stepHeight = len(panorama) // steps  # integer steps so range() accepts them\n    stepWidth = len(panorama[0]) // steps\n    if(stepHeight < 1 or stepWidth < 1):\n        print(\"steps too small\")\n        exit(0)\n    Values = np.zeros(steps)\n    index = 0\n    for width in range(0, len(panorama[0]), stepWidth):\n        # subImage = panorama[height:height + 1] # get a line of the image\n        # clippedSubImage = np.where(subImage > threshold, True, False)\n        col_max = 0\n        for height in range(0, len(panorama), stepHeight):\n            if(panorama[height][width] > col_max):  # remove or add the 0..\n                col_max = panorama[height][width]  # remove the 0\n        if(index < steps):  # store the column maximum for this step\n            Values[index] = col_max\n            index += 1\n    # peak search over Values; part of this block was lost to HTML-tag\n    # stripping, so the loop skeleton below only preserves the abort guard\n    breakLoop = False\n    loopCycles = 0\n    while not breakLoop:\n        if(loopCycles>100):\n            breakLoop = True\n        loopCycles+=1\n    return wallLines  # this returns the wall location in the room\n\n    # maxima should always be followed by a minima (with minima difference of delta > max/3\n\n\n    # for width in range(0, len(subImage[1])):\n    #     if (clippedSubImage[1][width] == True):\n    #         np.append(cornerCoords,\n    #                   (height, width, subImage[1][width])) # add the coordinates, + distance to an array\n    #     np.append(xValues,subImage[0][width]) # get a list of all the x coords\n    # np.clip(subImage, threshold, 6500, subImage) # clip the image to only get the max\n
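    # histogram sketch for the x-coordinates (assumed helper, not part of\n    # this file): counts, edges = np.histogram(xValues, bins=steps); peaks\n    # in counts would mark candidate wall positions\n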
    #histogram for X coords.. (https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html)\n    #min amount of points => nodes\n\n\n    # plt.hist(xValues, bins='auto')  # arguments are passed to np.histogram\n    # plt.title(\"Histogram with 'auto' bins\")\n    # plt.show()\n\n    # min distance between lines\n    # if not -> chose the longest line\n    # lines should have a min length\n    # put the resulting lines into wallLines\n    # if no lines: print(\"no wall lines!!)\n    # return wallLines\n\n\ndef setRoomCoords(newWallLines):\n    global wallLines\n    wallLines = newWallLines\n\n# by having access to the room coords we should be able to determine the position of objects in a room, as the distance between walls is linear\ndef getDistanceToWalls(objectLocation):  # parameter: [x1,x2,y1,y2]\n    global wallLines\n    startX = (objectLocation[0]+objectLocation[1])/2\n    if wallLines is None:\n        exit(-1)  # exit as no walllines have been initialised\n    # find closest wall lines, should be chronological\n    leftWall = 0\n    rightWall = 0\n    for wallLocation in range(0,len(wallLines)):\n        if(wallLines[wallLocation][0]>startX):  # the right wall of our object\n            rightWall = wallLocation\n            if(wallLocation>0):\n                leftWall = rightWall-1\n            else:\n                leftWall = len(wallLines)-1\n            break  # walls are ordered, so the first wall right of the object is the closest\n    return leftWall,rightWall,startX","sub_path":"RoomEdges/getDistance.py","file_name":"getDistance.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"131029617","text":"import scrapy 
\nfrom w3lib.http import basic_auth_header\n\n\n\nclass tiki_comment_spider(scrapy.Spider):\n    name = \"ip_check\"\n\n\n    def start_requests(self):\n        # url = \"https://www.tiki.vn/\"\n        url = \"https://www.whatismyip.com/\"\n        proxy = ''\n        yield scrapy.Request(url = url, callback = self.parse, meta = {\"proxy\": proxy}) \n\n    def parse(self, response): \n        path = '//*/ul[@class=\"list-group text-center\"]/li/text()'\n        ip = response.xpath(path).extract_first()\n\n        path = '//*/ul[@class=\"list-group text-center\"]/li[3]/text()'\n        location = response.xpath(path).extract_first()\n\n        path = '//*/ul[@class=\"list-group text-center\"]/li[4]/text()'\n        ips = response.xpath(path).extract_first()\n\n        print(ip)\n        print(location) \n        print(ips)\n","sub_path":"tiki/tiki/spiders/check_ip.py","file_name":"check_ip.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"201504999","text":"import functools\r\nimport time\r\n\r\n\r\nclass LogTime:\r\n    # decorator class that takes an argument\r\n    def __init__(self, user_int=False):\r\n        self.user_int = user_int\r\n\r\n    def __call__(self, func):\r\n        def _log(*args, **kwargs):\r\n            beg = time.time()\r\n            res = func(*args, **kwargs)\r\n            print(res)\r\n            if self.user_int:\r\n                print('user time: {}'.format(int(time.time() - beg)))\r\n            else:\r\n                print('user time:{}'.format(time.time() - beg))\r\n            return res\r\n\r\n        return _log\r\n\r\n\r\n@LogTime(True)\r\ndef mysleep():\r\n    time.sleep(1)\r\n    return True\r\n\r\n\r\nmysleep()  # user time: 1 True\r\n","sub_path":"装饰器/装饰类.py","file_name":"装饰类.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"495173209","text":"capacity,fill=map(int,input().split())\nif fill > capacity:\n\tprint(capacity)\n\texit()\nlast=0\nnum=capacity-fill\nl,r = 1,num\n
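# binary search for the smallest m with m*(m+1)//2 >= num\n# (e.g. num=10 -> m=4, since 3*4//2 = 6 < 10 and 4*5//2 = 10 >= 10)\nwhile(l<=r):\n\tmid = l + (r-l)//2\n\tx=(mid*(mid+1))//2\n\tif(x>=num):\n\t\tlast = mid\n\t\tr = mid - 1\n\telse:\n\t\tl = mid + 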
1\nprint(last+fill)\n","sub_path":"Codeforces Round #404 (Div. 2)/C - Anton and Fairy Tale.py","file_name":"C - Anton and Fairy Tale.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"275065882","text":"# -*- coding: utf-8 -*-\nimport pytest\nfrom pyleecan.Classes.MeshMat import MeshMat\nfrom pyleecan.Classes.CellMat import CellMat\nfrom pyleecan.Classes.PointMat import PointMat\nimport numpy as np\n\n\n@pytest.mark.MeshSol\n@pytest.mark.METHODS\nclass Test_get_point2cell(object):\n    \"\"\"unittest to get cell containing specific point\"\"\"\n\n    def setup_method(self, method):\n        self.mesh = MeshMat()\n        self.mesh.cell[\"triangle\"] = CellMat(nb_pt_per_cell=3)\n        self.mesh.point = PointMat()\n        self.mesh.point.add_point(np.array([0, 0]))\n        self.mesh.point.add_point(np.array([1, 0]))\n        self.mesh.point.add_point(np.array([1, 2]))\n        self.mesh.point.add_point(np.array([2, 3]))\n        self.mesh.point.add_point(np.array([3, 3]))\n\n        self.mesh.add_cell(np.array([0, 1, 2]), \"triangle\")\n        self.mesh.add_cell(np.array([1, 2, 3]), \"triangle\")\n        self.mesh.add_cell(np.array([4, 2, 3]), \"triangle\")\n\n        self.DELTA = 1e-10\n\n    def test_MeshMat_point(self):\n        \"\"\"unittest for an existing point \"\"\"\n        ind_elem = self.mesh.cell[\"triangle\"].get_point2cell(1)\n        solution = np.array([0, 1])\n        testA = np.sum(abs(solution - ind_elem))\n        msg = \"Wrong output: returned \" + str(ind_elem) + \", expected: \" + str(solution)\n        assert abs(testA - 0) < self.DELTA, msg\n\n    def test_MeshMat_fakepoint(self):\n        \"\"\"unittest for one non-existing point \"\"\"\n        ind_elem = self.mesh.cell[\"triangle\"].get_point2cell(-99)\n        # a point outside the mesh has no containing cell, so None is expected\n        solution = None\n        msg = \"Wrong output: returned \" + str(ind_elem) + \", expected: \" + str(solution)\n        assert ind_elem is solution, msg\n\n        elem_tag = self.mesh.cell[\"triangle\"].get_point2cell(None)\n        msg = \"Wrong output: returned \" + str(elem_tag) + \", expected: \" + str(solution)\n        assert elem_tag is solution, msg\n","sub_path":"Tests/Methods/Mesh/test_get_point2cell.py","file_name":"test_get_point2cell.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"396466112","text":"#!/usr/bin/env python\nfrom ase.db import connect\nfrom jasp import *\nfrom ase.lattice.surface import surface\nfrom ase.constraints import FixAtoms\nfrom ase import Atoms, Atom\nfrom ase.io import write, read\nfrom multiprocessing import Pool\nimport os\n\ndef sortz(atoms):\n    tags = atoms.positions[:,2]\n    deco = sorted([(tag, i) for i, tag in enumerate(tags)])\n    indices = [i for tag, i in deco]\n    return atoms[indices]\n\natoms = read('relax/al-ter/CONTCAR')\natoms = atoms*(2, 2, 1)\natoms = sortz(atoms)\n\n#\nconstraint = FixAtoms(mask=[atom.position[2] < 2.4\n                            for atom in atoms])\natoms.set_constraint(constraint)\n#view(atoms)\n\n# top_al\natoms_top_aldef = atoms.copy()\ndel atoms_top_aldef[-1]\n# top_o\natoms_top_odef = atoms.copy()\ndel atoms_top_odef[-5]\n\n\nenergies = {}\njobs = {'top_aldef':atoms_top_aldef,\n        'top_odef':atoms_top_odef}\n\ndef run(job, atoms):\n    with jasp('alter-def-relax/{0}'.format(job),\n              xc='PBE',\n              kpts=[3, 3, 1],\n              gamma=True,\n              encut=400,\n              ismear=0,\n              ibrion=2,\n              lreal='auto',\n              prec='accurate',\n              algo='fast',\n              nsw=200,\n              atoms=atoms) as calc:\n        print(\"{0} {1}\".format(job, 
atoms.get_potential_energy()))\n\nprint(\"-----------------------\")\nprint(\"class energy (eV)\")\npool = Pool(processes=10)\nfor job, atoms in jobs.items():\n    pool.apply_async(run, (job, atoms))\npool.close()\npool.join()\n\n\n\n","sub_path":"surfaces/tio2/pt1-def-relax.py","file_name":"pt1-def-relax.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"528498774","text":"from pygame import *\nimport sys, random\nimport time\n\ninit()\nscreen = display.set_mode((1280,720))\n\ndef cargar_animacion(nombre,extension, n):\n    images = []\n    for i in range(1,n+1):\n        name = nombre+str(i)+extension\n        images.append(image.load(name))\n    return images\n\ndef mostrar_animacion(images, freq, x, y):\n    frame = int(time.time()*freq) % len(images)\n    screen.blit(images[frame],(x, y))\n\n\n\nmapa1 = image.load(\"mapa1.jpeg\")\nmapa1 = transform.scale(mapa1, (1280,720))\n\nplayerizq = cargar_animacion(\"left/left\", \".png\", 3)\nplayerder = cargar_animacion(\"right/right\", \".png\", 3)\nplayerup = cargar_animacion(\"up/up\", \".png\", 3)\nplayerdown = cargar_animacion(\"down/down\", \".png\", 3)\n\nplayer = image.load(\"player.png\")\nplayer = transform.scale(player, (28,32))\n\nmoviendose = False\n\nxMapa1 = 0\ncuadro = 40\n\nxPlayer = 1\nyPlayer = 1\n\n\nspeedPlayer = 0.15 \n\nwhile True:\n    screen.fill((255,255,255))\n    for e in event.get():\n        if e.type == QUIT: sys.exit()\n        #if e.type == KEYDOWN and e.key == K_p:\n        \n    screen.blit(mapa1,(0,0))\n\n    # Movement\n    if key.get_pressed()[K_w]:\n        yPlayer = yPlayer - speedPlayer\n        mostrar_animacion(playerup,10,xPlayer,yPlayer)\n        moviendose = True\n    elif key.get_pressed()[K_s]:\n        yPlayer = yPlayer + speedPlayer\n        mostrar_animacion(playerdown,10,xPlayer,yPlayer)\n        moviendose = True\n    elif key.get_pressed()[K_a]:\n        xPlayer = xPlayer - speedPlayer\n        mostrar_animacion(playerizq,10,xPlayer,yPlayer)\n        moviendose = True\n    elif key.get_pressed()[K_d]:\n        xPlayer = xPlayer + speedPlayer\n        mostrar_animacion(playerder,10,xPlayer,yPlayer)\n        moviendose = True\n    else:\n        screen.blit(player, (xPlayer, yPlayer))\n    \n    if xPlayer + 28 >= mapa1.get_width():\n        xPlayer = mapa1.get_width() - 28\n    if xPlayer <= 0:\n        xPlayer = 0\n    if yPlayer + 32 >= mapa1.get_height():\n        yPlayer = mapa1.get_height() - 32\n    if yPlayer <= 0:\n        yPlayer = 0\n    \n\n    \n\n\n    \n    playerRect = Rect(xPlayer, yPlayer, player.get_width(), player.get_height())\n    \n\n\n\n    display.flip()\n    \n    ","sub_path":"PyGame_Antonio_Camila.py","file_name":"PyGame_Antonio_Camila.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"170581159","text":"#!/usr/bin/python3\nimport urllib.request, urllib.parse\nimport pprint\nimport json\nimport sys\nimport time\n\nfrom pathlib import Path\n#for ignoring invalid certs\n#https://stackoverflow.com/questions/27835619/urllib-and-ssl-certificate-verify-failed-error\nimport ssl\ngcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1) # Only for gangstars\n\n\ntry:\n\tusername = sys.argv[1]\nexcept:\n\tprint( \"Usage: {} <username> [url]\".format(sys.argv[0]))\n\tsys.exit(0)\n\n\ntry:\n\turl = sys.argv[2]\nexcept:\n\turl = 'https://localhost:4443'\n\n\n\ndef api(path, args):\n\tdata = json.dumps(args).encode()\n\tres = urllib.request.urlopen(url + path, data, context=gcontext)\n\treturn res.read().decode('utf-8')\n\n
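# usage sketch: api('/user/login', {'username': ..., 'password': ...}) POSTs\n# the JSON-encoded args to the REST endpoint and returns the response body\n\n#https://stackoverflow.com/questions/287871/print-in-terminal-with-colors\nclass bc:\n\tHEADER = 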
'\\033[95m'\n\tOKBLUE = '\\033[94m'\n\tOKGREEN = '\\033[92m'\n\tWARNING = '\\033[93m'\n\tFAIL = '\\033[91m'\n\tENDC = '\\033[0m'\n\tBOLD = '\\033[1m'\n\tUNDERLINE = '\\033[4m'\n\ndef pc(stran, color):\n\tprint(color + stran + bc.ENDC)\n\n\n\npc('login as admin', bc.HEADER)\nlogin = json.loads(api('/user/login', {\"username\":\"admin\", \"password\":\"changeMe!\"}))\npprint.pprint(login)\nadmintok = login['userToken']\n\npc('create a user', bc.HEADER)\ncret = json.loads(api('/user/create', {\"userToken\":admintok, \"username\":username, \"password\":\"virtuerestdefaultpw\"}))\npprint.pprint(cret)\n\npc('create an app for xterm-advanced', bc.HEADER)\ngoodrole = json.loads(api('/application/create', {\"userToken\":admintok, \"applicationID\":\"xterm-advanced\", \"install\":\"xterm nano wget less ftp telnet netcat iputils-ping\", \"type\":\"linux\", \"launchCmd\":\"xterm -e bash\"}))\npprint.pprint(goodrole)\n\npc('create a role for linuxrouteradmin', bc.HEADER)\ngoodrole = json.loads(api('/role/create', {\"userToken\":admintok, \"roleID\":\"linuxrouteradmin\", \"applicationIDs\":[\"xterm-advanced\"], \"mounts\":\"user, persistent\"}))\npprint.pprint(goodrole)\n\npc('ok, admin will add rolo', bc.HEADER)\nbadv = json.loads(api('/user/role/authorize', {\"userToken\":admintok, \"username\":username, \"roleID\":\"linuxrouteradmin\"}))\npprint.pprint(badv)\n\n\n\npc('lets wait until the role is ready', bc.HEADER)\n\n\nwhile True:\n\tbadv = json.loads(api('/role/get', {\"userToken\":admintok, \"roleID\":\"linuxrouteradmin\"}))\n\tpprint.pprint(badv)\n\tif 'status' in badv and badv['status'] != 'creating' and badv['status'] != 'uploading' and badv['status'] != 'bundling':\n\t\tbreak\n\ttime.sleep(1)\n\npc('launch it', bc.HEADER)\nbadv = json.loads(api('/virtue/create', {\"userToken\":admintok, \"userID\":username, \"roleID\":\"linuxrouteradmin\"}))\npprint.pprint(badv)\n\n\n'''\n'''\n","sub_path":"linux/rest/demousefules/linrouter.py","file_name":"linrouter.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"521870895","text":"from shutil import move\r\nfrom turtle import Turtle, Screen\r\n\r\ntim = Turtle()\r\nscreen = Screen() # control window\r\n\r\ndef move_forward():\r\n tim.forward(10)\r\n\r\ndef move_backward():\r\n tim.backward(10)\r\n\r\ndef turn_left():\r\n temp_heading = tim.heading() + 10\r\n tim.setheading(temp_heading)\r\n\r\ndef turn_right():\r\n temp_heading = tim.heading() - 10\r\n tim.setheading(temp_heading)\r\n\r\ndef clear():\r\n tim.clear()\r\n tim.penup()\r\n tim.home()\r\n tim.pendown()\r\n\r\nscreen.listen() # start listening\r\nscreen.onkey(move_forward, \"w\")\r\nscreen.onkey(move_backward, \"s\")\r\nscreen.onkey(turn_left, \"a\")\r\nscreen.onkey(turn_right, \"d\")\r\nscreen.onkey(clear, \"c\")\r\n\r\nscreen.exitonclick()\r\n\r\n","sub_path":"turtle_package/racing game/turtle_event_handlers.py","file_name":"turtle_event_handlers.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"580076673","text":"from math import sqrt\r\n\r\nusers = {\"Angelica\": {\"Blues Traveler\": 3.5, \"Broken Bells\": 2.0, \"Norah Jones\": 4.5, \"Phoenix\": 5.0, \"Slightly Stoopid\": 1.5, \"The Strokes\": 2.5, \"Vampire Weekend\": 2.0},\r\n \"Bill\":{\"Blues Traveler\": 2.0, \"Broken Bells\": 3.5, \"Deadmau5\": 4.0, \"Phoenix\": 2.0, \"Slightly Stoopid\": 3.5, \"Vampire Weekend\": 3.0},\r\n \"Chan\": 
{\"Blues Traveler\": 5.0, \"Broken Bells\": 1.0, \"Deadmau5\": 1.0, \"Norah Jones\": 3.0, \"Phoenix\": 5, \"Slightly Stoopid\": 1.0},\r\n \"Dan\": {\"Blues Traveler\": 3.0, \"Broken Bells\": 4.0, \"Deadmau5\": 4.5, \"Phoenix\": 3.0, \"Slightly Stoopid\": 4.5, \"The Strokes\": 4.0, \"Vampire Weekend\": 2.0},\r\n \"Hailey\": {\"Broken Bells\": 4.0, \"Deadmau5\": 1.0, \"Norah Jones\": 4.0, \"The Strokes\": 4.0, \"Vampire Weekend\": 1.0},\r\n \"Jordyn\": {\"Broken Bells\": 4.5, \"Deadmau5\": 4.0, \"Norah Jones\": 5.0, \"Phoenix\": 5.0, \"Slightly Stoopid\": 4.5, \"The Strokes\": 4.0, \"Vampire Weekend\": 4.0},\r\n \"Sam\": {\"Blues Traveler\": 5.0, \"Broken Bells\": 2.0, \"Norah Jones\": 3.0, \"Phoenix\": 5.0, \"Slightly Stoopid\": 4.0, \"The Strokes\": 5.0},\r\n \"Veronica\": {\"Blues Traveler\": 3.0, \"Norah Jones\": 5.0, \"Phoenix\": 4.0, \"Slightly Stoopid\": 2.5, \"The Strokes\": 3.0}\r\n }\r\n\r\ndef ManhattanDistance(rating1, rating2):\r\n distance=0\r\n commonRatings = False\r\n for key in rating1:\r\n if key in rating2:\r\n distance += abs(rating1[key] - rating2[key])\r\n commonRatings = True\r\n if commonRatings:\r\n return distance\r\n else:\r\n return -1\r\n\r\ndef EuclideanDistance(rating1, rating2):\r\n distance=0\r\n commonRatings = False\r\n for key in rating1:\r\n if key in rating2:\r\n distance += (rating1[key] - rating2[key])**2\r\n commonRatings = True\r\n if commonRatings:\r\n return (sqrt(distance))\r\n else:\r\n return -1\r\n\r\ndef Generalization(rating1, rating2, r):\r\n distance=0\r\n commonRatings= False\r\n for key in rating1:\r\n if key in rating2:\r\n distance += (abs(rating1[key]-rating2[key]))**r\r\n commonRatings=True\r\n if commonRatings:\r\n return (distance**(1.0/r))\r\n else:\r\n return -1\r\n\r\ndef computeNearestNeighbor(username, users, typeDist):\r\n distances = []\r\n for user in users:\r\n if user != username:\r\n distance=Generalization(users[user], users[username],typeDist)\r\n #if (typeDist==1):\r\n # distance = ManhattanDistance(users[user], users[username])\r\n #elif (typeDist==2):\r\n # distance = EuclideanDistance(users[user], users[username])\r\n distances.append((distance, user))\r\n # sort based on distance -- closest first\r\n distances.sort()\r\n return distances\r\n\r\ndef recommend(username, users,typeDist):\r\n nearest = computeNearestNeighbor(username, users,typeDist)[0][1]\r\n print(nearest)\r\n recommendations = []\r\n neighborRatings = users[nearest]\r\n userRatings = users[username]\r\n for artist in neighborRatings:\r\n if not artist in userRatings:\r\n recommendations.append((artist, neighborRatings[artist]))\r\n return sorted(recommendations, key=lambda artistTuple: artistTuple[1], reverse = True)\r\n\r\n\r\nprint( recommend('Hailey', users,2))\r\n#print(Generalization(users[\"Hailey\"],users[\"Veronica\"],1))\r\n#print( computeNearestNeighbor(\"Hailey\", users,2))\r\n","sub_path":"Capitulo2/LAB/Python/distaRecommend.py","file_name":"distaRecommend.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"73237459","text":"from django.core.management.base import BaseCommand\r\nfrom website.profile import Profile, Founder\r\nfrom django.db import transaction\r\n\r\nclass Command(BaseCommand):\r\n help = \"Set is_filled flag on Profiles and Founders.\"\r\n\r\n def handle(self, *args, **options):\r\n self.stdout.write('Check Founders')\r\n with transaction.atomic():\r\n result = Founder.objects.all()\r\n for item in result:\r\n 
item.check_is_filled(save=False)\r\n                if not item.is_filled:\r\n                    self.stdout.write(self.style.SUCCESS('id: {} email: {}'.format(item.user.id, item.user.email)))\r\n            self.stdout.write(self.style.SUCCESS('Checked \"{}\" User Profiles'.format(result.count())))\r\n","sub_path":"website/management/commands/check_is_filled.py","file_name":"check_is_filled.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"347243892","text":"#import turtle\n#num_pts= 4\n#for i in range (num_pts):\n   # turtle.left(360/num_pts)\n    #turtle.forward(100)\n#turtle.mainloop()\n\nimport turtle\nturtle.tracer(1)\nrounds = 10\nsize = 10\nmike = turtle.clone()\nsteve = turtle.clone()\nturtle.bgcolor(\"pink\")\nturtle.hideturtle()\nmike.color(\"gold\")\nsteve.color(\"purple\")\nsteve.goto(5,5)\nfor i in range(rounds):  # bounded loop so turtle.mainloop() below is reachable\n\tmike.forward(size)\n\tmike.left(90)\n\tsteve.forward(-size)\n\tsteve.left(-90)\n\tsize += 10\nturtle.mainloop()\n","sub_path":"turtleshapes.py","file_name":"turtleshapes.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"540625221","text":"import time\nt=time.time()\nn=10\nprime=[True]*(n+1)\nco=[]\nfor i in range(2,n+1):\n    if(prime[i]==1):\n        for j in range(i*i,n+1,i):\n            prime[j]=False\n    if(prime[i]):\n        co.append(i)\n# count the numbers <= 10**9 whose prime factors are all <= n\ndef search(x=1,index=0):\n    res=1\n    for i in range(index,len(co)):\n        product=co[i]*x\n        if(product>1000000000):\n            break\n        res+=search(product,i)\n    return res\nprint(search()) \nt2=time.time()\nprint(t2-t)\n","sub_path":"problem204.py","file_name":"problem204.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"92778167","text":"from selenium.webdriver.support.wait import WebDriverWait\nfrom common.webdriver_factory import get_driver\nfrom module_05.sauce_func_lib.inventory import get_inventory, InventoryItem\nfrom module_05.sauce_func_lib.login import login\n\nVALID_PRICES = ['$29.99', '$9.99', '$15.99', '$49.99', '$7.99', '$15.99']\n\ndef test_inventory_size():\n    driver = get_driver('chrome')\n    wait = WebDriverWait(driver, 5)\n    driver.get('https://www.saucedemo.com/')\n    login(wait, 'standard_user', 'secret_sauce')\n    items = get_inventory(wait)\n    assert len(items) == 6, 'Inventory should contain 6 items'\n    driver.close()\n\ndef test_inventory_price():\n    driver = get_driver('chrome')\n    wait = WebDriverWait(driver, 5)\n    driver.get('https://www.saucedemo.com/')\n    login(wait, 'standard_user', 'secret_sauce')\n    items = get_inventory(wait)\n    for index, item in enumerate(items):\n        item: InventoryItem\n        assert item.price == VALID_PRICES[index], f'Price for item {index} should be {VALID_PRICES[index]}'\n    driver.close()\n\n\n\n","sub_path":"module_05/test_sauce_inventory.py","file_name":"test_sauce_inventory.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"562741499","text":"from magma import *\nfrom magma.bitutils import 
int2seq\nfrom mantle import *\nfrom rom import ROM8, ROM16\nfrom loam.boards.icestick import IceStick\nfrom uart import UART\n\n\nicestick = IceStick()\nicestick.Clock.on()\nfor i in range(8):\n icestick.J3[i].output().on()\n\nmain = icestick.main()\n\n# \"test\" data\ninit = [uint(n, 16) for n in range(16)]\nprintf = Counter(4, has_ce=True)\nrom = ROM16(4, init, printf.O)\n\n# baud for uart output\nclock = CounterModM(103, 8)\nbaud = clock.COUT\n\nbit_counter = Counter(5, has_ce=True)\nwire(baud, bit_counter.CE)\n\nload = Decode(0, 5)(bit_counter.O)\n\n#valid_counter = CounterModM(4800, 13, has_ce=True)\nvalid_counter = CounterModM(8, 3, has_ce=True)\nwire(load&baud, valid_counter.CE)\n\nvalid_list = [5,7]\n\nvalid = GND\n\nfor i in valid_list:\n\tvalid = valid | Decode(i,3)(valid_counter.O)\n\n#valid = Decode(5,3)(valid_counter.O) | Decode(7,3)(valid_counter.O)\n\nwire(load&baud, printf.CE)\n\npx_val = rom.O\n\nst_in = Register(16, has_ce=True)\nst_in(px_val)\nwire(load, st_in.CE)\n\n#---------------------------STENCILING-----------------------------#\n\nSTEN = DeclareCircuit('STEN', \n\t\t\t\"I_0_0\", In(Array(1, Array(1, Array(16, Bit)))),\n\t\t\t\"O\", Out(Array(16, Bit)),\n\t\t\t\"WE\", In(Bit),\n\t\t\t\"V\", Out(Bit),\n\t\t\t\"CLK\", In(Clock),\n\t\t\t\"CLKOut\", Out(Clock),\n\t\t\t\"L00\", Out(Array(16, Bit)),\n\t\t\t\"L01\", Out(Array(16, Bit)),\n\t\t\t\"L10\", Out(Array(16, Bit)),\n\t\t\t\"L11\", Out(Array(16, Bit)),\n\t\t\t)\n\nstencil = STEN()\n\nwire(st_in.O, stencil.I_0_0[0][0])\nwire(1, stencil.WE)\nwire(load, stencil.CLK)\n\nadd16 = CounterModM(1,16) # needed for Add16 definition\n\n#---------------------------UART OUTPUT-----------------------------#\n\nuart_px = UART(16)\nuart_px(CLK=main.CLKIN, BAUD=baud, DATA=px_val, LOAD=load)\n\nuart_st = UART(16)\nuart_st(CLK=main.CLKIN, BAUD=baud, DATA=stencil.O, LOAD=load)\n\nuart_L00 = UART(16)\nuart_L00(CLK=main.CLKIN, BAUD=baud, DATA=stencil.L00, LOAD=load)\n\nuart_L01 = UART(16)\nuart_L01(CLK=main.CLKIN, BAUD=baud, DATA=stencil.L01, LOAD=load)\n\nuart_L10 = UART(16)\nuart_L10(CLK=main.CLKIN, BAUD=baud, DATA=stencil.L10, LOAD=load)\n\nuart_L11 = UART(16)\nuart_L11(CLK=main.CLKIN, BAUD=baud, DATA=stencil.L11, LOAD=load)\n\nuart_reg = UART(16)\nuart_reg(CLK=main.CLKIN, BAUD=baud, DATA=st_in.O, LOAD=load)\n\nwire(valid, main.J3[0])\nwire(stencil.CLKOut, main.J3[1])\nwire(uart_px.O, main.J3[2]) # change to main.TX to stream to UART\nwire(uart_st.O, main.J3[3]) # change to main.TX to stream to UART\nwire(uart_L00.O, main.J3[4])\nwire(uart_L01.O, main.J3[5])\nwire(uart_L10.O, main.J3[6])\nwire(uart_L11.O, main.J3[7])\n","sub_path":"IceStick/stencil.py","file_name":"stencil.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"420597370","text":"# Trap calibration: Bead with stage oscillation\r\n\r\nfrom __future__ import division, print_function, absolute_import\r\nimport Calibration1\r\nimport sys\r\n\r\n\r\nfiles = ['Cal_X', 'Cal_Y']\r\n\r\nf_sample = 10000\r\nf_lp = 10000\r\npower = 100\r\nR = 240\r\nfd = 100\r\nAd = 100\r\nheight = 500\r\n\r\ndef main(): \r\n for i in range(2): \r\n fname = files[i]\r\n axis = fname[-1]\r\n print(fname)\r\n \r\n PZT_A, beta, db, kappa, dk, ratio, dr = Calibration1.main(fname, f_sample, f_lp, R, power, axis, fd, Ad, height)\r\n\r\n\r\n info = open(fname+'.txt', 'w')\r\n\r\n info.write('A_fit = %.1f [nm] \\n' % (abs(PZT_A)))\r\n info.write('beta = %.1f +/- %.1f [nm/V] \\n' %(beta, db)) \r\n 
info.write('kappa = %.3f +/- %.3f [pN/nm] \\n' %(kappa, dk))\r\n        info.write('Stokes ratio = %.1f +/- %.3f \\n\\n' %(ratio, dr)) \r\n\r\n        info.close()  \r\n    \r\nif __name__ == \"__main__\": \r\n    main()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"scripts/Calibration/Calibration_Bead.py","file_name":"Calibration_Bead.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"614969952","text":"import telebot\r\nimport requests\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nbot = telebot.TeleBot('411716032:AAFbAXWyYdURmxP5zCAYrVIXPUIZ0POBVkw')\r\n\r\n\r\ndef parsing(message):\r\n    newpers = str(message.text)\r\n    n = 0\r\n    while True:\r\n        source = requests.get(newpers)\r\n        source.encoding = 'utf-8'\r\n        soup = BeautifulSoup(source.text, 'html.parser')\r\n        inf1 = soup.find_all('div')\r\n        x = 0\r\n        for item in inf1:\r\n            if x <= 15:\r\n                x += 1\r\n            else:\r\n                if item.text == 'Online':\r\n                    n = 1\r\n                    break\r\n        if n == 1:\r\n            break\r\n        time.sleep(15)\r\n    return n\r\n\r\n\r\ndef find_name(message):\r\n    newpers = str(message.text)\r\n    source = requests.get(newpers)\r\n    source.encoding = 'utf-8'\r\n    soup = BeautifulSoup(source.text, 'html.parser')\r\n    inf1 = soup.find_all('h2')\r\n    for item in inf1:\r\n        name = item.text\r\n    return(name)\r\n\r\n\r\n@bot.message_handler(commands = ['help'])\r\ndef send_welcome(message):\r\n    bot.reply_to(message, 'No, you will not get it')\r\n# @bot.message_handler(commands = ['look_for_Tima'])\r\n# def look_for_tima(message):\r\n#     message = 'https://vk.com/holyrofl'\r\n#     z = parsing(message)\r\n#     if z == 1:\r\n#         bot.reply_to(message, 'Tima - Online')\r\n@bot.message_handler(regexp=\"https://vk.com/\")\r\ndef mes_http(message):\r\n    res = parsing(message)\r\n    name = find_name(message)\r\n    if res == 1:\r\n        bot.reply_to(message, name + ' - Online')\r\nbot.polling()","sub_path":"kontach_new_subline.py","file_name":"kontach_new_subline.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"23356676","text":"# -*- coding:utf-8 -*-\nimport torch\nimport os\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom transformers import BertModel, BertTokenizer, BertConfig\nfrom transformers.modeling_bert import BertEmbeddings, BertEncoder\nimport transformers as tfs\n\nmodel_name = 'bert-base-chinese'\ntokenizer = BertTokenizer.from_pretrained(model_name)\nmodel = BertModel.from_pretrained(model_name)\nconfig = BertConfig.from_pretrained(model_name)\n\n\n\nclass BertTagger(nn.Module):\n    def __init__(self, tagset_size=2):\n        super(BertTagger, self).__init__()\n        model_class, tokenizer_class, pretrained_weights = (tfs.BertModel, tfs.BertTokenizer, 'bert-base-chinese')\n        self.tokenizer = tokenizer_class.from_pretrained(pretrained_weights)\n        # embedding layer BertEmbeddings()\n        self.embeddings = BertEmbeddings(config)\n        # multi-layer (12 layers) multi-head self-attention encoder BertEncoder\n        self.encoder = BertEncoder(config)\n        self.bert = model_class.from_pretrained(pretrained_weights)\n        self.dense = nn.Linear(768, tagset_size)  # BERT's default hidden size is 768; 2 output units for binary classification\n        self.dropout = nn.Dropout(p=0.1)  # dropout for training\n
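\n    # Shape sketch (illustrative, batch of B sentences of padded length L):\n    # input_ids -> (B, L); bert_output[0] -> (B, L, 768); the [CLS] slice\n    # bert_output[0][:, 0, :] -> (B, 768); tag_scores -> (B, tagset_size).\n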
torch.tensor(batch_tokenized['input_ids'])\n attention_mask = torch.tensor(batch_tokenized['attention_mask'])\n bert_output = self.bert(input_ids, attention_mask=attention_mask)\n bert_cls_hidden_state = bert_output[0][:, 0, :] # 提取[CLS]对应的隐藏状态\n dropout_output = self.dropout(bert_cls_hidden_state)\n tag_space = self.dense(dropout_output)\n tag_scores = F.log_softmax(tag_space, dim=2)\n return tag_scores","sub_path":"Automatic-Corpus_Filter_Data_Aug/model/BERT_Tagger.py","file_name":"BERT_Tagger.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348847949","text":"\"\"\"Module that manages the testing states of the access ports\"\"\"\n\nimport threading\n\nfrom forch.utils import get_logger\n\nfrom forch.proto.shared_constants_pb2 import PortBehavior\nfrom forch.proto.devices_state_pb2 import DeviceBehavior, DevicePlacement\nfrom forch.proto.shared_constants_pb2 import DVAState\n\nINVALID_VLAN = 0\n\nSTATE_HANDLERS = {}\n\n\ndef _register_state_handler(state_name):\n def register(func):\n STATE_HANDLERS[state_name] = func\n return func\n return register\n\n\nclass PortStateMachine:\n \"\"\"State machine class that manages testing states of an access port\"\"\"\n\n UNAUTHENTICATED = 'unauthenticated'\n AUTHENTICATED = 'authenticated'\n SEQUESTERED = 'sequestered'\n OPERATIONAL = 'operational'\n INFRACTED = 'infracted'\n\n TRANSITIONS = {\n UNAUTHENTICATED: {\n PortBehavior.cleared: OPERATIONAL,\n PortBehavior.sequestered: SEQUESTERED,\n },\n SEQUESTERED: {\n PortBehavior.passed: OPERATIONAL,\n PortBehavior.failed: INFRACTED,\n PortBehavior.deauthenticated: UNAUTHENTICATED,\n },\n OPERATIONAL: {\n PortBehavior.cleared: OPERATIONAL,\n PortBehavior.deauthenticated: UNAUTHENTICATED,\n },\n }\n\n # pylint: disable=too-many-arguments\n def __init__(self, mac, initial_state, unauthenticated_state_callback, sequester_state_callback,\n operational_state_callback, infracted_state_callback):\n self._mac = mac\n self._current_state = initial_state\n self._unauthenticated_state_callback = unauthenticated_state_callback\n self._sequester_state_callback = sequester_state_callback\n self._operational_state_callback = operational_state_callback\n self._infracted_state_callback = infracted_state_callback\n self._logger = get_logger('portsm')\n\n self._handle_current_state()\n\n def handle_port_behavior(self, port_behavior):\n \"\"\"Handle port behavior\"\"\"\n next_state = self.TRANSITIONS.get(self._current_state, {}).get(port_behavior, {})\n\n if not next_state:\n self._logger.warning(\n 'Cannot find next state for device %s in state %s for port behavior %s',\n self._mac, self._current_state, port_behavior)\n return\n\n self._logger.info(\n 'Device %s is entering %s state from %s state',\n self._mac, next_state, self._current_state)\n\n self._current_state = next_state\n self._handle_current_state()\n\n def get_current_state(self):\n \"\"\"Get current state of the port\"\"\"\n return self._current_state\n\n def _handle_current_state(self):\n if self._current_state in STATE_HANDLERS:\n STATE_HANDLERS[self._current_state](self)\n\n @_register_state_handler(state_name=UNAUTHENTICATED)\n def _handle_unauthenticated_state(self):\n self._logger.info('Handling unauthenticated state for device %s', self._mac)\n self._unauthenticated_state_callback(self._mac)\n\n @_register_state_handler(state_name=SEQUESTERED)\n def _handle_sequestered_state(self):\n self._logger.info('Handling sequestered state for device %s', 
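# Minimal usage sketch for the BertTagger above (hedged: the first call
# downloads bert-base-chinese, so treat as illustrative). Note that tag_space
# coming out of self.dense is 2-D ([batch, tagset_size]), so
# F.log_softmax(tag_space, dim=2) raises "Dimension out of range"; the class
# axis of a 2-D tensor is dim=1 (equivalently dim=-1).
import torch
tagger = BertTagger(tagset_size=2)
tagger.eval()
with torch.no_grad():
    scores = tagger(["first test sentence", "second test sentence"])
print(scores.shape)  # torch.Size([2, 2]) once log_softmax uses dim=-1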
self._mac)\n self._sequester_state_callback(self._mac)\n\n @_register_state_handler(state_name=OPERATIONAL)\n def _handle_operational_state(self):\n self._logger.info('Handling operational state for device %s', self._mac)\n self._operational_state_callback(self._mac)\n\n @_register_state_handler(state_name=INFRACTED)\n def _handle_infracted_state(self):\n self._logger.info('Handling infracted state for device %s', self._mac)\n self._infracted_state_callback(self._mac)\n\n\nclass PortStateManager:\n \"\"\"Manages the states of the access ports for orchestrated testing\"\"\"\n\n # pylint: disable=too-many-arguments\n def __init__(self, device_state_manager=None, varz_updater=None, testing_segment=None):\n self._state_machines = {}\n self._static_port_behaviors = {}\n self._static_device_behaviors = {}\n self._dynamic_device_behaviors = {}\n self._device_state_manager = device_state_manager\n self._varz_updater = varz_updater\n self._placement_to_mac = {}\n self._testing_segment = testing_segment\n self._lock = threading.RLock()\n self._logger = get_logger('portmgr')\n\n def handle_static_device_behavior(self, mac, device_behavior):\n \"\"\"Add static testing state for a device\"\"\"\n with self._lock:\n static_port_behavior = device_behavior.port_behavior\n if static_port_behavior:\n self._static_port_behaviors[mac] = static_port_behavior\n\n if device_behavior.segment:\n self.handle_device_behavior(mac, device_behavior, static=True)\n\n def handle_device_behavior(self, mac, device_behavior, static=False):\n \"\"\"Handle authentication result\"\"\"\n if device_behavior.segment:\n self._handle_authenticated_device(mac, device_behavior, static)\n if static:\n self._update_static_vlan_varz(\n mac, vlan=self._get_vlan_from_segment(device_behavior.segment))\n else:\n self._handle_deauthenticated_device(mac, static)\n\n def handle_device_placement(self, mac, device_placement, static=False):\n \"\"\"Handle a learning or expired VLAN for a device\"\"\"\n if device_placement.connected:\n return self._handle_learned_device(mac, device_placement, static)\n\n return self._handle_disconnected_device(device_placement)\n\n def _handle_learned_device(self, mac, device_placement, static=False):\n # if device is learned\n old_mac = self._placement_to_mac.get((device_placement.switch, device_placement.port))\n stale_mac = old_mac if old_mac and old_mac != mac else None\n\n if stale_mac:\n switch = device_placement.switch\n port = device_placement.port\n self._logger.warning(\n 'Cleaning stale device placement: %s, %s, %s', old_mac, switch, port)\n stale_placement = DevicePlacement(switch=switch, port=port, connected=False)\n self._handle_disconnected_device(stale_placement)\n\n self._placement_to_mac[(device_placement.switch, device_placement.port)] = mac\n self._process_device_placement(mac, device_placement, static=static)\n\n if mac not in self._state_machines:\n self._state_machines[mac] = PortStateMachine(\n mac, PortStateMachine.UNAUTHENTICATED, self._handle_unauthenticated_state,\n self._set_port_sequestered, self._set_port_operational,\n self._handle_infracted_state)\n\n device_behavior = (self._static_device_behaviors.get(mac) or\n self._dynamic_device_behaviors.get(mac))\n if device_behavior:\n static = mac in self._static_device_behaviors\n self.handle_device_behavior(mac, device_behavior, static=static)\n\n return True, None, stale_mac\n\n def _handle_disconnected_device(self, device_placement):\n eth_src = self._placement_to_mac.pop((device_placement.switch, device_placement.port), None)\n\n # 
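# Walk-through of the PortStateMachine transitions defined above, using no-op
# callbacks (a sketch; PortBehavior values come from the forch protos imported
# at the top of that module):
noop = lambda mac: None
sm = PortStateMachine('00:11:22:33:44:55', PortStateMachine.UNAUTHENTICATED,
                      noop, noop, noop, noop)
sm.handle_port_behavior(PortBehavior.sequestered)  # unauthenticated -> sequestered
sm.handle_port_behavior(PortBehavior.passed)       # sequestered -> operational
assert sm.get_current_state() == PortStateMachine.OPERATIONAL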
Dont propagate removal of placement if not in cache\n if not eth_src:\n return False, None, None\n\n self._process_device_placement(eth_src, device_placement, static=False)\n if eth_src in self._state_machines:\n self._state_machines.pop(eth_src)\n\n self._update_device_state_varz(eth_src, DVAState.initial)\n\n return True, eth_src, None\n\n def _handle_authenticated_device(self, mac, device_behavior, static):\n \"\"\"Initialize or update the state machine for an authenticated device\"\"\"\n if not self._process_device_behavior:\n return\n\n with self._lock:\n device_behaviors = (\n self._static_device_behaviors if static else self._dynamic_device_behaviors)\n device_behaviors.setdefault(mac, DeviceBehavior()).CopyFrom(device_behavior)\n\n static_port_behavior = self._static_port_behaviors.get(mac)\n if not self._testing_segment or static_port_behavior == PortBehavior.cleared:\n port_behavior = PortBehavior.cleared\n else:\n port_behavior = PortBehavior.sequestered\n\n if mac in self._state_machines:\n self._state_machines[mac].handle_port_behavior(port_behavior)\n\n def _handle_deauthenticated_device(self, mac, static):\n \"\"\"Handle an deauthenticated device\"\"\"\n if not self._process_device_behavior:\n return\n\n with self._lock:\n device_behaviors = (\n self._static_device_behaviors if static else self._dynamic_device_behaviors)\n if mac in device_behaviors:\n device_behaviors.pop(mac)\n else:\n self._logger.warning(\n '%s behavior does not exist for %s', 'static' if static else 'dynamic', mac)\n\n # ignore dynamic behavior for device that has static behavior defined\n if not static and mac in self._static_device_behaviors:\n return\n\n if mac in self._state_machines:\n port_behavior = PortBehavior.deauthenticated\n self._state_machines[mac].handle_port_behavior(port_behavior)\n self._process_device_behavior(mac, DeviceBehavior(), static=static)\n\n def handle_testing_result(self, testing_result):\n \"\"\"Update the state machine for a device according to the testing result\"\"\"\n for mac, device_behavior in testing_result.device_mac_behaviors.items():\n self._handle_port_behavior(mac, device_behavior.port_behavior)\n\n def _handle_port_behavior(self, mac, port_behavior):\n with self._lock:\n state_machine = self._state_machines.get(mac)\n if not state_machine:\n self._logger.error(\n 'No state machine defined for device %s before receiving testing result', mac)\n return\n state_machine.handle_port_behavior(port_behavior)\n\n def _handle_unauthenticated_state(self, mac):\n self._update_device_state_varz(mac, DVAState.unauthenticated)\n\n def _set_port_sequestered(self, mac):\n \"\"\"Set port to sequester vlan\"\"\"\n if not self._process_device_behavior:\n return\n device_behavior = DeviceBehavior(segment=self._testing_segment)\n self._process_device_behavior(mac, device_behavior, static=False)\n self._update_device_state_varz(mac, DVAState.sequestered)\n\n def _set_port_operational(self, mac):\n \"\"\"Set port to operation vlan\"\"\"\n if not self._process_device_behavior:\n return\n\n static = mac in self._static_device_behaviors\n device_behavior = (\n self._static_device_behaviors.get(mac) or self._dynamic_device_behaviors.get(mac))\n assert device_behavior\n\n self._process_device_behavior(mac, device_behavior, static=static)\n self._update_device_state_varz(mac, DVAState.static if static else DVAState.operational)\n\n def _handle_infracted_state(self, mac):\n static = mac in self._static_device_behaviors\n self._process_device_behavior(mac, DeviceBehavior(), static=static)\n 
self._update_device_state_varz(mac, DVAState.infracted)\n\n    def clear_static_device_behaviors(self):\n        \"\"\"Remove all static device behaviors\"\"\"\n        with self._lock:\n            macs = list(self._static_device_behaviors.keys())\n            for mac in macs:\n                self._update_static_vlan_varz(mac, INVALID_VLAN)\n                self._handle_deauthenticated_device(mac, static=True)\n\n    def _process_device_placement(self, mac, device_placement, static=False):\n        if self._device_state_manager:\n            self._device_state_manager.process_device_placement(mac, device_placement, static)\n\n    def _process_device_behavior(self, mac, device_behavior, static=False):\n        if self._device_state_manager:\n            self._device_state_manager.process_device_behavior(mac, device_behavior, static)\n\n    def _get_vlan_from_segment(self, segment):\n        if self._device_state_manager:\n            return self._device_state_manager.get_vlan_from_segment(segment)\n        return None\n\n    def _update_device_state_varz(self, mac, device_state):\n        if self._varz_updater:\n            self._varz_updater.update_device_state_varz(mac, device_state)\n\n    def _update_static_vlan_varz(self, mac, vlan):\n        if self._varz_updater:\n            self._varz_updater.update_static_vlan_varz(mac, vlan)\n","sub_path":"forch/port_state_manager.py","file_name":"port_state_manager.py","file_ext":"py","file_size_in_byte":12719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"221232110","text":"list = [] # declare an empty list\nnum = 0 # declare a variable\n\nfor i in range(5): # build a 5x5 two-dimensional list\n    e = []\n    for j in range(5):\n        e.append(num) # append num to e, incrementing num by 1\n        num += 1 \n    list.append(e) # append e to list\n    i += 5 # i grows by 5 for each e list appended\n# print(list)\n\nfor k in list: # iterate over list\n    print(k[1:4:2]) # print indices 1 through 3, stepping by 2\n\n# for k in range(5):\n#     print(list[k][1:4:2])\n\n# for k in range(5):\n#     print(list[k][1], list[k][3]) # print just the values, without list formatting","sub_path":"19_허유빈/session04/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"301084680","text":"import datetime\nimport logging\nimport sys\n\nfrom billing import constants\nfrom billing import plugins\n\nLOG = logging.getLogger(__name__)\n\nTIME_START = datetime.datetime(1970, 1, 1)\n\n\nclass BucketPlugin(plugins.BasePlugin):\n    order = constants.BUCKET_ORDER\n\n    def __init__(self, conf, *arg, **kwargs):\n        self.bucket_size = conf['plugins']['bucket'].get('size', 180)\n\n    def process_line(self, buckets, xfer):\n        # Will send samples to Ceilometer for at least every 'bucket.size'\n        # seconds.\n        epoch = int((xfer['time'] - TIME_START).total_seconds())\n        bucket = epoch - (epoch % self.bucket_size)\n\n        LOG.debug('bucket: %s', bucket)\n\n        tenant_id = xfer['tenant_id']\n        container = xfer['container']\n        billing = xfer['billing']\n\n        buckets.setdefault(bucket, {tenant_id: {}})\n        buckets[bucket].setdefault(tenant_id, {container: {}})\n        buckets[bucket][tenant_id].setdefault(container, {billing: {}})\n\n        for key in ['bytes_in', 'bytes_out']:\n            buckets[bucket][tenant_id][container].setdefault(billing, {key: 0})\n            buckets[bucket][tenant_id][container][billing].setdefault(key, 0)\n            buckets[bucket][tenant_id][container][billing][key] += xfer[key]\n","sub_path":"billing/plugins/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"85385204","text":"from sklearn.preprocessing import KBinsDiscretizer\nfrom sklearn.mixture import GaussianMixture, 
BayesianGaussianMixture\nimport numpy as np\n\nfrom ..utils import CATEGORICAL, ORDINAL, CONTINUOUS\n\nclass DiscretizeTransformer(object):\n \"\"\"Discretize continuous columns into several bins.\n Transformation result is a int array.\"\"\"\n def __init__(self, meta, n_bins):\n self.meta = meta\n self.c_index = [id for id, info in enumerate(meta) if info['type'] == CONTINUOUS]\n self.kbin_discretizer = KBinsDiscretizer(n_bins=n_bins, encode='ordinal', strategy='uniform')\n\n def fit(self, data):\n if self.c_index == []:\n return\n self.kbin_discretizer.fit(data[:, self.c_index])\n\n def transform(self, data):\n if self.c_index == []:\n return data.astype('int')\n\n data_t = data.copy()\n data_t[:, self.c_index] = self.kbin_discretizer.transform(data[:, self.c_index])\n return data_t.astype('int')\n\n def inverse_transform(self, data):\n if self.c_index == []:\n return data\n\n data_t = data.copy().astype('float32')\n data_t[:, self.c_index] = self.kbin_discretizer.inverse_transform(data[:, self.c_index])\n return data_t\n\nclass GeneralTransformer(object):\n \"\"\"\n Continuous and ordinal columns are normalized to [0, 1].\n Discrete columns are converted to a one-hot vector.\n \"\"\"\n def __init__(self, meta, act='sigmoid'):\n self.act = act\n self.meta = meta\n self.output_dim = 0\n for info in self.meta:\n if info['type'] in [CONTINUOUS, ORDINAL]:\n self.output_dim += 1\n else:\n self.output_dim += info['size']\n\n def fit(self, data):\n pass\n\n def transform(self, data):\n data_t = []\n self.output_info = []\n for id_, info in enumerate(self.meta):\n col = data[:, id_]\n if info['type'] == CONTINUOUS:\n col = (col - (info['min'])) / (info['max'] - info['min'])\n if self.act == 'tanh':\n col = col * 2 - 1\n data_t.append(col.reshape([-1, 1]))\n self.output_info.append((1, self.act))\n elif info['type'] == ORDINAL:\n col = col / info['size']\n if self.act == 'tanh':\n col = col * 2 - 1\n data_t.append(col.reshape([-1, 1]))\n self.output_info.append((1, self.act))\n else:\n col_t = np.zeros([len(data), info['size']])\n col_t[np.arange(len(data)), col.astype('int32')] = 1\n data_t.append(col_t)\n self.output_info.append((info['size'], 'softmax'))\n return np.concatenate(data_t, axis=1)\n\n def inverse_transform(self, data):\n data_t = np.zeros([len(data), len(self.meta)])\n\n data = data.copy()\n for id_, info in enumerate(self.meta):\n if info['type'] == CONTINUOUS:\n current = data[:, 0]\n data = data[:, 1:]\n\n\n if self.act == 'tanh':\n current = (current + 1) / 2\n current = np.clip(current, 0, 1)\n data_t[:, id_] = current * (info['max'] - info['min']) + info['min']\n\n elif info['type'] == ORDINAL:\n current = data[:, 0]\n data = data[:, 1:]\n if self.act == 'tanh':\n current = (current + 1) / 2\n current = current * info['size']\n current = np.round(current).clip(0, info['size'] - 1)\n data_t[:, id_] = current\n else:\n current = data[:, :info['size']]\n data = data[:, info['size']:]\n data_t[:, id_] = np.argmax(current, axis=1)\n\n return data_t\n\nclass GMMTransformer(object):\n \"\"\"\n Continuous columns are modeled with a GMM.\n and then normalized to a scalor [0, 1] and a n_cluster dimensional vector.\n\n Discrete and ordinal columns are converted to a one-hot vector.\n \"\"\"\n\n def __init__(self, meta, n_clusters=5):\n self.meta = meta\n self.n_clusters = n_clusters\n\n def fit(self, data):\n model = []\n\n self.output_info = []\n self.output_dim = 0\n for id_, info in enumerate(self.meta):\n if info['type'] == CONTINUOUS:\n gm = GaussianMixture(self.n_clusters)\n 
gm.fit(data[:, id_].reshape([-1, 1]))\n model.append(gm)\n self.output_info += [(1, 'tanh'), (self.n_clusters, 'softmax')]\n self.output_dim += 1 + self.n_clusters\n else:\n model.append(None)\n self.output_info += [(info['size'], 'softmax')]\n self.output_dim += info['size']\n\n self.model = model\n\n def transform(self, data):\n values = []\n for id_, info in enumerate(self.meta):\n current = data[:, id_]\n if info['type'] == CONTINUOUS:\n current = current.reshape([-1, 1])\n\n means = self.model[id_].means_.reshape((1, self.n_clusters))\n stds = np.sqrt(self.model[id_].covariances_).reshape((1, self.n_clusters))\n features = (current - means) / (2 * stds)\n\n probs = self.model[id_].predict_proba(current.reshape([-1, 1]))\n argmax = np.argmax(probs, axis=1)\n idx = np.arange((len(features)))\n features = features[idx, argmax].reshape([-1, 1])\n\n features = np.clip(features, -.99, .99)\n\n values += [features, probs]\n else:\n col_t = np.zeros([len(data), info['size']])\n col_t[np.arange(len(data)), current.astype('int32')] = 1\n values.append(col_t)\n\n return np.concatenate(values, axis=1)\n\n def inverse_transform(self, data, sigmas):\n data_t = np.zeros([len(data), len(self.meta)])\n\n st = 0\n for id_, info in enumerate(self.meta):\n if info['type'] == CONTINUOUS:\n u = data[:, st]\n v = data[:, st+1:st+1+self.n_clusters]\n if sigmas is not None:\n sig = sigmas[st]\n u = np.random.normal(u, sig)\n u = np.clip(u, -1, 1)\n st += 1 + self.n_clusters\n means = self.model[id_].means_.reshape([-1])\n stds = np.sqrt(self.model[id_].covariances_).reshape([-1])\n p_argmax = np.argmax(v, axis=1)\n std_t = stds[p_argmax]\n mean_t = means[p_argmax]\n tmp = u * 2 * std_t + mean_t\n data_t[:, id_] = tmp\n else:\n current = data[:, st:st+info['size']]\n st += info['size']\n data_t[:, id_] = np.argmax(current, axis=1)\n return data_t\n\nclass BGMTransformer(object):\n \"\"\"\n Continuous columns are modeled with a BayesianGMM.\n and then normalized to a scalor [0, 1] and a vector.\n\n Discrete and ordinal columns are converted to a one-hot vector.\n \"\"\"\n\n def __init__(self, meta, n_clusters=10, eps=0.005):\n \"\"\"n_cluster is the upper bound of modes\n \"\"\"\n self.meta = meta\n self.n_clusters = n_clusters\n self.eps = eps\n\n def fit(self, data):\n model = []\n\n self.output_info = []\n self.output_dim = 0\n self.components = []\n for id_, info in enumerate(self.meta):\n if info['type'] == CONTINUOUS:\n gm = BayesianGaussianMixture(self.n_clusters,\n weight_concentration_prior_type='dirichlet_process',\n weight_concentration_prior = 0.001,\n n_init=1)\n gm.fit(data[:, id_].reshape([-1, 1]))\n model.append(gm)\n comp = gm.weights_ > self.eps\n self.components.append(comp)\n print(np.sum(comp))\n self.output_info += [(1, 'tanh'), (np.sum(comp), 'softmax')]\n self.output_dim += 1 + np.sum(comp)\n else:\n model.append(None)\n self.components.append(None)\n self.output_info += [(info['size'], 'softmax')]\n self.output_dim += info['size']\n\n self.model = model\n\n def transform(self, data):\n values = []\n for id_, info in enumerate(self.meta):\n current = data[:, id_]\n if info['type'] == CONTINUOUS:\n current = current.reshape([-1, 1])\n\n means = self.model[id_].means_.reshape((1, self.n_clusters))\n stds = np.sqrt(self.model[id_].covariances_).reshape((1, self.n_clusters))\n features = (current - means) / (4 * stds)\n\n probs = self.model[id_].predict_proba(current.reshape([-1, 1]))\n\n n_opts = sum(self.components[id_])\n features = features[:, self.components[id_]]\n probs = probs[:, 
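# Toy version of the per-mode normalisation used in GMMTransformer.transform
# above: each value is centred on the mean of its most likely mixture
# component and scaled by that component's std (a sketch on synthetic data).
import numpy as np
from sklearn.mixture import GaussianMixture
rng = np.random.RandomState(0)
x = np.concatenate([rng.normal(0, 1, 500), rng.normal(10, 2, 500)]).reshape(-1, 1)
gm = GaussianMixture(2, random_state=0).fit(x)
means = gm.means_.reshape(1, -1)
stds = np.sqrt(gm.covariances_).reshape(1, -1)
feats = (x - means) / (2 * stds)               # candidate feature per component
best = np.argmax(gm.predict_proba(x), axis=1)  # most likely component per sample
feats = feats[np.arange(len(x)), best]
print(feats.min(), feats.max())  # mostly within [-1, 1]; the original then clips to +/-0.99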
self.components[id_]]\n\n opt_sel = np.zeros(len(data), dtype='int')\n for i in range(len(data)):\n pp = probs[i] + 1e-6\n pp = pp / sum(pp)\n opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp)\n\n idx = np.arange((len(features)))\n features = features[idx, opt_sel].reshape([-1, 1])\n features = np.clip(features, -.99, .99)\n\n probs_onehot = np.zeros_like(probs)\n probs_onehot[np.arange(len(probs)), opt_sel] = 1\n values += [features, probs_onehot]\n else:\n col_t = np.zeros([len(data), info['size']])\n col_t[np.arange(len(data)), current.astype('int32')] = 1\n values.append(col_t)\n\n return np.concatenate(values, axis=1)\n\n def inverse_transform(self, data, sigmas):\n data_t = np.zeros([len(data), len(self.meta)])\n\n st = 0\n for id_, info in enumerate(self.meta):\n if info['type'] == CONTINUOUS:\n u = data[:, st]\n v = data[:, st+1:st+1+np.sum(self.components[id_])]\n if sigmas is not None:\n sig = sigmas[st]\n u = np.random.normal(u, sig)\n u = np.clip(u, -1, 1)\n v_t = np.ones((data.shape[0], self.n_clusters)) * -100\n v_t[:, self.components[id_]] = v\n v = v_t\n st += 1 + np.sum(self.components[id_])\n means = self.model[id_].means_.reshape([-1])\n stds = np.sqrt(self.model[id_].covariances_).reshape([-1])\n p_argmax = np.argmax(v, axis=1)\n std_t = stds[p_argmax]\n mean_t = means[p_argmax]\n tmp = u * 4 * std_t + mean_t\n data_t[:, id_] = tmp\n else:\n current = data[:, st:st+info['size']]\n st += info['size']\n data_t[:, id_] = np.argmax(current, axis=1)\n return data_t\n","sub_path":"synthetic_data_benchmark/synthesizer/synthesizer_utils.py","file_name":"synthesizer_utils.py","file_ext":"py","file_size_in_byte":11153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"599042920","text":"#!/usr/bin/env python3\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../..\")) # directory of module fea_util\nfrom contextlib import contextmanager\n\nimport msprime, pyslim\nimport numpy as np\nimport tszip\n\nfrom feature_util import run_RELATE\n\nhelpMsg = '''\n usage: $./slimSims2ts.py <.param file> \n Take slim simulation output, run RELATE and save both true and inferred tree sequence file for each sim\n and should be directory names!\n'''\n\ndef get_site_ppos(ts):\n var_ppos_ls = []\n prev_pos = 0\n for site in ts.sites():\n site_pos = int(site.position)\n if site_pos <= prev_pos:\n if prev_pos == 49999:\n var_ppos_ls.append(-1) # flag indicating this site should be removed\n continue\n else:\n site_pos = prev_pos + 1\n var_ppos_ls.append(site_pos)\n prev_pos = site_pos\n return np.array(var_ppos_ls)\n\ndef sim2ts(ts_mut, mu, rho_cMpMb, N0):\n\n # ts_samp = pyslim.load(sim_path)\n # ts_mut = msprime.mutate(ts_samp, rate=scaled_mu, keep=True) # random_seed=958,\n\n ppos_ls = get_site_ppos(ts_mut)\n GTM = ts_mut.genotype_matrix()\n \n mask = (ppos_ls != -1)\n ppos_ls = ppos_ls[mask]\n GTM = GTM[mask, :]\n\n ts_inf, _ = run_RELATE(ppos_ls, GTM, str(2*N0), rho_cMpMb=rho_cMpMb, mut_rate=str(mu))\n\n return ts_inf # inferred tree-seqs\n\n# @contextmanager is just an easier way of saying cd = contextmanager(cd)\n@contextmanager\ndef cd(newdir):\n prevdir = os.getcwd()\n os.chdir(os.path.expanduser(newdir))\n try:\n yield\n finally:\n os.chdir(prevdir)\n\ndef main(args):\n if len(args) != 8: #7 arguments\n return helpMsg\n\n parent_dir = os.getcwd() # directory where the job is submitted\n param_path = args[1]\n scale = int(args[2])\n no_sims = int(args[3])\n thr = int(args[4]) # should be 1-indexed\n tot_thr 
= int(args[5])\n inPref = args[6]\n outPref = args[7]\n\n with open(param_path, \"r\") as paramF:\n lines = paramF.readlines()\n scaled_mu = float(lines[0].strip())\n scaled_rho = float(lines[1].strip())\n scaled_N0 = int(lines[-1].strip().split()[1])\n\n mu = scaled_mu/scale\n rho_cMpMb = scaled_rho/scale*100*1e6 # 1cM = 1e-2 crossover\n N0 = scaled_N0*scale\n\n # if mode == 's':\n # meta_data = np.genfromtxt(metaF, usecols=(1, 2, 3, 4), dtype=None)\n # # e.g. `%% 0.100992 2347 0.520421 SLiM_trial_swp/SLiM_trial_swp_4501`\n # no_sims = meta_data.shape[0]\n\n tasks = no_sims//tot_thr\n a_idx = (thr-1)*tasks # inclusive\n if thr == tot_thr:\n b_idx = no_sims # exclusive\n else:\n b_idx = thr*tasks # exclusive\n # indices are 0-based\n\n print(\"Processing: [\", a_idx, b_idx, \")\", flush=True)\n\n wd = outPref+'/'+'RELATE_temp_'+str(thr)\n os.mkdir(wd, 0o755)\n\n # idx_ls = []\n # sc_ls = []\n # onset_ls = []\n # caf_ls = []\n #fv_idx_ls = []\n #cnt = 0\n\n log_f = open(outPref+\"_\"+str(thr)+\".log\", 'a')\n with cd(wd):\n for r_idx in range(a_idx, b_idx):\n # if mode == 'n':\n ID = r_idx + 1 # convert 0-based index to 1-based index\n sim_path = parent_dir+\"/\"+inPref+\"/\"+inPref+\"_\"+str(ID)+\"_samp.trees\"\n # elif mode == 's':\n # ID = int(meta_data[r_idx][3].split(b'_')[-1]) # retrieve 1-based index from meta file\n # sim_path = parent_dir+\"/\"+meta_data[r_idx][3].decode()+\"_samp.trees\"\n\n if not os.path.isfile(sim_path): continue\n # if mode == 's':\n # idx_ls.append(ID)\n # sc_ls.append(meta_data[r_idx][0])\n # onset_ls.append(meta_data[r_idx][1])\n # caf_ls.append(meta_data[r_idx][2])\n\n outFP = parent_dir+\"/\"+outPref+\"/\"+outPref+\"_\"+str(ID)\n if os.path.isfile(outFP+\"_tru.trees\") and os.path.isfile(outFP+\"_inf.trees\"): continue\n\n print(\"Input:\", sim_path, flush=True)\n ts_samp = pyslim.load(sim_path)\n ts_tru = msprime.mutate(ts_samp, rate=scaled_mu, keep=True)\n ts_inf = sim2ts(ts_tru, mu, rho_cMpMb, N0)\n\n ts_tru.dump(outFP+\"_tru.trees\")\n ts_inf.dump(outFP+\"_inf.trees\")\n #tszip.compress(ts_inf, outFP+\"_inf.trees.tsz\")\n print(ID, \"SUCCESS\", file=log_f, flush=True)\n\n # if mode == 's': np.savez_compressed(parent_dir+\"/\"+outPref+\"/\"+outPref+\"_meta_\"+str(thr), \n # idx=idx_ls, sc=sc_ls, onset=onset_ls, caf=caf_ls)\n log_f.close()\n os.rmdir(wd)\n\n return 0\n\nsys.exit(main(sys.argv))\n","sub_path":"sim2args/SLiMsim2arg/slimSims2ts_legacy.py","file_name":"slimSims2ts_legacy.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"429341447","text":"from pylab import *\nimport numpy\n\nsavefile1='/home/giulio/RNN/npzs/count/model_count_k_ones_N_100_activ_relu_max_length_144.npz'\nsavefile2='/home/giulio/RNN/npzs/count/model_count_one_hot_N_100_activ_smart_tanh_max_length_144.npz'\n\n\nnpz = numpy.load(savefile1)\nstore_valid1 = npz['valid_error']\n\nnpz = numpy.load(savefile2)\nstore_valid2 = npz['valid_error']\n\n\nstore_valid1=store_valid1[0:11676]\nstore_valid2=store_valid2[0:12976]\n\nt1 = numpy.arange(len(store_valid1))*20\ns1 = store_valid1\nt2 = numpy.arange(len(store_valid2))*20\ns2 = store_valid2\n\n#plot(t, s1,'b.',t,s2,'r.')\n\nline_1, = plot(t1, s1,'b.')\nline_2, = plot(t2,s2,'r.')\nlegend([line_1, line_2],['k ones relu','one hot tanh'],shadow=True, fancybox=True)\n\nxlabel('iteration (n)')\nylabel('validation error (%)')\ngrid(True)\nsavefig(\"test.svg\")\nshow() 
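# Tiny illustration of the site-position cleanup done by get_site_ppos in the
# slimSims2ts script above: float site positions truncated to int can collide,
# and each collision is bumped to prev_pos + 1 (the -1 flag at the 50 kb
# boundary is handled separately in the original).
positions = [10.2, 10.9, 11.4, 30.0]  # truncation makes the first three collide in turn
cleaned, prev = [], 0
for p in positions:
    p = int(p)
    if p <= prev:
        p = prev + 1
    cleaned.append(p)
    prev = p
print(cleaned)  # [10, 11, 12, 30] -- strictly increasing integer positions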
\n","sub_path":"utils/plot_iter_error.py","file_name":"plot_iter_error.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"190200884","text":"#!/usr/bin/env python3\n\n\"\"\"\nSerial Helper for Bobber Project\nSummer 2018\n\"\"\"\n\nimport serial\nimport sys\nfrom datetime import datetime\n\n# this port address is for the serial tx/rx pins on the GPIO header\n# SERIAL_PORT = '/dev/cu.usbmodem1421'\n# be sure to set this to the same rate used on the Arduino\n# SERIAL_RATE = 9600\n\n\ndef get_time():\n return datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[:-3]\n\n\ndef main():\n\n if len(sys.argv)== 4:\n SERIAL_PORT = sys.argv[1]\n BAUD_RATE = int(sys.argv[2])\n FILENAME = sys.argv[3]\n else:\n print(\"No serial port, baud rate, and filename provided\")\n print(\" USAGE: python3 serial-helper.py \")\n print(\"EXAMPLE: python3 serial-helper.py /dev/ttyACM0 9600 /var/www/html/ORP.csv &> /home/pi/ORP.log &\")\n print(\"EXAMPLE: python3 serial-helper.py /dev/ttyACM1 9600 /var/www/html/DO.csv &> /home/pi/DO.log &\")\n print(\"EXAMPLE: python3 serial-helper.py /dev/ttyACM2 9600 /var/www/html/EC.csv &> /home/pi/EC.log &\")\n print(\"EXAMPLE: python3 serial-helper.py /dev/ttyACM3 9600 /var/www/html/PH.csv &> /home/pi/PH.log &\")\n sys.exit(1)\n\n ser = serial.Serial(SERIAL_PORT, BAUD_RATE)\n\n with open(FILENAME, \"a\") as filepointer:\n filepointer.write(\"%s,%s,%s,%s\\r\\n\" % (\"time\", \"sensor\", \"temp\", \"voltage\"))\n\n while True:\n reading = ser.readline().decode('utf-8')\n parsed_line = reading.strip().replace(\" \", \"\").split(\",\")\n\n time, sensor, temp, voltage = parsed_line[0], parsed_line[1], parsed_line[2], parsed_line[3]\n\n with open(FILENAME, \"a\") as filepointer:\n filepointer.write(\"%s,%s,%s,%s\\r\\n\" % (time, sensor, temp, voltage))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"serial-helper.py","file_name":"serial-helper.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"475191696","text":"\"\"\"integration_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^', include('apps.integrate.urls', namespace = \"integrate\")),\n url(r'disappearing_ninjas_assignment/', include('apps.disappearingninjas.urls', namespace = \"disappearingninjas\")),\n url(r'ninja_gold_assignment/', include('apps.ninjagold.urls', namespace = \"ninjagold\")),\n url(r'random_word_assignment/', include('apps.randomword.urls', namespace = \"randomword\")),\n url(r'courses/', include('apps.courses.urls', namespace = \"courses\")),\n url(r'loginregister/', include('apps.loginregister.urls', namespace = \"login_register\"))\n]\n","sub_path":"integration_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"565678113","text":"#import pytest\nimport gecatsim.pyfiles.CommonTools as c\nimport numpy as np\nimport tempfile\nimport os\nimport numpy.matlib as nm\n\n\ndef test_make_col():\n row = 3\n col = 5\n num = 10\n x = np.full((row, col), num)\n x_as_col = c.make_col(x)\n assert len(x_as_col) == row * col\n\n\ndef test_load_C_lib():\n clib = c.load_C_lib()\n assert clib is not None\n print(clib)\n\n\ndef test_path_helper():\n assert c.my_path is not None\n with tempfile.TemporaryDirectory() as tmpdirname:\n with open(os.path.join(tmpdirname, 'temp_phantom.cfg'), 'w') as fp:\n fp.write('Hello world!')\n c.my_path.add_search_path(tmpdirname)\n found_path = c.my_path.find('phantom', 'temp_phantom', '.cfg')\n assert found_path == os.path.join(tmpdirname, 'temp_phantom.cfg')\n\n\ndef test_vector_norm():\n bad_vector = np.zeros([4, 1], dtype=np.single)\n result = c.vectornorm(bad_vector)\n assert result is None\n\n good_vector = np.ones([3, 1], dtype=np.single)\n result = c.vectornorm(good_vector)\n assert result is not None\n print(result)\n assert abs(result[0][0]-1.7320508) < 1e-5\n","sub_path":"gecatsim/tests/test_catsim/test_CommonTools.py","file_name":"test_CommonTools.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"220972717","text":"import json\nimport re\nfrom pprint import pprint\nimport js2py\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\n\n\n# 请求首页,获得第一个wzws_cid\nresp = requests.get(\n url=\"http://wenshu.court.gov.cn/\",\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\"\n },\n # proxies=proxies\n)\nprint(resp.cookies)\nwzws_cid = requests.utils.dict_from_cookiejar(resp.cookies)[\"wzws_cid\"]\n# print(\"wzws_cid:\",wzws_cid)\n\n# 抓取响应中的js代码\nraw_func = re.findall(r'',resp.text,re.DOTALL)[0]\n# print(raw_func)\n\nsub_text = '''aaa=p;return \"'zifuchuan'\"'''\ncourl_func = re.sub('return p',sub_text,raw_func) # 把原文中的return p 替换\n# print(courl_func)\n\ncontext = js2py.EvalJs()\ncontext.execute('var aaa') # 定义个变量获取函数的返回值\ncontext.execute(courl_func) # 执行替换好的函数\nunpacked_cofunc = context.aaa # 拿到函数\n# print(context.aaa)\n\ncode = re.findall(r'(.*)function HXXTTKKLLPPP5',context.aaa)[0]\n# print(code)\n\ncontext.execute(code)\n\njs = '''\nvar cookieString = \"\";\nvar wzwstemplate_result = KTKY2RBD9NHPBCIHV9ZMEQQDARSLVFDU(template.toString());\nconsole.log(cookieString)\nvar confirm = QWERTASDFGXYSF();\nvar wzwschallenge_result 
= KTKY2RBD9NHPBCIHV9ZMEQQDARSLVFDU(confirm.toString());\nconsole.log(cookieString)\nconsole.log(dynamicurl)\n'''\ncontext.execute(js)\n\nnew_cookies = {\n \"wzws_cid\":wzws_cid,\n \"wzwstemplate\":context.wzwstemplate_result,\n \"wzwschallenge\":context.wzwschallenge_result\n}\n# print(\"new_cookies:\",new_cookies)\n\nnew_url = \"http://wenshu.court.gov.cn\" + context.dynamicurl\n# print(\"new_url:\",new_url)\nresp = requests.get(\n url=new_url,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n \"Referer\":\"http://wenshu.court.gov.cn/\"\n },\n cookies=new_cookies,\n allow_redirects=False,\n # proxies=proxies\n)\n\nwzws_cid = requests.utils.dict_from_cookiejar(resp.cookies)[\"wzws_cid\"] #获得了新的cid\n# print(\"wzws_cid 计算后的:\",wzws_cid)\n\n# 带着新的cid请求首页\nsession = requests.session()\n\nresp = session.get(\n url=\"http://wenshu.court.gov.cn/\",\n cookies = {\n \"wzws_cid\":wzws_cid\n },\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n \"Referer\":\"http://wenshu.court.gov.cn/\"\n },\n # proxies=proxies\n)\n\n# resp = session.post(\n# url=\"http://wenshu.court.gov.cn/Index/GetAllCountRefresh?refresh=\", # 获得首页标题\n# headers={\n# \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n# \"Referer\":\"http://wenshu.court.gov.cn/\",\n# \"X-Requested-With\":\"XMLHttpRequest\"\n# }\n# )\n\n# print(resp.text)\n# print(\"*\"*100)\n\ntime.sleep(0.1)\n\n# 请求列表页setcookie\nresp = requests.get(\n url=\"http://wenshu.court.gov.cn/List/List?sorttype=1&conditions=searchWord+1+AJLX++%E6%A1%88%E4%BB%B6%E7%B1%BB%E5%9E%8B:%E5%88%91%E4%BA%8B%E6%A1%88%E4%BB%B6\",\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n \"Referer\":\"http://wenshu.court.gov.cn/\",\n \"X-Requested-With\":\"XMLHttpRequest\"\n },\n cookies={\n \"wzws_cid\": wzws_cid\n },\n # proxies=proxies\n\n)\n# 从cookie中获取生成加密参数需要的值\nvjkl5 = requests.utils.dict_from_cookiejar(resp.cookies)[\"vjkl5\"]\n# print(\"vjkl5:\",vjkl5)\n\n# 生成加密字符串vl5x和guid\nwith open('第一个.js','r') as f:\n js_content = f.read()\nwith open('md5.js','r') as f:\n js_md5 = f.read()\nwith open('sha1.js','r') as f:\n js_sha1 = f.read()\nwith open('base64.js','r') as f:\n js_base64 = f.read()\nwith open('guid文件.js','r') as f:\n js_guid = f.read()\n\ncontext = js2py.EvalJs()\ncontext.execute(js_md5)\ncontext.execute(js_sha1)\ncontext.execute(js_base64)\ncontext.vjkl5 = vjkl5\ncontext.execute(js_content)\ncontext.execute(js_guid)\n# print('vl5x:',context.result)\n# print('guid:',context.guid)\n\n# 整理参数向列表页发送post请求\ndata = {\n \"Param\":\"案件类型:刑事案件\",\n \"Index\":\"1\",\n \"Page\":\"10\",\n \"Order\":\"法院层级\",\n \"Direction\":\"asc\",\n \"vl5x\":context.result,\n \"number\":\"wens\",\n \"guid\":context.guid\n}\n# print(\"data:\",data)\n\nresp = requests.post(\n url=\"http://wenshu.court.gov.cn/List/ListContent\",\n data=data,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n \"Referer\":\"http://wenshu.court.gov.cn/List/List?sorttype=1&conditions=searchWord+1+AJLX++%E6%A1%88%E4%BB%B6%E7%B1%BB%E5%9E%8B:%E5%88%91%E4%BA%8B%E6%A1%88%E4%BB%B6\",\n 
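# The cookie computation above leans on js2py's EvalJs sandbox; the core
# pattern, independent of this site's packed script, is just:
import js2py
ctx = js2py.EvalJs()
ctx.execute("var token = (1234).toString(16);")
print(ctx.token)  # '4d2' -- variables defined in JS surface as attributes on ctx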
\"X-Requested-With\":\"XMLHttpRequest\"\n },\n cookies={\n \"wzws_cid\": wzws_cid,\n \"vjkl5\":vjkl5\n },\n # proxies=proxies\n)\n\n# 保存获取的列表数据\nwith open('list_data.txt','wb') as f:\n f.write(resp.content)\n\n# 处理一下数据\ncontext.data = resp.text\ncontext.execute('datalist = eval(data)')\n\nwith open('Base_64.js','r',encoding='utf-8') as f:\n context.execute(f.read())\nwith open('rawdeflate.js','r',encoding='utf-8') as f:\n context.execute(f.read())\n\nwith open('pako.js','r',encoding='utf-8') as f:\n context.execute(f.read())\n\ndatalist = json.loads(context.datalist)\n# print(datalist)\nfor row in datalist:\n # pprint(row)\n pass\n\n# RunEval = datalist[0][\"RunEval\"]\n# doc_id =''\n# for item in datalist[2:]:\n# # print(item)\n# doc_id += item[\"文书ID\"]\n# # print(doc_id)\n#\n# data = {\n# 'runEval': RunEval,\n# 'docIds': doc_id\n# }\n#\n# print(data)\n#\n# resp = requests.post(\n# url = 'http://wenku.jwzlai.com/common/decode/docId',\n# headers={\n# \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n# },\n# data = data\n#\n# )\n#\n# print(resp.text)\n\n# 破解情页\ndetail_url1 = 'http://wenshu.court.gov.cn/WZWSREL2NvbnRlbnQvY29udGVudD9Eb2NJRD0xM2Q0YzAxYS0wNzM0LTRlYzEtYmJhYy02NThmOGJiOGVjNjImS2V5V29yZD0='\n\nresp = requests.get(\n url=detail_url1,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n },\n cookies=new_cookies,\n allow_redirects=False,\n # proxies=proxies\n)\n\n# print(resp.cookies)\n# print(resp.headers)\nwzws_cid3 = requests.utils.dict_from_cookiejar(resp.cookies)[\"wzws_cid\"]\nlocation = resp.headers['Location']\n\nprint(wzws_cid3)\nprint(location)\n\nDocID = re.search(r'/content/content\\?DocID=(.*?)&KeyWord=',location).group(1)\nprint(DocID)\n\ndetail_url2 = 'http://wenshu.court.gov.cn/CreateContentJS/CreateContentJS.aspx?DocID='+DocID\nresp = requests.get(\n url=detail_url2,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n \"Referer\": \"http://wenshu.court.gov.cn/content/content?DocID={}&KeyWord=\".format(DocID)\n },\n\n cookies={\n \"wzws_cid\": wzws_cid3\n },\n # proxies=proxies\n)\n# print(resp.text)\n\nbbb = re.search(r'(.*)var jsonData',resp.text,re.DOTALL).group(1)\n\ncontent_dict = re.search(r'JSON.stringify\\((.*?\\).*?)\\)',bbb,re.DOTALL).group(1)\ncontent_dict = json.loads(content_dict)\npprint(content_dict)\n\ncontent_html = re.search(r'jsonHtmlData = (.*)\\;',bbb,re.DOTALL).group(1)\ncontent_html = json.loads(content_html)\nhtml_raw = re.search(r'\"Html\":\"(.*?)\"',content_html,re.DOTALL).group(1)\n\n\nsoup = BeautifulSoup(html_raw,'lxml')\ntxt_list = soup.select('div')\nfor txt in txt_list:\n print(txt.get_text())\n","sub_path":"pawenshu0.py","file_name":"pawenshu0.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"199208550","text":"from common.forms import BootstrapModelForm\n\nfrom .models import COWSurvey\n\n\nclass COWSurveyForm(BootstrapModelForm):\n\n \"\"\" ModelForm for COWSurvey model.\"\"\"\n\n class Meta:\n model = COWSurvey\n fields = ['current_age',\n 'life_expectancy',\n 'current_income',\n 'accumulated_for_retirement',\n 'annual_savings',\n 'growth_rate_during_accumulation',\n 'growth_rate_during_distribution',\n 
'desired_retirement_income',\n 'desired_retirement_age',]\n\n def __init__(self, *args, **kwargs):\n NON_DOLLAR_FIELDS = ['current_age',\n 'life_expectancy',\n 'growth_rate_during_accumulation',\n 'growth_rate_during_distribution',\n 'desired_retirement_age']\n\n super(COWSurveyForm, self).__init__(*args, **kwargs)\n for myField in self.fields:\n field = self.fields[myField]\n if myField not in NON_DOLLAR_FIELDS:\n field.widget.attrs['class'] = field.widget.attrs['class'] + ' format-dollar'\n","sub_path":"stratinvnet/referrals/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"590490005","text":"\"\"\"\r\nmiddle 2022-01-17 回溯法-排列问题-有重复元素\r\nhttps://www.programmercarl.com/0047.%E5%85%A8%E6%8E%92%E5%88%97II.html#_47-%E5%85%A8%E6%8E%92%E5%88%97-ii\r\n排列问题只要树的叶子节点,两个list可以交错插入,所以不需要从start_index开始;\r\n【重点】(必须要有used,因为没有start_index了,所以不可以在用i>start_index来去重)\r\n【重点】有重复元素需要sort;\r\n\"\"\"\r\nclass Solution:\r\n def __init__(self):\r\n self.path = []\r\n self.res = []\r\n\r\n def permuteUnique(self, nums):\r\n if not nums:return self.res\r\n nums.sort()\r\n used = [0]*len(nums)\r\n self.dfs(nums,used)\r\n return self.res\r\n\r\n def dfs(self,nums,used):\r\n # 定义出口\r\n if len(self.path) == len(nums):\r\n self.res.append(self.path[:])\r\n return\r\n\r\n for i in range(len(nums)):\r\n # 如果树层里重复取值,跳过\r\n # // used[i - 1] == 1,说明同⼀树⽀nums[i - 1]使⽤过\r\n # // used[i - 1] == 0,说明同⼀树层nums[i - 1]使⽤过\r\n # // 如果同⼀树层nums[i - 1]使⽤过则直接跳过\r\n if i>0 and nums[i]==nums[i-1] and used[i-1]==0: # i>0对len(nums)==1的很重要\r\n continue\r\n\r\n # 剪枝(如果当前的元素已经被用过,continue)\r\n # 排列问题特有的部分,因为没有start_index来标记元素是否访问\r\n if used[i]==1:\r\n continue\r\n\r\n self.path.append(nums[i])\r\n used[i] = 1\r\n # 树枝递归\r\n self.dfs(nums,used)\r\n self.path.pop() # 回溯\r\n used[i] = 0 # 回溯\r\n\r\n\r\nif __name__ == '__main__':\r\n # nums = [1,1,2]\r\n nums = [1]\r\n print(Solution().permuteUnique(nums))\r\n","sub_path":"05_图dfs与bfs/1_回溯算法/4_排列问题/47-全排列 II.py","file_name":"47-全排列 II.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"306255242","text":"# -*- coding: utf-8 -*-\n\n__author__ = \"GHajba\"\n__copyright__ = \"Copyright 2016, JaPy Szoftver Kft\"\n__license__ = 'MIT'\n\nif __name__ == '__main__':\n count = 0\n with open('input3.txt', 'r') as infile:\n t = [[], [], []]\n l = 0\n for line in infile.readlines():\n if l > 0 and l % 3 == 0:\n for i in range(3):\n data = sorted(t[i])\n if data[0] + data[1] > data[2]:\n count += 1\n t = [[], [], []]\n l += 1\n x, y, z = [int(d) for d in line.split()]\n t[0].append(x)\n t[1].append(y)\n t[2].append(z)\n else:\n for i in range(3):\n data = sorted(t[i])\n if data[0] + data[1] > data[2]:\n count += 1\n\n print(count)\n","sub_path":"Day_2/aoc_day3.py","file_name":"aoc_day3.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"514973323","text":"\"\"\"\nПрактическое задание к уроку 1\n1. Поработайте с переменными, создайте несколько, выведите на экран.\nЗапросите у пользователя некоторые числа и строки и сохраните в переменные, затем выведите на экран.\n\"\"\"\n\n\nname = input('Enter your name: ')\nprint(f'Hi, {name}')\nage = int(input('Enter your age: '))\nprint(f'Great, you are only {age} years old.')\n\n\"\"\"\n2. Пользователь вводит время в секундах. 
\nПереведите время в часы, минуты, секунды и выведите в формате чч:мм:сс. \nИспользуйте форматирование строк.\n\"\"\"\n\nuser_time = int(input('Enter time in seconds: '))\nhour = user_time // 3600\nminute = (user_time - hour * 3600) // 60\nsecond = user_time % 60\n\nprint(f'{user_time} seconds is {hour:02}:{minute:02}:{second:02}.')\n\n\"\"\"\n3. Узнайте у пользователя число n. Найдите сумму чисел n + nn + nnn. Н\nапример, пользователь ввёл число 3. Считаем 3 + 33 + 333 = 369.\n\"\"\"\n\nuser_number = int(input('Enter number between 1 and 9: '))\nuser_number2 = user_number * 10 + user_number\nuser_number3 = user_number * 100 + user_number * 10 + user_number\nresult = user_number + user_number2 + user_number3\nprint(f'Result {user_number} + {user_number2} + {user_number3} = {result}')\n\n\"\"\"\n4. Пользователь вводит целое положительное число. Найдите самую большую цифру в числе. \nДля решения используйте цикл while и арифметические операции.\n\"\"\"\n\nuser_number = input('Enter number: ')\nmax_number = int(user_number[0])\ni = 0\n\nwhile i in range(len(user_number)):\n if max_number < int(user_number[i]):\n max_number = int(user_number[i])\n i += 1\n\nprint(f'Max number in {user_number} = {max_number}.')\n\n\"\"\"\n5. Запросите у пользователя значения выручки и издержек фирмы. \nОпределите, с каким финансовым результатом работает фирма. \nНапример, прибыль — выручка больше издержек, или убыток — издержки больше выручки. Выведите соответствующее сообщение.\n\nЕсли фирма отработала с прибылью, вычислите рентабельность выручки. \nЭто отношение прибыли к выручке. \nДалее запросите численность сотрудников фирмы и определите прибыль фирмы в расчёте на одного сотрудника.\n\n\"\"\"\n\nproceeds = int(input('Введите значение выручки в $ за период январь-сентябрь 2021 года: '))\ncosts = int(input('Введите значение издержек в $ за период январь-сентябрь 2021 года: '))\n\nif proceeds < costs:\n print(f'Вы работаете в убыток. Убыток за январь-сентябрь 2021 года составил {proceeds - costs}$.')\nelif proceeds == costs:\n print(f'Вы работаете в ноль (без прибыли). '\n f'Финансовый результат за январь-сентябрь 2021 года составил {proceeds - costs}$.')\nelse:\n print(f'Вы работаете в прибыль. '\n f'Прибыль за январь-сентябрь 2021 года составила {proceeds - costs}$. '\n f'Рентабельность выручки = {(((proceeds - costs) / proceeds) * 100):.2f}%.')\n count_employee = int(input('Введите количество сотрудников фирмы на 30.09.2021: '))\n print(f'Прибыль фирмы в расчете на 1 сотрудника = {((proceeds - costs) / count_employee):.2f}$.')\n\n\"\"\"\n6. Спортсмен занимается ежедневными пробежками. В первый день его результат составил a километров. \nКаждый день спортсмен увеличивал результат на 10% относительно предыдущего. \nТребуется определить номер дня, на который результат спортсмена составит не менее b километров. 
\nПрограмма должна принимать значения параметров a и b и выводить одно натуральное число — номер дня.\n\nНапример: a = 2, b = 3.\nРезультат:\n1-й день: 2\n2-й день: 2,2\n3-й день: 2,42\n4-й день: 2,66\n5-й день: 2,93\n6-й день: 3,22\nОтвет: на шестой день спортсмен достиг результата — не менее 3 км.\n\"\"\"\n\ndistance_a = int(input('Введите количество километров пробежки в первый день (a): '))\ndistance_b = int(input('Введите количество километров пробежки, которое хотите достичь (b) (меньше а): '))\n\nresult = 1\nprint(f'1-й день: {distance_a}')\n\nwhile distance_a < distance_b:\n distance_a *= 1.1\n result += 1\n print(f'{result}-й день: {distance_a:.2f}')\n\nprint(f'При увеличении результата на 10% относительно предыдущего ежедневно'\n f' вам понадобится {result} дней тренировок.')\n","sub_path":"PZlesson1.py","file_name":"PZlesson1.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"587078236","text":"# solving an autonomous syste of ODEs using the R4 method.\n# linear system example\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef RK4(f, t_start, x0, t_stop, n):\n h = (t_stop - t_start)/n\n t = [t_start]\n x = [x0]\n tn = t_start\n xn = x0\n\n for k in range(n):\n k1 = f(tn,xn)\n k2 = f(tn + h/2,xn + h*k1/2)\n k3 = f(tn + h/2,xn + h*k2/2)\n k4 = f(tn + h,xn + h*k3)\n tn = tn + h\n xn = xn + h*(k1 + 2*(k2 + k3) + k4)/6\n t.append(tn)\n x.append(xn)\n\n return np.array(t), np.array(x)\n# time parameters\nt_start = 0.0\nt_stop = 10.0\nN = 500\nt_step = (t_stop - t_start)/N\n\n# parameter value\na, b, c, d = -1.0, 4.0, -2.0, -1.0 # asymptotically stable equilibrium\n# starting point\nx10 = 13.5\nx20 = -11.1\n\n# parameter value\n#a, b, c, d = 1.0, 4.0, -2.0, 1.0 # unstable equilibrium\n# starting point\n#x10 = 0.05\n#x20 = -0.05\n\n# rhs of system\nA = np.array([[a,b],\n [c,d]])\ndef f(t,x):\n return A.dot(x)\n\n# Numerical solution by RK4\nt, x = RK4(f, t_start, np.array([x10, x20]), t_stop, N)\n\nx1 = x[:,0] \nx2 = x[:,1]\n \nplt.figure()\nplt.plot(x1, x2, 'r-', [x10], [x20], 'k.')\nplt.xlabel('$x_1$')\nplt.ylabel('$x_2$')\nplt.show()","sub_path":"CSB/EPG2/Code/system_example.py","file_name":"system_example.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"49281856","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Steve Iannucci\r\n\"\"\"\r\n\r\n\"\"\" \r\n\timport necessary packages\r\n\"\"\"\r\n\r\nimport pyrealsense2 as rs\r\nimport numpy as np \r\nimport cv2\r\nfrom matplotlib import pyplot as plt \r\nimport math\r\nimport sys\r\nimport platform\r\nimport csv\r\n\r\n\"\"\"\r\n\tdeclare variables\r\n\"\"\"\r\n# Center is aligned tot he LEFT imager (as in its left eye)\r\n# z+ is striaht ahead\r\n# x + is the right\r\n# y + is down\r\n\r\n# Camera Matrix\r\n# [fx 0 cx; 0 fy cy; 0 0 1]\r\n# (fx,fy) are focal point\r\n# (cx,cy) are optical centers\r\n\r\n# picture properties\r\nrows = 360\r\ncolumns = 640\r\nf_rate = 30 \r\n\r\n# colors\r\nblue = (255,0,0)\r\ngreen = (0, 255, 0)\r\nred = (0,0,255)\r\nyellow = (200, 250, 0)\r\n\r\n# set program steps\r\nstep_1 = False\r\nstep_2 = False\r\nstep_3 = False\r\nstep_4 = False\r\n\r\n# variables\r\nklick_counter = 0\r\nix = 0\r\niy = 0\r\nch_poi = np.zeros((2,2)) \t#chosen point\r\nkoord = np.zeros((2,3))\t\t# coordinates\r\n\r\n\"\"\"\r\n\tFUNCTIONS\r\n\"\"\"\r\n\r\ndef get_mouse_position(event, x, y, flags, param):\r\n\tglobal ix, 
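# Quick order-of-accuracy check for the RK4 routine in the system_example
# script above, on x' = -x with known solution e^{-t}: halving the step size
# should shrink the endpoint error roughly 16-fold (4th-order convergence).
import numpy as np
f = lambda t, x: -x
for n in (50, 100, 200):
    t, x = RK4(f, 0.0, np.array([1.0]), 5.0, n)
    print(n, abs(x[-1, 0] - np.exp(-5.0)))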
iy, klick_counter\r\n\tif event == cv2.EVENT_LBUTTONDOWN:\r\n\t\tix, iy = x, y\r\n\t\tklick_counter +=1\r\n\t\tprint (\"button clicked\")\r\n\r\ndef good_features_to_track(src_color):\r\n\tgray = cv2.cvtColor(src_color, cv2.COLOR_BGR2GRAY)\r\n\t# finds good, sharp corners witin frame\r\n\tcorners = cv2.goodFeaturesToTrack(gray, 1, 0.01, 10)\r\n\tcorners = np.int0(corners)\r\n\t# saves corners found and outputs them\r\n\tfor i in corners:\r\n\t\tx_corner, y_corner = i.ravel()\r\n\treturn x_corner, y_corner\r\n\r\n\"\"\"\r\nWRITE SYSTEM PROPERTIES\r\n\"\"\"\r\nprint (\"Python Version: \"+sys.version)\r\nprint (\"Windows Version: \"+platform.platform())\r\n\r\n\"\"\"\r\nDEFINE STREAM PROPERTIES\r\n\"\"\"\r\n#streams\r\npipeline = rs.pipeline() \t\t\t\t\t\t\t\t\t\t\t\t\t\t#create pipeline\r\nconfig = rs.config()\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#create a configuration\r\n#config.enable_stream(rs.stream.depth, columns, rows, rs.format.z16, f_rate)\t\t#get the depth stream\r\n#config.enable_stream(rs.stream.infrared, 1, columns, rows, rs.format.y8, f_rate) #get left IR streams\r\n#config.enable_stream(rs.stream.infrared, 2, columns, rows, rs.format.y8, f_rate) #get right IR streams\r\n\r\n# start streaming\r\nprofile = config.resolve(pipeline)\r\nprofile = pipeline.start(config)\r\n\r\n# get the depth sensor scale\r\ndepth_sensor = profile.get_device().first_depth_sensor()\r\ndepth_scale = depth_sensor.get_depth_scale()\r\nprint(\"Depth Scale is: \",depth_scale)\r\n\r\n# clipping ditsance is in meters\r\nclippingdistance_m = 1\r\nclippingdistance = clippingdistance_m / depth_scale\r\n\r\n# create an align object\r\nalign_to = rs.stream.color\r\nalign = rs.align(align_to)\r\n\r\n# filename = input(\"Please Enter a Filename: \")\r\n# print(\"File location: C:/Users/Steve/Documents/GitHub/SoftResearch/Data/\" + filename + \".csv\")\r\n# filelocation = \"C:/Users/Steve/Documents/GitHub/SoftResearch/Data/\" + filename + \".csv\"\r\n\r\n\"\"\"\r\nMAIN PROGRAM\r\n\"\"\"\r\n\r\ndef main():\r\n\t# get global variables = variables that can be changed in main function\r\n\tglobal step_1, step_2, step_3, step_4, klick_counter,ch_poi, koord\r\n\t# start the main - try\r\n\ttry:\r\n\t\twhile True:\r\n\r\n\t\t\tframes = pipeline.wait_for_frames()\r\n\t\t\tdepth_frame = frames.get_depth_frame()\r\n\t\t\tcolor_frame = frames.get_color_frame()\r\n\t\t\taligned_frames = align.process(frames)\r\n\t\t\taligned_depth_frame = aligned_frames.get_depth_frame()\r\n\t\t\taligned_color_frame = aligned_frames.get_color_frame()\r\n\r\n\t\t\t# validate frames\r\n\t\t\tif not aligned_depth_frame or not aligned_color_frame:\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tir_left_frame = frames.get_infrared_frame(1)\r\n\t\t\tir_right_frame = frames.get_infrared_frame(2)\r\n\t\t\tdepth_image = np.asanyarray(aligned_depth_frame.get_data())\r\n\t\t\tcolor_image = np.asanyarray(aligned_color_frame.get_data())\r\n\r\n\t\t\t# intrinsics and extrinsics\r\n\t\t\tdepth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics\r\n\t\t\tcolor_intrin = color_frame.profile.as_video_stream_profile().intrinsics\r\n\t\t\tdepth_to_color_intrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)\r\n\r\n\t\t\tdelta = 8\r\n\t\t\ttry:\r\n\r\n\t\t\t\t# Hold 'k' to intialize program. Once Test 2 is printed, the users clicks will be saved and registered. \r\n\t\t\t\tif cv2.waitKey(1) == ord('k') or step_1 == True:\r\n\t\t\t\t\tprint(\"Test2\")\r\n\t\t\t\t\tstep_1 = True\r\n\r\n\t\t\t\t\t# On click, get mouse postition and create rois about them. 
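# What rs2_deproject_pixel_to_point computes below, for the plain pinhole model
# sketched in the camera-matrix comment at the top of this script (assuming no
# lens distortion): pixel (u, v) at depth z maps to camera-frame coordinates
#   X = (u - cx) * z / fx,   Y = (v - cy) * z / fy,   Z = z
def deproject_pixel(u, v, z, fx, fy, cx, cy):
    return [(u - cx) * z / fx, (v - cy) * z / fy, z]
# e.g. the optical centre maps straight down the z axis:
# deproject_pixel(cx, cy, 1.0, fx, fy, cx, cy) -> [0.0, 0.0, 1.0]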
\r\n\t\t\t\t\tcv2.setMouseCallback('Align Example', get_mouse_position)\r\n\t\t\t\t\tcv2.rectangle(color_image, (int(ch_poi[0][0]) - delta, int(ch_poi[0][1]) - delta), (int(ch_poi[0][0]) + delta, int(ch_poi[0][1]) + delta), red, 0)\r\n\t\t\t\t\tcv2.rectangle(color_image, (int(ch_poi[1][0]) - delta, int(ch_poi[1][1]) - delta), (int(ch_poi[1][0]) + delta, int(ch_poi[1][1]) + delta), red, 0)\r\n\r\n\t\t\t\t\t# print ix and iy\r\n\t\t\t\t\tif klick_counter <= 2:\r\n\t\t\t\t\t\tch_poi[klick_counter-1][0] = ix\r\n\t\t\t\t\t\tch_poi[klick_counter-1][1] = iy\r\n\t\t\t\t\t\tif klick_counter == 2:\r\n\t\t\t\t\t\t\tklick_counter = 0\r\n\t\t\t\t\t\t\tstep_1 = False\r\n\t\t\t\t\t\t\tstep_2 = True\r\n\t\t\texcept:\r\n\t\t\t\tprint (\"error at second try\")\r\n\r\n\t\t\ttry:\r\n\t\t\t\tif step_2 == True:\r\n\t\t\t\t\t# pulls a small picture of the overall frame\r\n\t\t\t\t\tcorner_picture_0 = color_image[int(ch_poi[0][1]) - delta: int(ch_poi[0][1]) + delta, int(ch_poi[0][0]) - delta: int(ch_poi[0][0]) + delta]\r\n\t\t\t\t\tcorner_picture_1 = color_image[int(ch_poi[1][1]) - delta: int(ch_poi[1][1]) + delta, int(ch_poi[1][0]) - delta: int(ch_poi[1][0]) + delta]\r\n\r\n\t\t\t\t\t# small frame is pushed to gftt which finds sharp corners to pull data off of\r\n\t\t\t\t\tkoord[0][0], koord[0][1] = good_features_to_track(corner_picture_0)\r\n\t\t\t\t\tkoord[1][0], koord[1][1] = good_features_to_track(corner_picture_1)\r\n\r\n\t\t\t\t\t# edits to create better coordinates\r\n\t\t\t\t\tfor i in range(0,2):\r\n\t\t\t\t\t\tkoord[i][0] = ch_poi[i][0] - delta + koord[i][0]\r\n\t\t\t\t\t\tkoord[i][1] = ch_poi[i][1] - delta + koord[i][1]\r\n\r\n\t\t\t\t\t# updates rectangele roi to green to show that updated\r\n\t\t\t\t\tcv2.rectangle(color_image, (int(koord[0][0]) - delta, int(koord[0][1]) - delta), (int(koord[0][0]) + delta, int(koord[0][1])+delta), green, 0)\r\n\t\t\t\t\tcv2.rectangle(color_image, (int(koord[1][0]) - delta, int(koord[1][1]) - delta), (int(koord[1][0]) + delta, int(koord[1][1])+delta), green, 0)\r\n\r\n\t\t\t\t\tstep_3 = True\r\n\t\t\texcept:\r\n\t\t\t\tprint (\"error at third try\")\r\n\r\n\t\t\ttry:\r\n\t\t\t\tzero = 0\r\n\t\t\t\tnp.float64(zero)\r\n\t\t\t\tif step_3 == True:\r\n\t\t\t\t\tfor i in range(0,2):\r\n\t\t\t\t\t\t# pulls depth from aligned frames\r\n\t\t\t\t\t\tz = aligned_depth_frame.get_distance(int(koord[i][0]), int(koord[i][1]))\r\n\t\t\t\t\t\t# print z as part of koord vector\r\n\t\t\t\t\t\tif z > 0: # all depths from camera are positive\r\n\t\t\t\t\t\t\tkoord[i][2] = z\r\n\t\t\t\t\tif koord[0][2] > 0 and koord[1][2] > 0:\r\n\t\t\t\t\t\tstep_3 = False\r\n\t\t\t\t\t\tstep_4 = True\r\n\t\t\texcept:\r\n\t\t\t\tprint (\"Error at the fourth try\")\r\n\r\n\t\t\ttry:\r\n\t\t\t\tif step_4 == True:\r\n\t\t\t\t\t# depth points are passed through from deprojected fram so all are in proper formatting \r\n\t\t\t\t\tdepth_point_1 = rs.rs2_deproject_pixel_to_point(depth_intrin, [koord[0][0], koord[0][1]], koord[0][2])\r\n\t\t\t\t\tdepth_point_2 = rs.rs2_deproject_pixel_to_point(depth_intrin, [koord[1][0], koord[1][1]], koord[1][2])\r\n\t\t\t\t\tvector_12 = np.zeros((1,3))\r\n\t\t\t\t\tfor i in range(0,3):\r\n\t\t\t\t\t\tvector_12[0][i] = depth_point_1[i] - depth_point_2[i]\r\n\t\t\t\t\tprint (\"the position of point 1 is \" + str(depth_point_1))\r\n\t\t\t\t\tprint (\"the position of point 2 is \" + str(depth_point_2))\r\n\r\n\t\t\t\t\twith open(\"C:/Users/Steve/Documents/GitHub/SoftResearch/Data/zeros.csv\", 'a') as csvfile: \r\n\t\t\t\t\t\tfilewriter = csv.writer(csvfile, delimiter = ',', 
quoting=csv.QUOTE_NONE, lineterminator = '\\n')\r\n\t\t\t\t\t\tfilewriter.writerow(depth_point_1 + depth_point_2)\r\n\t\t\t\t\t# print (\"the vector between the two points is \" + str(vector_12))\r\n\t\t\t\t\t# dist_12 = math.sqrt(vector_12[0][0]**2 + vector_12[0][1]**2 + vector_12[0][2]**2)\r\n\t\t\t\t\t# dist_12 = dist_12 * 1000\r\n\t\t\t\t\t# dist_12 = int(dist_12)\r\n\t\t\t\t\t# print (\"the 3D distance between points is \" + str(dist_12) + \"mm\")\r\n\t\t\t\t\tstep_4 = False\r\n\t\t\texcept:\r\n\t\t\t\tprint (\"error at fifth try\")\r\n\r\n\t\t\ttry:\r\n\t\t\t\tcv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)\r\n\t\t\t\tcv2.imshow('Align Example', color_image)\r\n\r\n\t\t\texcept:\r\n\t\t\t\tprint (\"error occurred during streaming\")\t\r\n\r\n\t\t\tif cv2.waitKey(1) == 27: # when 0. program updates whenever button is pressed. when 1 program closes when escape is held\r\n\t\t\t\tbreak\r\n\texcept:\r\n\t\tprint (\"error occurred in first try\")\r\n\tfinally:\r\n\t\tcv2.destroyAllWindows()\r\n\t\tpipeline.stop()\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()","sub_path":"programs/slinky.py","file_name":"slinky.py","file_ext":"py","file_size_in_byte":8215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"76499514","text":"\n##############################################################################\n#\n# Copyright (c) 2003-2018 by The University of Queensland\n# http://www.uq.edu.au\n#\n# Primary Business: Queensland, Australia\n# Licensed under the Apache License, version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Development until 2012 by Earth Systems Science Computational Center (ESSCC)\n# Development 2012-2013 by School of Earth Sciences\n# Development from 2014 by Centre for Geoscience Computing (GeoComp)\n#\n##############################################################################\n\nfrom __future__ import print_function, division\n\n__copyright__=\"\"\"Copyright (c) 2003-2018 by The University of Queensland\nhttp://www.uq.edu.au\nPrimary Business: Queensland, Australia\"\"\"\n__license__=\"\"\"Licensed under the Apache License, version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\"\"\"\n__url__=\"https://launchpad.net/escript-finley\"\n\nfrom esys.escript import *\nfrom esys.escript.linearPDEs import LinearPDE, TransportPDE\nfrom esys.dudley import Rectangle\nfrom esys.weipa import saveVTK\n\n# dom=Rectangle(12,8,l0=1.5)\n# dom=Rectangle(24,16,l0=1.5)\ndom=Rectangle(48,32,l0=1.5)\n# dom=Rectangle(8*48,8*32,l0=1.5)\n# dom=Rectangle(120,80,l0=1.5)\nV=Scalar(1.,Function(dom))*[-1.,0]\nTHETA=0.\nfc=TransportPDE(dom,num_equations=1,theta=THETA)\nfc.setTolerance(1.e-12)\nfc.setValue(M=Scalar(1.,Function(dom)),C=V)\nx=dom.getX()\nx_0=[0.5,0.5]\nsigma=0.075\nu0=1.\nfor i in range(dom.getDim()):\n    u0=u0*exp(-(x[i]-x_0[i])**2/sigma**2)\n\nu0=whereNonPositive(abs(x[0]-0.4)-0.2)*whereNonPositive(abs(x[1]-0.5)-0.2)\n# f1=0.5\n# f2=2.\n# u0=f2*clip(x[0]-0.5,0.)-clip(0.5-x[0],0.)*f1+f1*0.5\n# u0=exp(-3*(x[0]-2.)**2)\n# u0=x[0]\nu0/=Lsup(u0)\nc=0\nsaveVTK(\"u.%s.vtu\"%c,u=u0)\nfc.setInitialSolution(u0)\n\nt_end=0.6\ndt=2.49999e-2*0+6.2499999e-02/4\ndt_out=2.49999e-2*0+6.2499999e-02/4\nc_stop=1\nn_out=int(t_end/dt+0.5)\nprint(n_out)\nt=0.\nt_out=0\nc_out=0\nc=0\nprint(t,\": range u\",inf(u0),sup(u0),integrate(u0,Function(dom)))\nwhile t<t_end:\n    # NOTE: the body of this loop was garbled during extraction (everything\n    # between '<' and '>' was stripped); reconstructed here as a plausible\n    # solve-and-advance step for the transport solver set up above.\n    u=fc.getSolution(dt)\n    c,t=c+1,t+dt\n    print(t,\": range u\",inf(u),sup(u),integrate(u,Function(dom)))\n    if t>=t_out+dt_out:\n        c_out,t_out=c_out+1,t_out+dt_out\n        saveVTK(\"u.%s.vtu\"%c_out,u=u)\n        print(\"write time step \",c,\"(t=%s) to file u.%s.vtu\"%(t,c_out))\n\nif True:\n    pde=LinearPDE(dom)\n    pde.setValue(D=1.,C=-THETA*dt*V)\n    pde.setTolerance(1e-12)\n    t=0.\n    t_out=0\n    c_out=0\n    c=0\n    u=u0\n    print(t,\": range u2\",inf(u0),sup(u0),integrate(u0,Function(dom)))\n    while t<t_end:\n        # NOTE: garbled like the loop above; reconstructed as a plausible\n        # implicit step for the same advection problem via the LinearPDE.\n        pde.setValue(Y=u)\n        u=pde.getSolution()\n        c,t=c+1,t+dt\n        print(t,\": range u2\",inf(u),sup(u),integrate(u,Function(dom)))\n        if t>=t_out+dt_out:\n            c_out,t_out=c_out+1,t_out+dt_out\n            saveVTK(\"u2.%s.vtu\"%c_out,u=u)\n            print(\"write time step \",c,\"(t=%s) to file u2.%s.vtu\"%(t,c_out))\n","sub_path":"dudley/test/python/tp.py","file_name":"tp.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"644716319","text":"#\r\n# This file is part of the Infinite Improbability Drive.\r\n#\r\n# Copyright (C) 2009 by Jernej Kos \r\n# Copyright (C) 2009 by Anze Vavpetic \r\n#\r\nimport numpy\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\nfrom OpenGL.GLUT import *\r\nimport ode\r\nimport logging\r\nimport time\r\nimport traceback\r\nimport pyglet.media as Media\r\n\r\n# IID imports\r\nfrom iid.exceptions import *\r\nfrom iid.behaviour import EntityBehaviour\r\nfrom iid.frustum import Frustum\r\nfrom iid.sound import *\r\n\r\n# Logger for this module\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass SceneObject(object):\r\n  \"\"\"\r\n  Represents an abstract object that can be rendered by the scene.\r\n  \"\"\"\r\n  objectId = \"\"\r\n  scene = None\r\n  \r\n  # Audio\r\n  player = None\r\n  \r\n  # Object hierarchy\r\n  parent = None\r\n  children = None\r\n  \r\n  # Object attributes\r\n  visible = False\r\n  culled = False\r\n  static = True\r\n  \r\n  # Transformations (when not using a physical body)\r\n  coordinates = None\r\n  rotation = None\r\n  rotationMatrix = None\r\n  scaling = None\r\n  \r\n  def __init__(self, scene, objectId, parent = None):\r\n    \"\"\"\r\n    Class constructor.\r\n    \r\n    @param scene: A valid Scene instance\r\n    @param objectId: Unique object identifier\r\n    @param parent: Parent object if this is a subobject\r\n    \"\"\"\r\n    self.scene = scene\r\n    self.objectId = objectId\r\n    self.parent = parent\r\n    self.children = {}\r\n    \r\n    self.coordinates = numpy.zeros(3)\r\n    self.rotation = numpy.zeros(3)\r\n    self.scaling = numpy.ones(3)\r\n    self.__prepareRotationMatrix()\r\n    \r\n    # Insert us into hierarchy\r\n    if self.parent:\r\n      self.parent.children[objectId] = self\r\n    \r\n    # Init audio player\r\n    self.player = Media.Player()\r\n  \r\n  def isChild(self):\r\n    \"\"\"\r\n    Returns true if this object is not a top-level object, but\r\n    a subobject for some other object.\r\n    \"\"\"\r\n    return self.parent is not None\r\n  \r\n  def mapCoordsToParent(self, coords):\r\n    \"\"\"\r\n    Maps coordinates in object coordinate system to world coordinate\r\n    system.\r\n    \r\n    @param coords: A valid numpy array of coordinates\r\n    \"\"\"\r\n    if not self.isChild():\r\n      return coords\r\n    \r\n    return self.parent.mapCoordsToParent(self.parent.coordinates + coords)\r\n  \r\n  def setCoordinates(self, x, y, z):\r\n    \"\"\"\r\n    Changes object's coordinates.\r\n    \"\"\"\r\n    self.coordinates[0:3] = [x, y, z]\r\n    \r\n    # Update the sound player's position\r\n    self.player.position = [x, y, z]\r\n  \r\n  def setRotation(self, x, y, z):\r\n    \"\"\"\r\n    Changes object's rotation.\r\n    \"\"\"\r\n    self.rotation[0:3] = [x, y, z]\r\n    self.__prepareRotationMatrix()\r\n  \r\n  def setScaling(self, x, y, z):\r\n    \"\"\"\r\n    Set object's scaling.\r\n    \"\"\"\r\n    self.scaling[0:3] = [x, y, z]\r\n  \r\n  def rotateX(self, phi):\r\n    \"\"\"\r\n    Rotate the object on X-axis.\r\n    \r\n    @param phi: Rotation angle\r\n    \"\"\"\r\n    self.rotation[0] = phi\r\n    self.__prepareRotationMatrix()\r\n  \r\n  def 
rotateY(self, phi):\r\n \"\"\"\r\n Rotate the object on Y-axis.\r\n \r\n @param phi: Rotation angle\r\n \"\"\"\r\n self.rotation[1] = phi\r\n self.__prepareRotationMatrix()\r\n \r\n def rotateZ(self, phi):\r\n \"\"\"\r\n Rotate the object on Z-axis.\r\n \r\n @param phi: Rotation angle\r\n \"\"\"\r\n self.rotation[2] = phi\r\n self.__prepareRotationMatrix()\r\n \r\n def setVisible(self, visible):\r\n \"\"\"\r\n Sets object's visibility.\r\n \"\"\"\r\n self.visible = visible\r\n \r\n def prepare(self):\r\n \"\"\"\r\n Should prepare the object for rendering.\r\n \"\"\"\r\n # Call play on the player to initialize it, otherwise it takes 0.1 sec\r\n # when play is first called and this might cause nasty lag the first time\r\n self.player.play()\r\n \r\n for obj in self.children.values():\r\n obj.prepare()\r\n \r\n def render(self, picking = False):\r\n \"\"\"\r\n Should render the given object.\r\n \"\"\"\r\n for obj in self.children.values():\r\n if obj.visible and not obj.culled:\r\n obj.render(picking)\r\n \r\n def __prepareRotationMatrix(self):\r\n \"\"\"\r\n Prepares the object's rotation matrix.\r\n \"\"\"\r\n x, y, z = self.rotation\r\n \r\n # Calculate the rotation matrix\r\n cx, cy, cz = numpy.cos([numpy.pi * x/180., numpy.pi * y/180., numpy.pi * z/180.])\r\n sx, sy, sz = numpy.sin([numpy.pi * x/180., numpy.pi * y/180., numpy.pi * z/180.])\r\n \r\n self.rotationMatrix = (\r\n cx*cz - sx*cy*sz, -sx*cz - cx*cy*sz, sy*sz,\r\n cx*sz + sx*cy*cz, -sx*sz + cx*cy*cz, -sy*cz,\r\n sx*sy, cx*sy, cy\r\n )\r\n\r\nclass Entity(SceneObject):\r\n \"\"\"\r\n Represents a model that can be drawn by the scene. This entity does\r\n not participate in neither physical simulation nor collision detection.\r\n \"\"\"\r\n model = None\r\n texture = None\r\n shader = None\r\n \r\n # Entity behaviour\r\n behaviour = None\r\n \r\n # Can the object be picked\r\n pickable = False\r\n \r\n # OpenGL list identifier\r\n textureId = None\r\n listId = None\r\n nameId = 0\r\n \r\n def __init__(self, scene, objectId, model, texture, parent = None):\r\n \"\"\"\r\n Class constructor.\r\n \r\n @param scene: A valid Scene instance\r\n @param objectId: Unique object identifier\r\n @param model: A valid model\r\n @param texture: A valid texture\r\n @param parent: Parent object if this is a subobject\r\n \"\"\"\r\n super(Entity, self).__init__(scene, objectId, parent)\r\n self.model = model\r\n self.texture = texture\r\n \r\n def setShader(self, shader):\r\n \"\"\"\r\n Sets a GLSL shader for this entity.\r\n \"\"\"\r\n self.shader = shader\r\n \r\n def prepare(self):\r\n \"\"\"\r\n Prepares the model's vertices/textures so they can be rendered.\r\n \"\"\"\r\n # First prepare all subentities\r\n super(Entity, self).prepare()\r\n \r\n # Prepare this entity texture and model\r\n self.textureId = self.texture.prepare() if self.texture else None\r\n self.listId = self.model.prepare()\r\n \r\n # Prepare object name identifier when the object is marked as pickable\r\n if self.pickable:\r\n self.nameId = self.scene.assignName(self)\r\n \r\n def render(self, picking = False):\r\n \"\"\"\r\n Renders the model by first transforming model coordinates to\r\n scene coordinates and then pushing model's vertices via\r\n calls to OpenGL.\r\n \"\"\"\r\n x, y, z = self.coordinates\r\n R = self.rotationMatrix\r\n M = [\r\n R[0], R[3], R[6], 0.,\r\n R[1], R[4], R[7], 0.,\r\n R[2], R[5], R[8], 0.,\r\n x, y, z, 1.0\r\n ]\r\n \r\n glPushName(self.nameId)\r\n glPushMatrix()\r\n glMultMatrixd(M)\r\n \r\n if self.shader and not picking:\r\n 
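Entity.render() in the scene.py record above flattens the 3×3 rotationMatrix and the translation into a 16-element list before calling glMultMatrixd. OpenGL expects column-major storage, so the list is written column by column with the translation in the last column. A small standalone numpy check of that layout (the rotation here is an illustrative z-axis rotation, not taken from the record):

```python
# Verifies the column-major flattening used by Entity.render() above.
import numpy as np

a = np.radians(30.0)
R = np.array([[np.cos(a), -np.sin(a), 0.0],
              [np.sin(a),  np.cos(a), 0.0],
              [0.0,        0.0,       1.0]])
x, y, z = 1.0, 2.0, 3.0

# column-major flattening, mirroring the M list built in Entity.render()
M = [R[0, 0], R[1, 0], R[2, 0], 0.0,
     R[0, 1], R[1, 1], R[2, 1], 0.0,
     R[0, 2], R[1, 2], R[2, 2], 0.0,
     x,       y,       z,       1.0]

H = np.array(M).reshape(4, 4).T  # row-major view of the same transform
assert np.allclose(H[:3, :3], R)          # rotation block recovered
assert np.allclose(H[:3, 3], [x, y, z])   # translation in the last column
```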
self.shader.activate()\r\n \r\n # Render all sub-entities\r\n super(Entity, self).render(picking)\r\n \r\n # Add texture when requested\r\n if self.textureId is not None and not picking:\r\n glBindTexture(GL_TEXTURE_2D, self.textureId)\r\n \r\n # Execute precompiled OpenGL commands\r\n if self.listId:\r\n glCallList(self.listId)\r\n \r\n if self.textureId is not None:\r\n glBindTexture(GL_TEXTURE_2D, 0)\r\n \r\n if self.shader and not picking:\r\n self.shader.deactivate()\r\n \r\n if self.scene.showBoundingVolumes and (self.scene.showSubentityVolumes or self.parent == None) and not picking:\r\n self.__drawBoundingVolume()\r\n \r\n glPopMatrix()\r\n glPopName()\r\n \r\n def __drawBoundingVolume(self):\r\n \"\"\"\r\n Draws a bounding volume of the model\r\n \"\"\"\r\n if self.scene.showBoundingSpheres:\r\n glMaterialfv(GL_FRONT, GL_EMISSION, (1.0, 0.0, 0.0))\r\n glutWireSphere(self.model.radius, 10, 10)\r\n \r\n if self.scene.showBoundingBoxes:\r\n glMaterialfv(GL_FRONT, GL_EMISSION, (0.0, 1.0, 0.0))\r\n glScalef(*self.model.dimensions)\r\n glutWireCube(1.0)\r\n\r\nclass PhysicalEntity(Entity):\r\n \"\"\"\r\n Represents an entity that follows the laws of simulated\r\n physics.\r\n \"\"\"\r\n body = None\r\n \r\n # Some properties\r\n bounce = 0.2\r\n friction = 5000\r\n \r\n def __init__(self, scene, objectId, model, texture, parent = None):\r\n \"\"\"\r\n Class constructor.\r\n \r\n @param scene: A valid Scene instance\r\n @param objectId: Unique object identifier\r\n @param model: A valid model\r\n @param texture: A valid texture\r\n @param parent: Parent object if this is a subobject\r\n \"\"\"\r\n super(PhysicalEntity, self).__init__(scene, objectId, model, texture, parent)\r\n self.prepare()\r\n self.body = self.preparePhysicalModel()\r\n \r\n def setCoordinates(self, x, y, z):\r\n \"\"\"\r\n Changes object's coordinates.\r\n \"\"\"\r\n super(PhysicalEntity, self).setCoordinates(x, y, z)\r\n self.body.setPosition(self.mapCoordsToParent(self.coordinates))\r\n \r\n def setRotation(self, x, y, z):\r\n \"\"\"\r\n Changes object's rotation.\r\n \"\"\"\r\n super(PhysicalEntity, self).setRotation(x, y, z)\r\n self.body.setRotation(self.rotationMatrix)\r\n \r\n def setVisible(self, visible):\r\n \"\"\"\r\n Sets object's visibility.\r\n \"\"\"\r\n super(PhysicalEntity, self).setVisible(visible)\r\n if visible:\r\n self.body.enable()\r\n else:\r\n self.body.disable()\r\n \r\n def setDensity(self, density):\r\n \"\"\"\r\n Sets entity's density.\r\n \"\"\"\r\n if not isinstance(self.body, ode.Body):\r\n return\r\n \r\n lx, ly, lz = self.model.dimensions\r\n M = self.body.getMass()\r\n M.setBox(density, lx, ly, lz)\r\n self.body.setMass(M)\r\n \r\n def setMass(self, mass):\r\n \"\"\"\r\n Sets entity's mass.\r\n \"\"\"\r\n if not isinstance(self.body, ode.Body):\r\n return\r\n \r\n lx, ly, lz = self.model.dimensions\r\n M = self.body.getMass()\r\n M.setBoxTotal(mass, lx, ly, lz)\r\n self.body.setMass(M)\r\n \r\n def render(self, picking = False):\r\n \"\"\"\r\n Renders the model by first transforming model coordinates to\r\n scene coordinates and then pushing model's vertices via\r\n calls to OpenGL.\r\n \"\"\"\r\n self.updateScenePosition()\r\n super(PhysicalEntity, self).render(picking)\r\n \r\n def updateScenePosition(self):\r\n \"\"\"\r\n Just updates the object's parameters.\r\n \"\"\"\r\n self.coordinates = self.body.getPosition()\r\n self.rotationMatrix = self.body.getRotation()\r\n \r\n # Update the sound player's position\r\n if self.player: \r\n self.player.position = self.coordinates \r\n 
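PhysicalEntity.setDensity() and setMass() above rebuild the attached ode.Mass for a box of the model's dimensions. A self-contained sketch of the same PyODE calls; the dimensions, density and mass values are illustrative, not taken from the record:

```python
# Sketch of the mass setup mirrored from setDensity()/setMass() above.
import ode

world = ode.World()
body = ode.Body(world)

lx, ly, lz = 2.0, 1.0, 0.5
M = ode.Mass()
M.setBox(10.0, lx, ly, lz)        # box inertia from a density
body.setMass(M)

M2 = ode.Mass()
M2.setBoxTotal(5.0, lx, ly, lz)   # or from a total mass instead
body.setMass(M2)
```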
\r\n def preparePhysicalModel(self):\r\n \"\"\"\r\n Should prepare the physical model. Default just represents all\r\n models as boxes. When using subentities this method should connect\r\n all subentities together via joints when needed.\r\n \r\n @return: A valid ode.Body instance\r\n \"\"\"\r\n lx, ly, lz = self.model.dimensions\r\n body = ode.Body(self.scene.physicalWorld)\r\n M = ode.Mass()\r\n M.setBox(10.0, lx, ly, lz)\r\n body.setMass(M)\r\n \r\n # Create a box geom for collision detection\r\n geom = ode.GeomBox(self.scene.space, lengths = (lx, ly, lz))\r\n geom.sceneObject = self\r\n geom.setBody(body)\r\n \r\n return body\r\n\r\nclass StaticObstacle(PhysicalEntity):\r\n \"\"\"\r\n Represents an entity that participates in collision detection but not\r\n in physical simulation (so it is static).\r\n \"\"\"\r\n def preparePhysicalModel(self):\r\n \"\"\"\r\n This actually returns a geometry object, since methods match.\r\n \"\"\"\r\n lx, ly, lz = self.model.dimensions\r\n geom = ode.GeomBox(self.scene.space, lengths = (lx, ly, lz))\r\n geom.sceneObject = self\r\n \r\n return geom\r\n\r\nclass Camera(SceneObject):\r\n \"\"\"\r\n Represents the camera.\r\n \"\"\"\r\n frustum = None\r\n visible = True\r\n \r\n # At which point is the camera 'looking'\r\n center = numpy.array([0., 0., 0.])\r\n \r\n # Up vector (default)\r\n up = numpy.array([0., 1., 0.])\r\n \r\n # Sound listener instance\r\n listener = None\r\n \r\n def __init__(self, scene, objectId, parent = None):\r\n \"\"\"\r\n Class constructor.\r\n \r\n @param scene: A valid Scene instance\r\n @param objectId: Unique object identifier\r\n @param parent: Parent object if this is a subobject\r\n \"\"\"\r\n super(Camera, self).__init__(scene, objectId, parent)\r\n \r\n # Setup audio listener\r\n self.listener = Media.listener\r\n \r\n def setCoordinates(self, x, y, z):\r\n \"\"\"\r\n Changes camera's coordinates.\r\n \"\"\"\r\n super(Camera, self).setCoordinates(x, y, z)\r\n self.__updateDirection()\r\n \r\n def lookAt(self, x, y, z):\r\n \"\"\"\r\n Look at point (x,y,z) from the current position.\r\n \"\"\"\r\n self.center[0:3] = (x, y, z)\r\n self.__updateDirection()\r\n \r\n def walk(self, distance):\r\n \"\"\"\r\n Move camera forward or backward according to current center point.\r\n \"\"\"\r\n x = self.center - self.coordinates\r\n x = (x / numpy.linalg.norm(x)) * distance\r\n \r\n self.setCoordinates(*self.coordinates + x)\r\n self.lookAt(*self.center + x)\r\n \r\n def setRotation(self, x, y, z):\r\n \"\"\"\r\n Set camera rotation.\r\n \"\"\"\r\n self.rotation[0:3] = (x, y, z)\r\n \r\n # Calculate the rotation matrix\r\n cx, cy, cz = numpy.cos([numpy.pi * x/180., numpy.pi * y/180., numpy.pi * z/180.])\r\n sx, sy, sz = numpy.sin([numpy.pi * x/180., numpy.pi * y/180., numpy.pi * z/180.])\r\n \r\n R = numpy.array([\r\n [cy*cz, -cy*sz, sy ],\r\n [cz*sx*sy + cx*sz, cx*cz - sz*sx*sy, -sx*cy],\r\n [sx*sz - cx*sy*cz, cy*sy*sz + sx*cz, cx*cy ]\r\n ])\r\n \r\n self.up = numpy.dot(R, self.up)\r\n self.center = numpy.dot(R, self.center - self.coordinates) + self.coordinates\r\n self.__updateDirection()\r\n \r\n def setUp(self, x, y, z):\r\n \"\"\"\r\n Define up vector.\r\n \"\"\"\r\n self.up[0:3] = (x, y, z)\r\n self.__updateDirection()\r\n \r\n def setFrustum(self, frustum):\r\n \"\"\"\r\n Set the camera's frustum.\r\n \"\"\"\r\n self.frustum = frustum\r\n self.__updateDirection()\r\n \r\n def __updateDirection(self):\r\n \"\"\"\r\n Updates the frustum each time the camera's\r\n position or orientation changes.\r\n \"\"\"\r\n if 
self.frustum:\r\n # Update frustum\r\n self.frustum.setCamDef(\r\n self.coordinates,\r\n self.center,\r\n self.up\r\n )\r\n \r\n # Update sound listener orientation\r\n self.listener.position = self.coordinates\r\n \r\n # Direction vector\r\n self.listener.forward_orientation = self.center - self.coordinates \r\n self.listener.up_orientation = self.up\r\n \r\n def render(self):\r\n \"\"\"\r\n Move the camera to proper position.\r\n \"\"\"\r\n glMatrixMode(GL_MODELVIEW)\r\n gluLookAt(\r\n self.coordinates[0], self.coordinates[1], self.coordinates[2],\r\n self.center[0], self.center[1], self.center[2],\r\n self.up[0], self.up[1], self.up[2] \r\n )\r\n\r\nclass Light(SceneObject):\r\n \"\"\"\r\n A light source that can be added to a scene.\r\n \"\"\"\r\n __lightNumber = None\r\n __globalLights = GL_LIGHT0-1\r\n \r\n ambient = (0.2, 0.2, 0.2, 0.0)\r\n diffuse = (0.8, 0.8, 0.8, 0.0)\r\n specular = (1.0, 1.0, 1.0, 0.0)\r\n constantAttenuation = 1.0\r\n linearAttenuation = 0.0\r\n quadraticAttenuation = 0.0\r\n \r\n def __init__(self, scene, objectId):\r\n super(Light, self).__init__(scene, objectId)\r\n Light.__globalLights += 1 # Increment the number of all lights\r\n self.__lightNumber = Light.__globalLights;\r\n \r\n def prepare(self):\r\n \"\"\"\r\n Prepare the lights.\r\n \"\"\"\r\n self.setVisible(True)\r\n \r\n def render(self):\r\n \"\"\"\r\n 'Render' the light.\r\n \"\"\"\r\n glLight(self.__lightNumber, GL_POSITION, self.coordinates)\r\n glLight(self.__lightNumber, GL_AMBIENT, self.ambient)\r\n glLight(self.__lightNumber, GL_DIFFUSE, self.diffuse)\r\n glLight(self.__lightNumber, GL_SPECULAR, self.diffuse)\r\n glLightf(self.__lightNumber, GL_CONSTANT_ATTENUATION , self.constantAttenuation)\r\n glLightf(self.__lightNumber, GL_LINEAR_ATTENUATION , self.linearAttenuation)\r\n glLightf(self.__lightNumber, GL_QUADRATIC_ATTENUATION , self.quadraticAttenuation)\r\n \r\n def setVisible(self, visible):\r\n \"\"\"\r\n Set the light's visibility.\r\n \"\"\"\r\n self.visible = visible\r\n if not visible:\r\n glDisable(self.__lightNumber)\r\n else:\r\n glEnable(self.__lightNumber) \r\n \r\n def setCoordinates(self, x, y, z, a):\r\n \"\"\"\r\n Changes object's coordinates.\r\n \"\"\"\r\n self.coordinates = [x, y, z, a]\r\n \r\n def setAmbient(self, x, y, z, a):\r\n \"\"\"\r\n Change ambient parameters.\r\n \"\"\"\r\n self.ambient = [x, y, z, a]\r\n \r\n def setDiffuse(self, x, y, z, a):\r\n \"\"\"\r\n Change diffuse parameters.\r\n \"\"\"\r\n self.diffuse = [x, y, z, a]\r\n \r\n def setSpecular(self, x, y, z, a):\r\n \"\"\"\r\n Change specular parameters.\r\n \"\"\"\r\n self.specular = [x, y, z, a]\r\n \r\n def setConstantAttenuation(self, constant):\r\n \"\"\"\r\n Set the constant attenuation value.\r\n \"\"\"\r\n self.constantAttenuation = constant\r\n \r\n def setLinearAttenuation(self, linear):\r\n \"\"\"\r\n Set the linear attenuation value.\r\n \"\"\"\r\n self.linearAttenuation = linear\r\n \r\n def setQuadraticAttenuation(self, quadratic):\r\n \"\"\"\r\n Set the quadratic attenuation value.\r\n \"\"\"\r\n self.quadraticAttenuation = quadratic\r\n \r\nclass Scene(object):\r\n \"\"\"\r\n The scene is an object container that renders those objects to\r\n the screen by calling their respective render methods. 
It has\r\n its own coordinate system that corresponds to OpenGL's internal\r\n coordinate system.\r\n \"\"\"\r\n # Viewpoint information\r\n angle = 0\r\n width = 0\r\n height = 0\r\n nearDistance = 0\r\n farDistance = 0\r\n \r\n objects = None\r\n camera = None\r\n lights = None\r\n frustum = None\r\n \r\n # Object naming (name 0 is reserved)\r\n names = None\r\n lastNameId = 0\r\n\r\n # Entity brain container\r\n behaviours = None\r\n \r\n # Settings\r\n cull = False\r\n showBoundingVolumes = False\r\n showSubentityVolumes = True\r\n showBoundingBoxes = False\r\n showBoundingSpheres = False\r\n \r\n # Physical world\r\n physicalWorld = None\r\n space = None\r\n contactGroup = None\r\n lastTime = 0\r\n \r\n def __init__(self):\r\n \"\"\"\r\n Class constructor.\r\n \"\"\"\r\n self.objects = {}\r\n self.behaviours = {}\r\n self.lights = {}\r\n self.names = {}\r\n \r\n # Create ODE physical world\r\n self.physicalWorld = ode.World()\r\n self.physicalWorld.setGravity((0, -9.81, 0))\r\n self.physicalWorld.setERP(0.8)\r\n self.physicalWorld.setCFM(1E-5)\r\n\r\n self.space = ode.Space()\r\n self.contactGroup = ode.JointGroup()\r\n \r\n def assignName(self, obj):\r\n \"\"\"\r\n Assign a name to an object.\r\n \"\"\"\r\n self.lastNameId += 1\r\n self.names[self.lastNameId] = obj\r\n return self.lastNameId\r\n \r\n def registerObject(self, obj):\r\n \"\"\"\r\n Registers a new object with the scene.\r\n \"\"\"\r\n if isinstance(obj, Camera):\r\n self.camera = obj\r\n return\r\n \r\n if isinstance(obj, Light):\r\n self.lights[obj.objectId] = obj\r\n return\r\n \r\n self.objects[obj.objectId] = obj\r\n \r\n def unregisterObject(self, obj):\r\n \"\"\"\r\n Removes an existing object from the scene.\r\n \"\"\"\r\n if isinstance(obj, Camera):\r\n self.camera = None\r\n return\r\n \r\n if isinstance(obj, Light):\r\n try:\r\n del self.lights[obj.objectId]\r\n except KeyError:\r\n raise SceneObjectNotFound\r\n return\r\n \r\n try:\r\n del self.objects[obj.objectId]\r\n except KeyError:\r\n raise SceneObjectNotFound\r\n \r\n def registerBehaviour(self, behaviour):\r\n \"\"\"\r\n Registres a new entity's behaviour. 
Entity must previously be a\r\n    part of the scene.\r\n    \r\n    @param behaviour: A valid EntityBehaviour (or subclass) instance\r\n    \"\"\"\r\n    self.behaviours[behaviour.entity] = behaviour\r\n  \r\n  def getObjectByName(self, name):\r\n    \"\"\"\r\n    Returns a top-level object identified by its name.\r\n    \"\"\"\r\n    return self.objects.get(name)\r\n  \r\n  def prepare(self):\r\n    \"\"\"\r\n    Prepare all objects.\r\n    \"\"\"\r\n    logger.info(\"Preparing scene objects, stand by...\")\r\n    \r\n    # Prepare frustum and set it to the camera\r\n    self.frustum = Frustum(self.angle, float(self.width) / float(self.height), self.nearDistance, self.farDistance)\r\n    self.camera.setFrustum(self.frustum)\r\n    \r\n    # Prepare lights\r\n    glEnable(GL_LIGHTING)\r\n    for light in self.lights.values():\r\n      light.prepare()\r\n    \r\n    glEnable(GL_TEXTURE_2D)\r\n    glShadeModel(GL_SMOOTH)\r\n    glClearColor(0, 0, 0, 0)\r\n    glClearDepth(1)\r\n    glEnable(GL_DEPTH_TEST)\r\n    glDepthFunc(GL_LEQUAL)\r\n    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)\r\n    \r\n    glViewport(0, 0, self.width, self.height)\r\n    glMatrixMode(GL_PROJECTION)\r\n    glLoadIdentity()\r\n    gluPerspective(self.angle, float(self.width) / float(self.height), self.nearDistance, self.farDistance)\r\n    \r\n    glMatrixMode(GL_MODELVIEW)\r\n    glLoadIdentity()\r\n    \r\n    # Prepare all objects that have been loaded into the scene\r\n    for obj in self.objects.values():\r\n      try:\r\n        obj.prepare()\r\n        \r\n        # Prepare entity's brain if it has one\r\n        if 'behaviour' in obj.__dict__ and obj.behaviour is not None:\r\n          obj.behaviour.prepare()\r\n      except:\r\n        logger.error(\"Unhandled exception while preparing an object!\")\r\n        logger.error(traceback.format_exc())\r\n        sys.exit(1)\r\n    \r\n    logger.info(\"Scene preparation completed! Let's render some stuff.\")\r\n  \r\n  def pick(self, x, y):\r\n    \"\"\"\r\n    Returns the object that is under the specified window coordinates.\r\n    \"\"\"\r\n    y = self.height - y\r\n    \r\n    # Get viewport information\r\n    view = glGetIntegerv(GL_VIEWPORT)\r\n    \r\n    # Prepare the name buffer\r\n    glSelectBuffer(64)\r\n    glRenderMode(GL_SELECT)\r\n    glInitNames()\r\n    \r\n    # Modify the projection so we only render one pixel around the cursor\r\n    glMatrixMode(GL_PROJECTION)\r\n    glPushMatrix()\r\n    glLoadIdentity()\r\n    \r\n    # Perform picking\r\n    gluPickMatrix(x, y, 1.0, 1.0, view)\r\n    gluPerspective(self.angle, float(self.width) / float(self.height), self.nearDistance, self.farDistance)\r\n    glMatrixMode(GL_MODELVIEW)\r\n    \r\n    # Render the scene as usual\r\n    glDisable(GL_LIGHTING)\r\n    glDisable(GL_TEXTURE_2D)\r\n    self.render(picking = True)\r\n    glEnable(GL_LIGHTING)\r\n    glEnable(GL_TEXTURE_2D)\r\n    \r\n    # Restore projection\r\n    glMatrixMode(GL_PROJECTION)\r\n    glPopMatrix()\r\n    glMatrixMode(GL_MODELVIEW)\r\n    \r\n    # Get the closest object\r\n    closest = (None, None)\r\n    selection = glRenderMode(GL_RENDER)\r\n    \r\n    for minZ, maxZ, names in selection:\r\n      if not closest[0] or minZ < closest[0]:\r\n        closest = (minZ, names)\r\n    \r\n    # Return the first name that matches, starting with subentity that has the\r\n    # most depth (otherwise subentities would never be matched)\r\n    if closest[1] is not None:\r\n      for name in closest[1][::-1]:\r\n        if name in self.names:\r\n          return self.names[name]\r\n    \r\n    return None\r\n  \r\n  def render(self, picking = False):\r\n    \"\"\"\r\n    Renders all visible objects to the scene.\r\n    \"\"\"\r\n    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\r\n    glLoadIdentity()\r\n    \r\n    # Render the camera first\r\n    if self.camera:\r\n      self.camera.render()\r\n    \r\n    # Render the lights\r\n    
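prepare() above builds a Frustum and hands it to the camera, and the scene later culls objects with Frustum.sphereInFrustum. The iid Frustum class itself is not part of this record, so the sketch below assumes a conventional plane representation (six (normal, d) pairs with normals pointing into the frustum); it illustrates the usual test, not the actual iid implementation:

```python
# Conventional sphere-vs-frustum test behind a call like
# frustum.sphereInFrustum(center, radius); plane layout is an assumption.
import numpy as np

OUTSIDE, INTERSECT, INSIDE = 0, 1, 2

def sphere_in_frustum(planes, center, radius):
    result = INSIDE
    for normal, d in planes:
        dist = float(np.dot(normal, center)) + d
        if dist < -radius:
            return OUTSIDE      # sphere fully behind one plane
        if dist < radius:
            result = INTERSECT  # sphere straddles this plane
    return result

# one plane facing +x through the origin; a sphere at x=2 lies inside it
print(sphere_in_frustum([(np.array([1.0, 0.0, 0.0]), 0.0)],
                        np.array([2.0, 0.0, 0.0]), 1.0))  # -> 2 (INSIDE)
```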
for light in self.lights.values():\r\n light.render()\r\n \r\n # Determine visible object by frustum culling\r\n if self.cull and self.camera.frustum:\r\n self.__cull(self.objects)\r\n \r\n # Now render all other (visible and marked for rendering) objects\r\n for obj in self.objects.values():\r\n if obj.visible and not obj.culled:\r\n try:\r\n obj.render(picking)\r\n except:\r\n logger.error(\"Unhandled exception while rendering an object!\")\r\n logger.error(traceback.format_exc())\r\n sys.exit(1)\r\n elif isinstance(obj, PhysicalEntity):\r\n # Just update the position (needed for further frustum checks)\r\n obj.updateScenePosition()\r\n\r\n def __nearCallback(self, args, g1, g2):\r\n \"\"\"\r\n Callback function for collision detection.\r\n \"\"\"\r\n bounce = 0.2\r\n friction = 5000\r\n contacts = ode.collide(g1, g2)\r\n if contacts:\r\n # Invoke entity's behaviour class with proper arguments\r\n entity1 = g1.sceneObject\r\n entity2 = g2.sceneObject\r\n \r\n if entity1:\r\n bounce = entity1.bounce\r\n friction = entity1.friction\r\n \r\n if entity1.behaviour:\r\n entity1.behaviour.collision(entity2)\r\n \r\n if entity2:\r\n bounce = (bounce + entity2.bounce) / 2.\r\n friction += entity2.friction\r\n \r\n if entity2.behaviour:\r\n entity2.behaviour.collision(entity1)\r\n \r\n world, contactGroup = args\r\n for c in contacts:\r\n # Set bounce/friction coefficients\r\n c.setBounce(bounce)\r\n c.setMu(friction)\r\n \r\n # Create a new contact joint\r\n j = ode.ContactJoint(world, contactGroup, c)\r\n j.attach(g1.getBody(), g2.getBody())\r\n \r\n def __cull(self, objects):\r\n \"\"\"\r\n Check for visible objects.\r\n \"\"\"\r\n level = objects.values()\r\n while len(level):\r\n children = []\r\n \r\n for obj in level:\r\n if obj.parent:\r\n # Calculate coordinates relative to the parent\r\n coordinates = obj.coordinates + obj.parent.coordinates\r\n else:\r\n coordinates = obj.coordinates\r\n \r\n state = self.frustum.sphereInFrustum(coordinates, obj.model.radius)\r\n if state == Frustum.OUTSIDE:\r\n obj.culled = True\r\n elif state == Frustum.INTERSECT:\r\n obj.culled = False\r\n if obj.children:\r\n children += obj.children.values() # Check the sub entities only if the parent is intersecting the frustum\r\n else:\r\n # The whole entity is inside the frustum\r\n obj.culled = False\r\n \r\n level = children # Next check the visibility of children of visible objects\r\n \r\n def update(self):\r\n \"\"\"\r\n Process any collisions and dynamic physical simulation.\r\n \"\"\"\r\n t = 0.02 - (time.time() - self.lastTime)\r\n if t > 0:\r\n time.sleep(t)\r\n \r\n # Wakup all behaviour classes\r\n for behaviour in self.behaviours.values():\r\n try:\r\n behaviour.update()\r\n except:\r\n logger.error(\"Unhandled exception while processing entity behaviour!\")\r\n logger.error(traceback.format_exc())\r\n sys.exit(1)\r\n \r\n glutPostRedisplay()\r\n \r\n # Perform physical simulation\r\n for i in xrange(2):\r\n self.space.collide((self.physicalWorld, self.contactGroup), self.__nearCallback)\r\n self.physicalWorld.step(0.02 / 2)\r\n self.contactGroup.empty()\r\n \r\n self.lastTime = time.time()\r\n\r\n def listVisible(self, event):\r\n \"\"\"\r\n List all at this moment visible objects.\r\n \"\"\"\r\n def vis(objects):\r\n l = []\r\n for obj in objects:\r\n if obj.visible:\r\n l.append(obj.model.itemId+\"::%s\" % vis(obj.children.values()))\r\n return l\r\n \r\n visible = vis(self.objects.values())\r\n logger.debug(\"All visible objects: %s\" % 
visible)\r\n","sub_path":"iid/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":26612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"254651390","text":"##!/usr/bin/python3\n#The whole piece of fabric they're working on is a very large square - at least 1000 inches on each side.\n#\n#Each Elf has made a claim about which area of fabric would be ideal for Santa's suit. All claims have an ID and consist of a single rectangle with edges parallel to the edges of the fabric. Each claim's rectangle is defined as follows:\n#\n#The number of inches between the left edge of the fabric and the left edge of the rectangle.\n#The number of inches between the top edge of the fabric and the top edge of the rectangle.\n#The width of the rectangle in inches.\n#The height of the rectangle in inches.\n#A claim like #123 @ 3,2: 5x4 means that claim ID 123 specifies a rectangle 3 inches from the left edge, 2 inches from the top edge, 5 inches wide, and 4 inches tall. Visually, it claims the square inches of fabric represented by # (and ignores the square inches of fabric represented by .) in the diagram below:\n#\n#...........\n#...........\n#...#####...\n#...#####...\n#...#####...\n#...#####...\n#...........\n#...........\n#...........\n#The problem is that many of the claims overlap, causing two or more claims to cover part of the same areas. For example, consider the following claims:\n#\n##1 @ 1,3: 4x4\n##2 @ 3,1: 4x4\n##3 @ 5,5: 2x2\n#Visually, these claim the following areas:\n#\n#........\n#...2222.\n#...2222.\n#.11XX22.\n#.11XX22.\n#.111133.\n#.111133.\n#........\n#The four square inches marked with X are claimed by both 1 and 2. (Claim 3, while adjacent to the others, does not overlap either of them.)\n#\n#If the Elves all proceed with their own plans, none of them will have enough fabric. \n#How many square inches of fabric are within two or more claims?\n\n#Check each square against each claim and see if it is within two or more claims. If it is then store it as 1 matching square inche\n#Every square is checked only once, so the final result will be accurate\n\n#Each square has two coordinates x1,y1 (top left corner) and x2,y2 (bottom right corner)\n#Each claim has similar coordinates x3,y3 (top left corner) and x4,y4 (bottom right corner)\n\n#Add input from file into list\ninputlist = []\nwith open(\"input3.txt\", \"r\") as file:\n line = file.readline()\n while line:\n #Add all the lines to a list that is later used over and over again\n inputlist.append(line.strip())\n line = file.readline()\n\n#Plan is to go over all the squares (square inches) from top right to bottom left and in every case check whether the square is inside a claim more than twice\n#NB! This is very slow and not optimal. Possibly the worst way of doing it.\n#Define how large the fabric is\nmaxfabric = 1000\n##Count the final result of how many square inches of fabric are within two or more claims\ncount = 0\ny1 = 0\ny2 = 1\nwhile y1 < maxfabric:\n print(\"Checking row \" + str(y1))\n #y1 determins the row. While it is 0 the first row of square inches is checked. Then it is incremented\n #y2 determins the height of the square inch we are comparing to, as it needs to be 1 inches then that stays constant and incraeses with y1\n #x1 determins the column. 
\n #x2 is constantly higher by 1 \n #Both of the x values are reset when we start with another row\n x1 = 0\n x2 = 1\n while x1 < maxfabric:\n #For every square track the number of times it is inside different claims\n conflict = 0\n #print(x1,y1,x2,y2)\n #Go over each claim in the input list\n for claim in inputlist:\n #Parse input and calculate coordinates for the top right (x3,y3) and bottom left (x4,y4) corner of each claim\n ID = claim.split( )[0]\n x3 = claim.split( )[2].split(',')[0]\n y3 = claim.split( )[2].split(',')[1][:-1]\n x4 = int(claim.split( )[3].split('x')[0]) + int(x3)\n y4 = int(claim.split( )[3].split('x')[1]) + int(y3)\n #print(\"ID = \" + str(ID) + \"; x1,y1 = \" + str(x1) + \",\" + str(y1) + \"; x2,y2 = \" + str(x2) + \",\" + str(y2) + \"; x3,y3 = \" + str(x3) + \",\" + str(y3) + \"; x4,y4 = \" + str(x4) + \",\" + str(y4))\n #Check if the square is inside any of the claims \n #This is the case when (x1 and x2 are within x3 until x4) and (y1 and y2 are within y3 until y4)\n if int(x3) <= int(x1) <= int(x4) and int(x3) <= int(x2) <= int(x4) and int(y3) <= int(y1) <= int(y4) and int(y3) <= int(y2) <= int(y4):\n conflict = conflict + 1\n #print(\"Square: \" + str(x1) + \",\" + str(y1) + \",\" + str(x2) + \",\" + str(y2) + \" inside ID \" + ID + \" conflict=\" + str(conflict))\n #print(conflict)\n if int(conflict) >= 2:\n #print(\"Oh no square: \" + str(x1) + \",\" + str(y1) + \",\" + str(x2) + \",\" + str(y2) + \" inside several claims!\")\n count = count + 1\n x1 = x1 + 1\n x2 = x2 + 1\n y1 = y1 + 1\n y2 = y2 + 1\nprint(count)\n\n#PART 2\n# NB! First part of the script is really slow, Part 2 does not rely on it so it can be commented out\n# This solution is not perfect, it returned three answers and only one was correct.\n#What is the ID of the only claim that doesn't overlap?\nconflict_list = []\nfor claim1 in inputlist:\n #Parse the ID\n ID1 = claim1.split( )[0]\n x3 = claim1.split( )[2].split(',')[0]\n y3 = claim1.split( )[2].split(',')[1][:-1]\n x4 = int(claim1.split( )[3].split('x')[0]) + int(x3)\n y4 = int(claim1.split( )[3].split('x')[1]) + int(y3)\n #print(\"Checking claim \" + str(ID1))\n for claim2 in inputlist:\n #Parse the ID\n ID2 = claim2.split( )[0]\n #Jump to next one if comparing against same claim\n if ID2 == ID1:\n #print(\"Claim 1 and 2 match: \" + str(ID1), str(ID2))\n continue\n x5 = claim2.split( )[2].split(',')[0]\n y5 = claim2.split( )[2].split(',')[1][:-1]\n x6 = int(claim2.split( )[3].split('x')[0]) + int(x5)\n y6 = int(claim2.split( )[3].split('x')[1]) + int(y5)\n print(\"Checking claim \" + ID1 + \" against claim \" + str(ID2))\n if (int(x5) < int(x3) < int(x6) and int(y5) < int(y3) < int(y6)) or \\\n (int(x5) < int(x4) < int(x6) and int(y5) < int(y4) < int(y6)) or \\\n (int(x5) < int(x4) < int(x6) and int(y5) < int(y3) < int(y6)) or \\\n (int(x5) < int(x3) < int(x6) and int(y5) < int(y4) < int(y6)) or \\\n (int(x3) < int(x5) and int(x4) > int(x6) and int(y5) < int(y3) < int(y6) and int(y5) < int(y4) < int(y6)):\n #print(\"Conflict\")\n if ID1 not in conflict_list:\n conflict_list.append(ID1)\n if ID2 not in conflict_list:\n conflict_list.append(ID2)\n#Go through the final list and find the claim with no overlap\nfor claim1 in inputlist:\n #Parse the ID\n ID = claim1.split( )[0]\n if ID not in conflict_list:\n print(\"ID: \" + ID + \" has no overlap with 
others\")","sub_path":"adventofcode/2018/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"287973018","text":"\"\"\"Functions to parse a file containing student data.\"\"\"\n\n\ndef all_houses(filename):\n \"\"\"Return a set of all house names in the given file.\n\n For example:\n >>> unique_houses('cohort_data.txt')\n {\"Dumbledore's Army\", 'Gryffindor', ..., 'Slytherin'}\n\n Arguments:\n - filename (str): the path to a data file\n\n Return:\n - set[str]: a set of strings\n \"\"\"\n\n file = open(filename)\n\n houses = set()\n for line in file:\n line = line.rstrip() #remove all the white spaces to the right\n student_record = line.split(\"|\") #split into a list\n if student_record[2]:\n houses.add(student_record[2])\n\n return houses\n\n\ndef students_by_cohort(filename, cohort='All'):\n \"\"\"Return a list of students' full names by cohort.\n\n Names are sorted in alphabetical order. If a cohort isn't\n given, return a list of all students. For example:\n >>> students_by_cohort('cohort_data.txt')\n ['Adrian Pucey', 'Alicia Spinnet', ..., 'Zacharias Smith']\n\n >>> students_by_cohort('cohort_data.txt', cohort='Fall 2015')\n ['Angelina Johnson', 'Cho Chang', ..., 'Terence Higgs', 'Theodore Nott']\n\n >>> students_by_cohort('cohort_data.txt', cohort='Winter 2016')\n ['Adrian Pucey', 'Andrew Kirke', ..., 'Roger Davies', 'Susan Bones']\n\n >>> students_by_cohort('cohort_data.txt', cohort='Spring 2016')\n ['Cormac McLaggen', 'Demelza Robins', ..., 'Zacharias Smith']\n\n >>> students_by_cohort('cohort_data.txt', cohort='Summer 2016')\n ['Alicia Spinnet', 'Dean Thomas', ..., 'Terry Boot', 'Vincent Crabbe']\n\n Arguments:\n - filename (str): the path to a data file\n - cohort (str): optional, the name of a cohort\n\n Return:\n - list[list]: a list of lists\n \"\"\"\n\n students = []\n file = open(filename)\n\n for line in file:\n line = line.rstrip() #remove all the white spaces to the right\n student_record = line.split(\"|\") #split into a list\n \n if cohort == student_record[4]:\n student_name = student_record[0]+ \" \" + student_record[1]\n students.append(student_name)\n elif cohort == \"All\":\n if len(student_record[4]) > 1:\n student_name = student_record[0]+ \" \" + student_record[1]\n students.append(student_name)\n \n \n return sorted(students)\n \n\n\ndef all_names_by_house(filename):\n \"\"\"Return a list that contains rosters for all houses, ghosts, instructors.\n\n Rosters appear in this order:\n - Dumbledore's Army\n - Gryffindor\n - Hufflepuff\n - Ravenclaw\n - Slytherin\n - Ghosts\n - Instructors\n\n Each roster is a list of names sorted in alphabetical order.\n\n For example:\n >>> rosters = hogwarts_by_house('cohort_data.txt')\n >>> len(rosters)\n 7\n\n >>> rosters[0]\n ['Alicia Spinnet', ..., 'Theodore Nott']\n >>> rosters[-1]\n ['Filius Flitwick', ..., 'Severus Snape']\n\n Arguments:\n - filename (str): the path to a data file\n\n Return:\n - list[list]: a list of lists\n \"\"\"\n\n dumbledores_army = []\n gryffindor = []\n hufflepuff = []\n ravenclaw = []\n slytherin = []\n ghosts = []\n instructors = []\n\n # roster= []\n\n file = open(filename)\n\n for line in file:\n line = line.rstrip() #remove all the white spaces to the right\n student_record = line.split(\"|\") #split into a list\n \n if student_record[2] == \"Dumbledore's Army\":\n name = student_record[0]+ \" \" + student_record[1]\n dumbledores_army.append(name)\n 
dumbledores_army.sort()\n elif student_record[2] == \"Gryffindor\":\n name = student_record[0]+ \" \" + student_record[1]\n gryffindor.append(name)\n gryffindor.sort()\n elif student_record[2] == \"Hufflepuff\":\n name = student_record[0]+ \" \" + student_record[1]\n hufflepuff.append(name)\n hufflepuff.sort()\n elif student_record[2] == \"Ravenclaw\":\n name = student_record[0]+ \" \" + student_record[1]\n ravenclaw.append(name)\n ravenclaw.sort()\n elif student_record[2] == \"Slytherin\":\n name = student_record[0]+ \" \" + student_record[1]\n slytherin.append(name)\n slytherin.sort()\n elif student_record[4] == \"G\":\n name = student_record[0]+ \" \" + student_record[1]\n ghosts.append(name)\n ghosts.sort()\n elif student_record[4] == \"I\":\n name = student_record[0]+ \" \" + student_record[1]\n instructors.append(name)\n instructors.sort()\n \n return [dumbledores_army, gryffindor, hufflepuff, ravenclaw, slytherin, ghosts, instructors]\n\n\ndef all_data(filename):\n \"\"\"Return all the data in a file.\n\n Each line in the file is a tuple of (full_name, house, advisor, cohort)\n\n Iterate over the data to create a big list of tuples that individually\n hold all the data for each person. (full_name, house, advisor, cohort)\n\n For example:\n >>> all_student_data('cohort_data.txt')\n [('Harry Potter', 'Gryffindor', 'McGonagall', 'Fall 2015'), ..., ]\n\n Arguments:\n - filename (str): the path to a data file\n\n Return:\n - list[tuple]: a list of tuples\n \"\"\"\n\n all_data = []\n\n file = open(filename)\n\n for line in file:\n line = line.rstrip() #remove all the white spaces to the right\n student_record = line.split(\"|\") #split into a list\n\n # if student_record[3] != \"\":\n name = student_record[0]+ \" \" + student_record[1]\n house = student_record[2]\n advisor = student_record[3]\n cohort = student_record[4]\n all_data.extend([(name, house, advisor, cohort)])\n\n return all_data\n\n\ndef get_cohort_for(filename, name):\n \"\"\"Given someone's name, return the cohort they belong to.\n\n Return None if the person doesn't exist. 
For example:\n >>> get_cohort_for('cohort_data.txt', 'Harry Potter')\n 'Fall 2015'\n\n >>> get_cohort_for('cohort_data.txt', 'Hannah Abbott')\n 'Winter 2016'\n\n >>> get_cohort_for('cohort_data.txt', 'Balloonicorn')\n None\n\n Arguments:\n - filename (str): the path to a data file\n - name (str): a person's full name\n\n Return:\n - str: the person's cohort or None\n \"\"\"\n\n file = open(filename)\n\n for line in file:\n line = line.rstrip() #remove all the white spaces to the right\n student_record = line.split(\"|\") #split into a list\n\n student_name = student_record[0]+ \" \" + student_record[1]\n if student_name == name:\n return student_record[4]\n \n\n\n\ndef find_duped_last_names(filename):\n \"\"\"Return a set of duplicated last names that exist in the data.\n\n For example:\n >>> find_name_duplicates('cohort_data.txt')\n {'Creevey', 'Weasley', 'Patil'}\n\n Arguments:\n - filename (str): the path to a data file\n\n Return:\n - set[str]: a set of strings\n \"\"\"\n\n \n dupe = set()\n\n\n last_names = []\n\n file = open(filename)\n\n for line in file:\n line = line.rstrip() #remove all the white spaces to the right\n student_record = line.split(\"|\") #split into a list\n\n last_names.append(student_record[1]) #put all last names in the list\n\n for lname in last_names: \n if last_names.count(lname)>1:\n dupe.add(lname)\n\n return dupe\n\n\n\n\ndef get_housemates_for(filename, name):\n \"\"\"Return a set of housemates for the given student.\n\n Given a student's name, return a list of their housemates. Housemates are\n students who belong to the same house and were in the same cohort as the\n given student.\n\n For example:\n >>> get_housemates_for('cohort_data.txt', 'Hermione Granger')\n {'Angelina Johnson', ..., 'Seamus Finnigan'}\n \"\"\"\n\n house = \"\"\n cohort= \"\"\n housemate = []\n\n file = open(filename)\n\n for line in file:\n line = line.rstrip() #remove all the white spaces to the right\n student_record = line.split(\"|\") #split into a list\n\n student_name = student_record[0]+ \" \" + student_record[1]\n\n if name == student_name:\n house = student_record[2]\n cohort = student_record[4]\n\n file.seek(0)\n\n for line in file:\n line = line.rstrip() #remove all the white spaces to the right\n student_record_two = line.split(\"|\") #split into a list\n\n student_name = student_record_two[0]+ \" \" + student_record_two[1]\n if (house == student_record_two[2]) and (cohort == student_record_two[4]) and (name != student_name):\n \n\n housemate.append(student_name)\n\n housemate = set(housemate)\n return housemate\n\n\n\n\n##############################################################################\n# END OF MAIN EXERCISE. Yay! You did it! 
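find_duped_last_names in the cohort_data.py record above calls list.count() inside a loop, which is quadratic in the number of students. A linear-time variant of the same function using collections.Counter, assuming the same pipe-delimited file layout the record parses:

```python
# Linear-time variant of find_duped_last_names above.
from collections import Counter

def find_duped_last_names(filename):
    with open(filename) as file:
        counts = Counter(line.rstrip().split("|")[1] for line in file)
    return {lname for lname, n in counts.items() if n > 1}
```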
You Rock!\n#\n\nif __name__ == '__main__':\n import doctest\n\n result = doctest.testfile('doctests.py',\n report=False,\n optionflags=(\n doctest.REPORT_ONLY_FIRST_FAILURE\n ))\n doctest.master.summarize(1)\n if result.failed == 0:\n print('ALL TESTS PASSED')\n","sub_path":"cohort_data.py","file_name":"cohort_data.py","file_ext":"py","file_size_in_byte":9008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"479096895","text":"class queue():\n def __init__(self):\n self.data = []\n\n def empty(self):\n return len(self.data) == 0\n\n def pop(self):\n if not self.empty():\n return self.data.pop()\n\n def push(self, value):\n self.data.insert(0, value)\n\n def front(self):\n if not self.empty():\n return self.data[-1]\n\n def size(self):\n return len(self.data)\n\n def clear(self):\n self.data = []\n\n\ns = input()\nst = queue()\nwhile s != 'exit':\n cmd = s.split()\n if len(cmd) == 2:\n st.push(int(cmd[1]))\n print('ok')\n elif s == 'front':\n if st.empty():\n print('error')\n else:\n print(st.front())\n elif s == 'pop':\n if st.empty():\n print('error')\n else:\n print(st.pop())\n elif s == 'size':\n print(st.size())\n elif s == 'clear':\n st.clear()\n print('ok')\n s = input()\nprint('bye')\n","sub_path":"18/l.py","file_name":"l.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204607531","text":"\"\"\"Classes for models with related entities.\"\"\"\n\nimport re\nfrom typing import TYPE_CHECKING\n\nfrom django.db import models\nfrom django.db.models import QuerySet\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom core.models.model import ExtendedModel\nfrom core.models.model_with_cache import store\n\nif TYPE_CHECKING:\n from apps.entities.models.entity import Entity\n\nATTRIBUTE_NAMES = ('attributees', 'involved_entities', 'affiliated_entities')\n\n\nclass ModelWithRelatedEntities(ExtendedModel):\n \"\"\"\n A model that has related entities (attributees, involved entities, etc.).\n\n Ideally, this class would be a mixin, but due to Django's model magic,\n it must be defined as an abstract model class.\n \"\"\"\n\n related_entities = models.ManyToManyField(\n to='entities.Entity',\n related_name='%(class)s_set',\n blank=True,\n verbose_name=_('related entities'),\n )\n\n class Meta:\n \"\"\"Meta options for ModelWithRelatedEntities.\"\"\"\n\n # https://docs.djangoproject.com/en/dev/ref/models/options/#model-meta-options\n\n abstract = True\n\n @property\n def _related_entities(self) -> 'QuerySet[Entity]':\n \"\"\"Return the queryset of entities related to the model instance, or None.\"\"\"\n for attribute_name in ATTRIBUTE_NAMES:\n attribute_value = getattr(self, attribute_name, None)\n if attribute_value:\n return attribute_value\n return self.related_entities.all()\n\n @property # type: ignore\n @store(attribute_name='serialized_entities')\n def serialized_entities(self) -> list[dict]:\n \"\"\"Return a list of dictionaries representing the instance's images.\"\"\"\n return [entity.serialize() for entity in self._related_entities.all().iterator()]\n\n def preprocess_html(self, html: str) -> str:\n \"\"\"Modify the value of an HTML field during cleaning.\"\"\"\n # Wrap entity names in spans to identify them (so that links can be added if desired).\n entities = self.serialized_entities\n if entities:\n for entity in entities:\n aliases = entity.get('aliases') or []\n for name in set([entity['name']] + aliases):\n 
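The queue record above backs the structure with a plain list, so push() (list.insert(0, ...)) costs O(n) per call. The same interface on collections.deque makes both ends O(1); a sketch preserving the record's semantics (push adds at the left, pop and front read the right, i.e. the oldest element):

```python
# deque-backed variant of the queue class in the record above.
from collections import deque

class Queue:
    def __init__(self):
        self.data = deque()

    def empty(self):
        return not self.data

    def push(self, value):
        self.data.appendleft(value)   # O(1), unlike list.insert(0, ...)

    def pop(self):
        if not self.empty():
            return self.data.pop()    # oldest element, from the right

    def front(self):
        if not self.empty():
            return self.data[-1]

    def size(self):
        return len(self.data)

    def clear(self):
        self.data.clear()
```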
opening_tag = (\n                        # NOTE: the original span markup was stripped during\n                        # extraction; this is a plausible reconstruction of the\n                        # marker tag the comment above describes.\n                        f'<span class=\"entity-name\" data-name=\"{entity[\"name\"]}\">'\n                    )\n                    closing_tag = '</span>'\n                    html = re.sub(\n                        # match instances not in quotations\n                        rf'(^|^<p>|[^>])({name})(?:(?!\\w|[^\\ ]\\\"))',\n                        rf'\\g<1>{opening_tag}\\g<2>{closing_tag}',\n                        html,\n                    )\n        return html\n","sub_path":"apps/entities/models/model_with_related_entities.py","file_name":"model_with_related_entities.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498183861","text":"#!/usr/bin/env python3\n\n# Send beacons detected periodically to AWS IoT. Based on bluez library.\n# Must be run with \"sudo python3\"\n\nimport time\nimport datetime\nimport ssl\nimport json\nimport paho.mqtt.client as mqtt\nimport bluetooth.ble as ble\nimport grovepi\n\n# LCD import\nimport time, sys\nimport RPi.GPIO as GPIO\nimport smbus\n\n# LED import\nfrom grovepi import *\n\n# Connect buzzer to D8, LED to D4 and Ultrasonic ranger to D7\nbuzzer = 8\nled = 4\nultrasonic_ranger = 7\n\ngrovepi.pinMode(buzzer, \"OUTPUT\")\ngrovepi.pinMode(led, \"OUTPUT\")\n\n\n# TODO: Change this to the name of our Raspberry Pi, also known as our \"Thing Name\"\ndeviceName = \"g46pi\"\ndeviceName_actuation = \"g46pi_actuation\"\n\n# Public certificate of our Raspberry Pi, as provided by AWS IoT.\ndeviceCertificate = \"tp-iot-certificate.pem.crt\"\n# Private key of our Raspberry Pi, as provided by AWS IoT.\ndevicePrivateKey = \"tp-iot-private.pem.key\"\n# Root certificate to authenticate AWS IoT when we connect to their server.\nawsCert = \"aws-iot-rootCA.crt\"\n\nisConnected = False\n\n\n# This is the main logic of the program. We connect to AWS IoT via MQTT, send sensor data periodically to AWS IoT,\n# and handle any actuation commands received from AWS IoT.\ndef main():\n    global isConnected\n    # Create an MQTT client for connecting to AWS IoT via MQTT.\n    client = mqtt.Client(deviceName + \"_sr\") # Client ID must be unique because AWS will disconnect any duplicates.\n    client.on_connect = on_connect # When connected, call on_connect.\n    client.on_message = on_message # When message received, call on_message.\n    client.on_log = on_log # When logging debug messages, call on_log.\n\n    # Set the certificates and private key for connecting to AWS IoT. TLS 1.2 is mandatory for AWS IoT and is supported\n    # only in Python 3.4 and later, compiled with OpenSSL 1.0.1 and later.\n    client.tls_set(awsCert, deviceCertificate, devicePrivateKey, ssl.CERT_REQUIRED, ssl.PROTOCOL_TLSv1_2)\n\n    # Connect to AWS IoT server. Use AWS command line \"aws iot describe-endpoint\" to get the address.\n    print(\"Connecting to AWS IoT...\")\n    client.connect(\"A1P01IYM2DOZA0.iot.us-west-2.amazonaws.com\", 8883, 60)\n\n    # Start a background thread to process the MQTT network commands concurrently, including auto-reconnection.\n    client.loop_start()\n\n    # Create the beacon service for scanning beacons.\n    beacon_service = ble.BeaconService()\n\n    # Loop forever.\n    while True:\n        try:\n            # If we are not connected yet to AWS IoT, wait 1 second and try again.\n            if not isConnected:\n                time.sleep(1)\n                continue\n\n            # Scan for beacons and add to the sensor data payload.\n            beacons = {}\n            # Nick: Added to initialise beacon_id value\n            beacon_id = \"\"\n            beacons_detected = beacon_service.scan(2)\n            for beacon_address, beacon_info in list(beacons_detected.items()):\n                # For each beacon found, add to the payload.
Need to flip the bytes.\n beacon = {\n \"uuid\": beacon_info[0].replace('-', ''),\n \"major\": (beacon_info[1] % 256) * 256 + beacon_info[1] // 256,\n \"minor\": (beacon_info[2] % 256) * 256 + beacon_info[2] // 256,\n \"power\": beacon_info[3],\n \"rssi\": beacon_info[4],\n \"address\": beacon_address\n }\n # Beacon ID is B_(uuid)_(major)_(minor). This format allows us\n # to match beacon IDs within IoT rules. Prepad major and minor\n # with 0s to max length, so that we can slice beacons by fixed\n # length in IoT rules. Sample beacon ID:\n # \"B_b9407f30f5f8466eaff925556b57fe6d_00602_29434\"\n beacon_id = \"B_\" + beacon[\"uuid\"] + \"_\" + \\\n str(beacon[\"major\"]).rjust(5, '0') + \"_\" + \\\n str(beacon[\"minor\"]).rjust(5, '0')\n # Nick: Originally beacon[\"id\"] = beacons\n beacon[\"id\"] = beacon_id\n beacons[beacon_id] = beacon\n\n # Prepare our sensor data in JSON format.\n # Nick: For beacon, only send beacon_id since it is uniquely identifiable incl. UUID+major+minor\n payload = {\n \"state\": {\n \"reported\": {\n \"cplot\": \"lot1\",\n \"beacons\": beacon_id,\n \"distance\": grovepi.ultrasonicRead(ultrasonic_ranger),\n \"timestamp\": datetime.datetime.now().isoformat()\n }\n }\n }\n\n\n print(\"Sending sensor data to AWS IoT...\\n\" +\n json.dumps(payload, indent=4, separators=(',', ': ')))\n\n # Publish our sensor data to AWS IoT via the MQTT topic, also known as updating our \"Thing Shadow\".\n client.publish(\"$aws/things/\" + deviceName + \"/shadow/update\", json.dumps(payload))\n print(\"Sent to AWS IoT\")\n\n # Wait 30 seconds before sending the next set of sensor data.\n time.sleep(30)\n\n except KeyboardInterrupt:\n # Stop the program when we press Ctrl-C.\n break\n except Exception as e:\n # For all other errors, we wait a while and resume.\n print(\"Exception: \" + str(e))\n time.sleep(10)\n continue\n\n\n# This is called when we are connected to AWS IoT via MQTT.\n# We subscribe for notifications of desired state updates.\ndef on_connect(client, userdata, flags, rc):\n global isConnected\n isConnected = True\n print(\"Connected to AWS IoT\")\n # Nick: Subscribe to MQTT topic: deviceName_actuation so that we will receive notifications of desired states.\n topic = \"$aws/things/\" + deviceName_actuation + \"/shadow/update/accepted\"\n print(\"Subscribing to MQTT topic \" + topic)\n client.subscribe(topic)\n # Subscribe to our MQTT topic so that we will receive notifications of updates.\n topic = \"$aws/things/\" + deviceName + \"/shadow/update/accepted\"\n print(\"Subscribing to MQTT topic \" + topic)\n client.subscribe(topic)\n\n\n# This is called when we receive a subscription notification from AWS IoT.\ndef on_message(client, userdata, msg):\n # Convert the JSON payload to a Python dictionary.\n # The payload is in binary format so we need to decode as UTF-8.\n payload2 = json.loads(msg.payload.decode(\"utf-8\"))\n print(\"Received message, topic: \" + msg.topic + \", payload:\\n\" +\n json.dumps(payload2, indent=4, separators=(',', ': ')))\n\n # If there is a desired state in this message, then we actuate, e.g. 
if we see \"led=on\", we switch on the LED.\n if payload2.get(\"state\") is not None and payload2[\"state\"].get(\"desired\") is not None:\n # Get the desired state and loop through all attributes inside.\n desired_state = payload2[\"state\"][\"desired\"]\n for attribute in desired_state:\n # We handle the attribute and desired value by actuating.\n value = desired_state.get(attribute)\n actuate(client, attribute, value)\n\n\n# Send the reported state of our actuator to AWS IoT after it has been triggered, e.g. \"led\": \"on\".\ndef send_reported_state(client, attribute, value):\n # Prepare our sensor data in JSON format.\n payload = {\n \"state\": {\n \"reported\": {\n attribute: value,\n \"timestamp\": datetime.datetime.now().isoformat()\n }\n }\n }\n print(\"Sending sensor data to AWS IoT...\\n\" +\n json.dumps(payload, indent=4, separators=(',', ': ')))\n\n # Publish our sensor data to AWS IoT via the MQTT topic, also known as updating our \"Thing Shadow\".\n client.publish(\"$aws/things/\" + deviceName_actuation + \"/shadow/update\", json.dumps(payload))\n print(\"Sent to AWS IoT\")\n\n # Print out log messages for tracing.\n def on_log(client, userdata, level, buf):\n print(\"Log: \" + buf)\n\n\n# Control my actuators based on the specified attribute and value,\n# e.g. \"led=on\" will switch on my LED.\n\ndef actuate(client, attribute, value):\n if attribute == \"timestamp\":\n # Ignore the timestamp attribute, it's only for info.\n return\n print(\"Setting \" + attribute + \" to \" + value + \"...\")\n\n if attribute == \"led\":\n # We actuate the LED for \"on\", \"off\" or \"flash1\".\n if value == \"on\":\n # Switch on LED.\n grovepi.digitalWrite(led, 1)\n send_reported_state(client, \"led\", \"on\")\n return\n elif value == \"off\":\n # Switch off LED.\n grovepi.digitalWrite(led, 0)\n send_reported_state(client, \"led\", \"off\")\n return\n elif value == \"flash1\":\n #Switch on LED, wait 1 second, switch it off.\n grovepi.digitalWrite(led, 1)\n send_reported_state(client, \"led\", \"on\")\n time.sleep(1)\n grovepi.digitalWrite(led, 0)\n send_reported_state(client, \"led\", \"off\")\n time.sleep(1)\n return\n # Show an error if attribute or value are incorrect.\n else:\n print(\"Error: Don't know how to set \" + attribute + \" to \" + value)\n\n # here is the start of the buzzer part---------------------\n # Connect the Grove Buzzer to digital port D8\n # SIG,NC,VCC,GND\n\n\n if attribute == \"buzzer\":\n if value == \"on\":\n # Start buzzing for 8 second; in actual implementation, it can be made to buzz until violation is over\n grovepi.digitalWrite(buzzer, 8)\n print('start buzzer')\n time.sleep(1)\n\n # Stop buzzing after 1 second, since it is for demo\n grovepi.digitalWrite(buzzer, 0)\n print('stop')\n send_reported_state(client, \"buzzer\", \"on\")\n return\n \n elif value == \"off\":\n # Switch off buzzer. In this demo, this condition has no effect since buzzer only buzz for 1 sec. 
This is included for completeness sake in case future(actual) implementation need to buzz until violation is over.\n grovepi.digitalWrite(buzzer, 0)\n send_reported_state(client, \"buzzer\", \"off\")\n return\n # except KeyboardInterrupt:\n #\tgrovepi.digitalWrite(buzzer,0)\n #\tbreak\n # except IOError:\n #\tprint (\"Error\") \n else:\n print(\"no buzzer actuated\")\n\n\n # here is the end of the buzzer part------------------------\n\n # here is the start of the LCD part ------------------------\n if attribute == \"lcd\":\n # We actuate the lcd\n if value == \"on\":\n\n # this device has two I2C addresses\n DISPLAY_RGB_ADDR = 0x62\n DISPLAY_TEXT_ADDR = 0x3e\n\n # use the bus that matches your raspi version\n rev = GPIO.RPI_REVISION\n if rev == 2 or rev == 3:\n bus = smbus.SMBus(1)\n else:\n bus = smbus.SMBus(0)\n\n # set backlight to (R,G,B) (values from 0..255 for each)\n def setRGB(r,g,b):\n bus.write_byte_data(DISPLAY_RGB_ADDR,0,0)\n bus.write_byte_data(DISPLAY_RGB_ADDR,1,0)\n bus.write_byte_data(DISPLAY_RGB_ADDR,0x08,0xaa)\n bus.write_byte_data(DISPLAY_RGB_ADDR,4,r)\n bus.write_byte_data(DISPLAY_RGB_ADDR,3,g)\n bus.write_byte_data(DISPLAY_RGB_ADDR,2,b)\n\n # send command to display (no need for external use) \n def textCommand(cmd):\n bus.write_byte_data(DISPLAY_TEXT_ADDR,0x80,cmd)\n\n # set display text \\n for second line(or auto wrap) \n def setText(text):\n textCommand(0x01) # clear display\n time.sleep(.05)\n textCommand(0x08 | 0x04) # display on, no cursor\n textCommand(0x28) # 2 lines\n time.sleep(.05)\n count = 0\n row = 0\n for c in text:\n if c == '\\n' or count == 16:\n count = 0\n row += 1\n if row == 2:\n break\n textCommand(0xc0)\n if c == '\\n':\n continue\n count += 1\n bus.write_byte_data(DISPLAY_TEXT_ADDR,0x40,ord(c))\n\n\n # LCD warning\n if __name__==\"__main__\":\n setText(\"Illegal Parking!\\nWheelclamp OTW!\")\n setRGB(0,255,0)\n send_reported_state(client, \"lcd\", \"on\")\n return\n\n\n elif value == \"off\":\n\n # this device has two I2C addresses\n DISPLAY_RGB_ADDR = 0x62\n DISPLAY_TEXT_ADDR = 0x3e\n\n # use the bus that matches your raspi version\n rev = GPIO.RPI_REVISION\n if rev == 2 or rev == 3:\n bus = smbus.SMBus(1)\n else:\n bus = smbus.SMBus(0)\n\n # set backlight to (R,G,B) (values from 0..255 for each)\n def setRGB(r,g,b):\n bus.write_byte_data(DISPLAY_RGB_ADDR,0,0)\n bus.write_byte_data(DISPLAY_RGB_ADDR,1,0)\n bus.write_byte_data(DISPLAY_RGB_ADDR,0x08,0xaa)\n bus.write_byte_data(DISPLAY_RGB_ADDR,4,r)\n bus.write_byte_data(DISPLAY_RGB_ADDR,3,g)\n bus.write_byte_data(DISPLAY_RGB_ADDR,2,b)\n\n # send command to display (no need for external use) \n def textCommand(cmd):\n bus.write_byte_data(DISPLAY_TEXT_ADDR,0x80,cmd)\n\n # Switch off LCD\n if __name__==\"__main__\":\n textCommand(0x01)\n setRGB(0,0,0)\n send_reported_state(client, \"lcd\", \"off\")\n return\n\n\n# here is the end of the LCD part----------------------\n\n\n# Print out log messages for tracing.\ndef on_log(client, userdata, level, buf):\n print(\"Log: \" + buf)\n\n\n# Start the main program.\nmain()\n","sub_path":"handypark.py","file_name":"handypark.py","file_ext":"py","file_size_in_byte":14125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"138227085","text":"import numpy as np\nimport scipy.optimize as so\nimport math\nimport data\n\n\ndef func(newpara):\n global x\n tx = np.copy(x)\n global cross\n global distance\n l = len(tx)\n tcos = np.array(list(map(lambda t: math.cos(t), newpara)))\n for idx in range(l):\n tx[idx] = 
tx[idx] * tcos\n tx = np.append(tx, np.array(list(map(lambda t: math.sin(t), newpara))))\n tx = tx.reshape(l + 1, num)\n newcross = tx.transpose().dot(tx)\n result = []\n for idx in range(num):\n result.append(sum(((newcross[idx][j] - distance[idx][j]) * (\n math.cos(newpara[idx]) * math.sin(newpara[j]) - math.cos(newpara[j]) * math.sin(newpara[idx]) *\n cross[idx][j])) for j in range(num)))\n\n return np.array(result)\n\nmaxitr = 1000\nytol = 0.01\nnum = 10\ndistance = data.generate(num)\npara = np.ndarray((0, num))\nx = np.ones(num)\nit = 1\nx = x.reshape(it, num)\ncross = x.transpose().dot(x)\npara = np.append(para, np.array(so.fsolve(func, np.random.rand(num))))\npara = para.reshape(it, num)\nfor alphapara in para:\n for i in range(it):\n x[i] = x[i] * np.array(list(map(lambda t: math.cos(t), alphapara)))\n x = np.append(x, np.array(list(map(lambda t: math.sin(t), alphapara))))\n it += 1\n x = x.reshape(it, num)\ncross = x.transpose().dot(x)\nyold = 10000\ny = sum(sum((cross[i][j] - distance[i][j]) ** 2 for i in range(j)) for j in range(num))\nwhile ytol < y or abs(y - yold) > 0.00001:\n if it > maxitr:\n break;\n para = np.append(para, np.array(so.fsolve(func, np.random.rand(num))))\n para = para.reshape(it, num)\n for i in range(it):\n x[i] = x[i] * np.array(list(map(lambda t: math.cos(t), para[-1])))\n x = np.append(x, np.array(list(map(lambda t: math.sin(t), para[-1]))))\n it += 1\n x = x.reshape(it, num)\n cross = x.transpose().dot(x)\n yold = y\n y = sum(sum((cross[i][j] - distance[i][j]) ** 2 for i in range(j)) for j in range(num))\n print(y)\nprint(para)\nprint('')\nprint('data dimension: ' + str(len(para)))\nprint('with var: ' + str(y))\n","sub_path":"MDS.py","file_name":"MDS.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"618679321","text":"import DGLGCNTrainer\nfrom dglConverter import StackExchangeDataset, stratifiedKFold\nimport statistics as stats\n\n\ndef main():\n ## Dataset building operartions done here\n dataset = StackExchangeDataset()\n labels_ints = dataset.labels_ints\n labelledindices = dataset.labelledindices\n g = dataset[0] \n \n\n badgeclasshyperparams = {'target': 0.7482505099467229, \n 'params': {\n 'dropout': 0.2095972572016474, 'hidden1': 47.631414020631496, \n 'learning_rate': 0.020524779748178592, 'num_epochs': 180.49878982255126, 'reg_factor': 0.00027387593197926164}}\n \n nicePostBinaryHyperparams = {'target': 0.8259088957051715, \n 'params': { 'dropout': 0.0461692973843989, 'hidden1': 21.685530991638885, \n 'learning_rate': 0.03462151663160047, 'num_epochs': 103.48279587690719, 'reg_factor': 0.005388167340033569}}\n\n\n nicePostMulticlassHyperparams = {'target': 0.7147793751578765, \n 'params': {'dropout': 0.0, 'hidden1': 12.0, 'learning_rate': 0.03106249938783281, \n 'num_epochs': 117.92144217515235, 'reg_factor': 0.0}}\n\n # BADGE CLASS RUN\n params = badgeclasshyperparams.get('params')\n dataset.update_labels_from_labelsets(0)\n graph = dataset[0]\n\n model = DGLGCNTrainer.GCN(graph.ndata['feat'].shape[1], int(params.get('hidden1')), dataset.num_classes, allow_zero_in_degree=True, dropout=params.get('dropout'))\n reportbadgeclass = DGLGCNTrainer.train(graph, model, weight_decay=params.get('reg_factor'), learning_rate=params.get('learning_rate'), num_epochs=int(params.get('num_epochs')), validation=False)\n \n # NicePostBinary RUN\n params = nicePostBinaryHyperparams.get('params')\n dataset.update_labels_from_labelsets(1)\n graph = dataset[0]\n\n 
model = DGLGCNTrainer.GCN(graph.ndata['feat'].shape[1], int(params.get('hidden1')), dataset.num_classes, allow_zero_in_degree=True, dropout=params.get('dropout'))\n reportnicepostbinary = DGLGCNTrainer.train(graph, model, weight_decay=params.get('reg_factor'), learning_rate=params.get('learning_rate'), num_epochs=int(params.get('num_epochs')), validation=False)\n\n # NicePostBinary RUN\n params = nicePostMulticlassHyperparams.get('params')\n dataset.update_labels_from_labelsets(2)\n graph = dataset[0]\n\n model = DGLGCNTrainer.GCN(graph.ndata['feat'].shape[1], int(params.get('hidden1')), dataset.num_classes, allow_zero_in_degree=True, dropout=params.get('dropout'))\n reportnicepostmulti = DGLGCNTrainer.train(graph, model, weight_decay=params.get('reg_factor'), learning_rate=params.get('learning_rate'), num_epochs=int(params.get('num_epochs')), validation=False)\n\n print(\"RESULTS:\")\n print(\"Badge Class Classification Report:\")\n print(reportbadgeclass)\n print(\"NicePostBinary Classification Report:\")\n print(reportnicepostbinary)\n print(\"NicePostMulti Classification Report:\")\n print(reportnicepostmulti)\n # Demo Code, 1 run each\n\ndef kfoldPipelineRun():\n dataset = StackExchangeDataset()\n labels_ints = dataset.labels_ints\n labelledindices = dataset.labelledindices\n num_classes = dataset.num_classes\n\n reports = []\n\n badgeclasshyperparams = {'target': 0.7492577835171325, \n 'params': {'dropout': 0.4022805061070413, 'hidden1': 33.6998497230906, 'learning_rate': 0.0559131138617306, \n 'num_epochs': 62.461910175237406, 'reg_factor': 0.001981014890848788}}\n\n \n nicePostBinaryHyperparams = {'target': 0.8302443037409214, \n 'params': {'dropout': 0.2515167086419769, 'hidden1': 47.631414020631496, 'learning_rate': 0.020524779748178592, \n 'num_epochs': 180.49878982255126, 'reg_factor': 0.00027387593197926164}}\n\n\n nicePostMulticlassHyperparams = {'target': 0.7147793751578765, \n 'params': {'dropout': 0.0, 'hidden1': 12.0, 'learning_rate': 0.03106249938783281, \n 'num_epochs': 117.92144217515235, 'reg_factor': 0.0}}\n\n\n tkipfOGHyperparams = {'dropout': 0.5, 'hidden1': 16, \n 'learning_rate': 0.01, 'num_epochs': 200, 'reg_factor': 0.0005}\n\n num_folds = 4\n\n\n # choice of params (depends on label type)\n params = nicePostBinaryHyperparams.get('params')\n # params = tkipfOGHyperparams\n\n kfoldSplits = stratifiedKFold(labels_ints, labelledindices, num_classes, num_folds)\n\n dataset.update_masks_from_indices(new_val_indices=[])\n\n for i in range(num_folds):\n test_indices = kfoldSplits[i]\n train_indices = []\n for j in range(num_folds):\n if j != i:\n train_indices += kfoldSplits[j]\n\n dataset.update_masks_from_indices(new_train_indices=train_indices, new_test_indices=test_indices)\n graph = dataset[0]\n\n model = DGLGCNTrainer.GCN(graph.ndata['feat'].shape[1], int(params.get('hidden1')), dataset.num_classes, allow_zero_in_degree=True, dropout=params.get('dropout'))\n report = DGLGCNTrainer.train(graph, model, weight_decay=params.get('reg_factor'), learning_rate=params.get('learning_rate'), num_epochs=int(params.get('num_epochs')), validation=False)\n reports.append(report)\n\n meanstdevs, class_meanstdevs = getMetricAvgsStdDevsFromReports(reports, num_classes)\n output_text = \"\\n\\n\"\n output_text += \"Results for label set NicePostBinary:\\n\"\n output_text += \"Means and standard deviations:\\n\"\n\n output_text += \"Accuracy: \" + str(meanstdevs[0]) + \"\\n\"\n output_text += \"Macro Precision: \" + str(meanstdevs[1]) + \"\\n\"\n output_text += \"Macro Recall: \" + 
str(meanstdevs[2]) + \"\\n\"\n output_text += \"Macro F1: \" + str(meanstdevs[3]) + \"\\n\"\n output_text += \"Weighted Precision: \" + str(meanstdevs[4]) + \"\\n\"\n output_text += \"Weighted Recall: \" + str(meanstdevs[5]) + \"\\n\"\n output_text += \"Weighted F1: \" + str(meanstdevs[6]) + \"\\n\"\n\n output_text += \"Class Results: (each sublist reps a class, elems are [precision,recall,f1])\\n\"\n output_text += str(class_meanstdevs) + \"\\n\"\n\n print(output_text)\n\n file = open(\"output.txt\",\"a\")\n file.write(output_text)\n\n print(\"Run complete with NicePostBinary labels!\")\n\n\n\n# this code is not brilliant but f it\ndef getMetricAvgsStdDevsFromReports(reports, num_classes):\n accs = []\n\n macro_precs = []\n macro_recs = []\n macro_f1s = []\n\n weighted_precs = []\n weighted_recalls = []\n weighted_f1s = []\n\n class_metric_agg = [[[],[],[]] for i in range(num_classes)]\n\n for report in reports:\n acc, macro_avgs, weighted_avgs, class_avgs = extractMetricsFromReport(report, num_classes)\n\n accs.append(acc)\n macro_precs.append(macro_avgs.get('precision'))\n macro_recs.append(macro_avgs.get('recall'))\n macro_f1s.append(macro_avgs.get('f1-score'))\n\n weighted_precs.append(weighted_avgs.get('precision'))\n weighted_recalls.append(weighted_avgs.get('recall'))\n weighted_f1s.append(weighted_avgs.get('f1-score'))\n\n for i in range(len(class_avgs)):\n class_report = class_avgs[i]\n class_metric_agg[i][0].append(class_report[\"precision\"])\n class_metric_agg[i][1].append(class_report[\"recall\"])\n class_metric_agg[i][2].append(class_report[\"f1-score\"])\n\n \n metrics = [accs, macro_precs, macro_recs, macro_f1s, weighted_precs, weighted_recalls, weighted_f1s]\n\n meanstdevs = list(map(lambda lst: (stats.mean(lst),stats.stdev(lst)),metrics))\n\n class_meanstdevs = [[(stats.mean(lst), stats.stdev(lst)) for lst in classaggs] for classaggs in class_metric_agg]\n\n return meanstdevs, class_meanstdevs\n\n\n\ndef extractMetricsFromReport(report, num_classes):\n accuracy = report.get('accuracy')\n macro_avgs = report.get('macro avg')\n weighted_avgs = report.get('weighted avg')\n\n class_avgs = [{} for i in range(num_classes)]\n for i in range(num_classes):\n class_avgs[i] = report.get(str(i))\n\n return accuracy, macro_avgs, weighted_avgs, class_avgs\n\n\n \n\"\"\"\n{'0': {'precision': 1.0, 'recall': 0.6666666666666666, 'f1-score': 0.8, 'support': 3.0}, \n'1': {'precision': 0.5, 'recall': 1.0, 'f1-score': 0.6666666666666666, 'support': 1.0}, \n'2': {'precision': 1.0, 'recall': 1.0, 'f1-score': 1.0, 'support': 1.0}, \n'3': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 1.0}, \n'accuracy': 0.6666666666666666, \n'macro avg': {'precision': 0.625, 'recall': 0.6666666666666666, 'f1-score': 0.6166666666666667, 'support': 6.0}, \n'weighted avg': {'precision': 0.75, 'recall': 0.6666666666666666, 'f1-score': 0.6777777777777777, 'support': 6.0}}\n\"\"\"\n \n\n \n \n\nif __name__=='__main__':\n main()\n # kfoldPipelineRun()","sub_path":"src/generalised/classificationRunner.py","file_name":"classificationRunner.py","file_ext":"py","file_size_in_byte":8482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"22979178","text":"from .CryptoTrader import (\n CryptoTrader,\n Alert,\n Buy,\n Sell,\n User,\n InsufficientFundsError,\n InsufficientCoinsError,\n)\nfrom bot.Bot import Bot, Command, SlackBot\nfrom typing import Dict, List, Union, Optional\nimport threading\n\n\nclass CryptoBot(SlackBot):\n def __init__(self, token, bot: 
Bot, trader: CryptoTrader) -> None:\n        super().__init__(token, bot, None)\n        self.prices: Dict[str, float] = {}\n        self.trader = trader\n        self.lastLeaderboard: Union[str, None] = None\n        self.lastTopCoins: Union[str, None] = None\n        self.poll_and_execute_ifs()\n\n    def poll_and_execute_ifs(self) -> None:\n        # poll every 30 minutes\n        # CoinMarketCap API limit is only 300 calls per day, so we need to limit the poll frequency here\n        threading.Timer(60 * 30, self.poll_and_execute_ifs).start()\n\n        # get all users\n        self.old_prices = self.prices\n        self.prices = self.trader.api.getPrices()\n        if self.old_prices:\n            msg = ''\n            for ticker,price in self.prices.items():\n                old_price = self.old_prices[ticker]\n                change = price/old_price - 1 # positive change means the price went up\n                if change >= .1:\n                    msg += \"{} is UP {:.1%} in the last 30 min\\n\".format(ticker,change)\n                elif change <= -.1:\n                    msg += \"{} is DOWN {:.1%} in the last 30 min\\n\".format(ticker,abs(change))\n            msg = msg.strip()\n            if msg != '':\n                self.api_call(\n                    \"chat.postMessage\",\n                    channel=\"#crypto-notifications\",\n                    text=_mono(msg),\n                    username=self.bot.name,\n                    icon_emoji=self.bot.icon_emoji,\n                )\n        for user in self.trader.getAllUsers():\n            self.execute_ifs(user, self.prices)\n\n    def execute_ifs(self, user: User, prices: Dict[str, float]) -> None:\n        idx = 0\n        ifs = user.ifs\n        while idx < len(ifs):\n            try:\n                i = ifs[idx]\n                if not i.meets_condition(prices):\n                    print(\n                        \"{} if id {} not met: {}\".format(\n                            user.user_name, i.id, i.condition.render()\n                        )\n                    )\n                else:\n                    print(\n                        \"{} if id {} triggered! {}\".format(\n                            user.user_name, i.id, i.condition.render()\n                        )\n                    )\n                    if i.action[\"type\"] == \"alert\": # type: ignore\n                        self.api_call(\n                            \"chat.postMessage\",\n                            channel=\"@{}\".format(user.user_name),\n                            text=\"{}\".format(i.action[\"msg\"]), # type: ignore\n                            username=self.bot.name,\n                            icon_emoji=self.bot.icon_emoji,\n                        )\n\n                    elif i.action[\"type\"] == \"buy\": # type: ignore\n                        coin = i.action[\"coin\"] # type: ignore\n                        fromQty = \"{0:.6g}\".format(user.portfolio[coin] or 0)\n                        buyQty = i.action[\"qty\"]\n                        fromUSD = \"{0:.2f}\".format(user.balance)\n                        self.trader.buy(user.user_name, coin, buyQty)\n                        db_user = self.trader._getUser(user.user_name)\n                        user.portfolio = db_user.portfolio\n                        user.balance = db_user.balance\n                        toQty = \"{0:.6g}\".format(user.portfolio[coin] or 0)\n                        toUSD = \"{0:.2f}\".format(user.balance)\n                        msg = \"{}\\n[triggered] {} USD {} -> {}, {} {} -> {}\".format(\n                            i.render(),\n                            user.user_name,\n                            fromUSD,\n                            toUSD,\n                            coin,\n                            fromQty,\n                            toQty,\n                        )\n                        self.api_call(\n                            \"chat.postMessage\",\n                            channel=\"#crypto\",\n                            text=_mono(msg),\n                            username=self.bot.name,\n                            icon_emoji=self.bot.icon_emoji,\n                        )\n                        self._onLeaderboard(\"#crypto\", None)\n                    elif i.action[\"type\"] == \"sell\": # type: ignore\n                        coin = i.action[\"coin\"] # type: ignore\n                        fromQty = \"{0:.6g}\".format(user.portfolio[coin] or 0)\n                        sellQty = i.action[\"qty\"] # type: ignore\n                        fromUSD = \"{0:.2f}\".format(user.balance)\n                        self.trader.sell(user.user_name, coin, sellQty)\n                        db_user = self.trader._getUser(user.user_name)\n                        user.portfolio = db_user.portfolio\n                        user.balance = db_user.balance\n                        toQty = \"{0:.6g}\".format(user.portfolio[coin] or 0)\n                        toUSD = \"{0:.2f}\".format(user.balance)\n                        msg = (\n                            \"{}\\n[trade executed] {} USD {} -> {}, {} {} -> {}\".format(\n                                i.render(),\n                                user.user_name,\n                                fromUSD,\n                                toUSD,\n                                coin,\n                                fromQty,\n                                toQty,\n                            )\n                        )\n                        self.api_call(\n                            \"chat.postMessage\",\n                            channel=\"#crypto\",\n                            text=_mono(msg),\n                            username=self.bot.name,\n                            icon_emoji=self.bot.icon_emoji,\n                        )\n                        self._onLeaderboard(\"#crypto\", None)\n                    # action
succeeded, so remove it from ifs\n del ifs[idx]\n except Exception as e:\n i = ifs[idx]\n self.api_call(\n \"chat.postMessage\",\n channel=\"@{}\".format(user.user_name),\n text=\"Execution of if failed. Condition: {}, Action: {}, Error: {}\".format(\n i.condition, i.action, str(e)\n ),\n username=self.bot.name,\n icon_emoji=self.bot.icon_emoji,\n )\n idx = idx + 1\n self.trader._setUser(user)\n\n def deleteFileUploads(self, file):\n try:\n result = self.api_call(\"files.delete\", file=file)\n print(result)\n self.fileUploads = []\n except:\n print(\"failed to delete files in deleteFileUploads()\")\n\n def onHelp(self, cmd: Command):\n # crypto help\n channel, thread = cmd.channel, cmd.thread\n msg = \"\\n\".join(\n [\n \"crypto help\",\n \"crypto leaderboard\",\n \"crypto top\",\n \"crypto buy \",\n \"crypto sell \",\n \"crypto if \",\n \"crypto price \",\n \"crypto play\",\n \"crypto quit\",\n ]\n )\n self.postMessage(channel, _mono(msg), thread)\n\n def onWhen(self, cmd: Command):\n # crypto when\n channel, thread = cmd.channel, cmd.thread\n\n msg = \"no clue\"\n if cmd.args[0] == \"lambo\":\n msg = \":racing_car:\"\n elif cmd.args[0] == \"moon\":\n msg = \":full_moon_with_face:\"\n elif cmd.args[0] == \"hax\":\n msg = \"YOUR BALANCE = $420.69\"\n\n self.postMessage(channel, msg, thread)\n\n def onPing(self, cmd: Command):\n channel, thread = cmd.channel, cmd.thread\n self.postMessage(channel, \"pong!\", thread)\n\n def onNewUser(self, cmd: Command):\n # crypto play\n user_name, args, channel, thread = (\n cmd.user_name,\n cmd.args,\n cmd.channel,\n cmd.thread,\n )\n # create user here...\n self.trader.create_user(user_name)\n self.onLeaderboard(cmd)\n\n def onUserQuit(self, cmd: Command):\n # crypto quit\n user_name, args, channel, thread = (\n cmd.user_name,\n cmd.args,\n cmd.channel,\n cmd.thread,\n )\n # delete user here...\n self.trader.delete_user(user_name)\n self.onLeaderboard(cmd)\n\n def displayIfs(self, user_name: str, channel: str, thread: str) -> None:\n ifs = self.trader._getUser(user_name).ifs\n if len(ifs) == 0:\n msg = \"No ifs for {}!\".format(user_name)\n else:\n msg = \"\\n\".join([i.render() for i in ifs])\n msg = \"Crypto ifs for {}\\n{}\".format(user_name, msg)\n self.postMessage(channel, _mono(msg), thread)\n\n # crypto if btc > 100 alert\n # crypto if btc > 100 sell btc max\n # crypto if btc > 100 sell btc 10\n # crypto if btc < 50 buy btc max\n # crypto if btc < 50 buy btc 10\n # crypto if\n # crypto if delete \n\n def onIf(self, cmd: Command):\n user_name, args, channel, thread = (\n cmd.user_name,\n cmd.args,\n cmd.channel,\n cmd.thread,\n )\n\n print(args)\n\n # crypto if\n if len(args) == 0:\n self.displayIfs(user_name, channel, thread)\n\n # crypto if delete \n elif args[0] == \"delete\":\n try:\n id = int(args[1])\n self.trader.deleteIf(user_name, id)\n self.displayIfs(user_name, channel, thread)\n except Exception as e:\n print(e)\n self.postMessage(\n channel,\n \"`crypto delete ` is the format you're looking for.\",\n thread,\n )\n\n # crypto if btc > 100 alert\n # crypto if btc > 100 buy btc 100\n else:\n try:\n coin = args[0]\n comparator = args[1]\n amount = float(args[2])\n action = args[3]\n if action == \"alert\":\n try:\n self.trader.setAlertIf(user_name, coin, comparator, amount)\n self.displayIfs(user_name, channel, thread)\n except Exception as e:\n self.postMessage(channel, _mono(str(e)), thread)\n return\n elif action == \"buy\":\n try:\n buyCoin = args[4]\n buyQty = args[5]\n self.trader.setBuyIf(\n user_name, coin, comparator, amount, 
buyCoin, buyQty\n )\n self.displayIfs(user_name, channel, thread)\n except Exception as e:\n self.postMessage(channel, _mono(str(e)), thread)\n return\n elif action == \"sell\":\n try:\n sellCoin = args[4]\n sellQty = args[5]\n self.trader.setSellIf(\n user_name, coin, comparator, amount, sellCoin, sellQty\n )\n self.displayIfs(user_name, channel, thread)\n except Exception as e:\n self.postMessage(channel, _mono(str(e)), thread)\n return\n except Exception as e:\n print(e)\n msg = \"\\n\".join(\n [\n \"example commands\",\n \"crypto if btc > 100 alert\",\n \"crypto if btc > 100 sell btc max\",\n \"crypto if btc > 100 sell btc 10\",\n \"crypto if btc < 50 buy btc max\",\n \"crypto if btc < 50 buy btc 10\",\n \"crypto if\",\n \"crypto if delete <id>\",\n ]\n )\n self.postMessage(channel, _mono(msg), thread)\n\n def onBuy(self, cmd: Command):\n # crypto buy eth 200\n user_name, args, channel, thread = (\n cmd.user_name,\n cmd.args,\n cmd.channel,\n cmd.thread,\n )\n try:\n ticker = args[0].lower().strip()\n quantity = args[1].lower().strip()\n except:\n self.postMessage(\n channel,\n \"`crypto buy ` is the format you're looking for.\",\n thread,\n )\n return\n try:\n self.trader.buy(user_name, ticker, quantity)\n self.postMessage(\n channel,\n \"{u} bought {t} x {q}\".format(u=user_name, t=ticker, q=quantity),\n thread,\n )\n self.onLeaderboard(cmd)\n except InsufficientFundsError:\n self.postMessage(\n channel, \"Insufficient funds. Try selling some coins for $!\", thread\n )\n except:\n self.postMessage(channel, \"Something went wrong.\", thread)\n\n def onSell(self, cmd: Command):\n # crypto sell eth 200\n user_name, args, channel, thread = (\n cmd.user_name,\n cmd.args,\n cmd.channel,\n cmd.thread,\n )\n try:\n ticker = args[0].lower().strip()\n quantity = args[1].lower().strip()\n except:\n self.postMessage(\n channel,\n \"`crypto sell ` is the format you're looking for.\",\n thread,\n )\n return\n try:\n self.trader.sell(user_name, ticker, quantity)\n self.postMessage(\n channel,\n \"{u} sold {t} x {q}\".format(u=user_name, t=ticker, q=quantity),\n thread,\n )\n self.onLeaderboard(cmd)\n except InsufficientCoinsError:\n self.postMessage(\n channel,\n \"{u} does not have {t} x {q} to sell!\".format(\n u=user_name, t=ticker, q=quantity\n ),\n thread,\n )\n except:\n self.postMessage(channel, \"Something went wrong.\", thread)\n\n def _onLeaderboard(self, channel: str, thread: Optional[str]):\n png = self.trader.leaderboard()\n try:\n if self.lastLeaderboard:\n self.api_call(\"files.delete\", file=self.lastLeaderboard)\n self.lastLeaderboard = None\n response = self.api_call(\n \"files.upload\",\n channels=[channel],\n username=self.bot.name,\n icon_emoji=self.bot.icon_emoji,\n filename=\"leaderboard.png\",\n file=png\n )\n self.lastLeaderboard = response[\"file\"][\"id\"]\n except:\n self.postMessage(channel, \"Something went wrong.\", thread)\n\n def onLeaderboard(self, cmd: Command):\n # crypto leaderboard\n channel, thread = (cmd.channel, cmd.thread)\n self._onLeaderboard(channel, thread)\n\n def onPrices(self, cmd: Command):\n # example slack command:\n # \"crypto price BTC ETH\"\n tickers, channel, thread = cmd.args, cmd.channel, cmd.thread\n res = {\n ticker + \": \" + str(price)\n for ticker, price in self.trader.api.getPrices().items()\n if ticker.lower() in map(lambda t: t.lower(), tickers)\n }\n self.postMessage(channel, _mono(\", \".join(res)), thread)\n\n def onTopCoins(self, cmd: Command):\n # example slack commands:\n # crypto top\n # crypto top 100\n args, channel, thread 
= cmd.args, cmd.channel, cmd.thread\n        try:\n            numCoins = int(args[0]) if args else 10\n            numCoins = numCoins if numCoins <= 25 else 25\n            png = self.trader.topCoins(numCoins)\n\n            if self.lastTopCoins:\n                self.api_call(\"files.delete\", file=self.lastTopCoins)\n                self.lastTopCoins = None\n            response = self.api_call(\n                \"files.upload\", channels=[channel], filename=\"top coins.png\", file=png\n            )\n            self.lastTopCoins = response[\"file\"][\"id\"]\n        except:\n            self.postMessage(channel, \"crypto top ... try again\", thread)\n\n\ndef _mono(str):\n    return \"```{str}```\".format(str=str)\n","sub_path":"crypto/CryptoBot.py","file_name":"CryptoBot.py","file_ext":"py","file_size_in_byte":16792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"114176401","text":"# Reports\nnew_button = 'id=new'\ndelete_button = 'id=delete'\ndelete_button_disabled = 'css=#delete.disabled'\nlog_query = 'id=log'\ntable = 'id=DSYTable'\ntable_reports = \"//*[@id='DSYTable']/tbody/tr\"\ntable_report_name_ = table_reports + '[%s]/td[2]' # 1-based row\ntable_report_checkbox_ = table_reports + '[%s]/td[1]/input'\ntable_report_cell_ = table_reports + '[%s]/td[%s]' # 1-based (row, col)\ntable_report_cell_link_ = table_reports + '[%s]/td[%s]/a'\n\n# New Report\nnew_name_field = 'name=report_name'\nnew_generate_button = 'id=button_gen'\nnew_cancel_button = 'id=button_cancel'\nradio_one_time = 'id=r_fqcy_0'\nradio_weekly = 'id=r_fqcy_1'\nradio_monthly = 'id=r_fqcy_2'\nnew_report_name_inline_msg = 'css=#report_name_error'\nnew_report_tooltip = 'css=.msg'\n\n# Edit Report\nedit_name_field = 'name=report_name'\nedit_save_button = 'id=button_save'\nedit_cancel_button = 'id=button_cancel'\n\nimport sys, varutils\nget_variables = varutils.get_variables(sys.modules[__name__], 'loc_reports')\n","sub_path":"lib/tm/wfbssweb/loc/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"455250524","text":"import math\nimport numpy as np\nfrom matplotlib.tri import Triangulation\nimport matplotlib.pyplot as plt # For displaying array as image\nfrom matplotlib.patches import Polygon # https://matplotlib.org/3.1.1/gallery/event_handling/trifinder_event_demo.html\nimport pylab\n\n# We want to read the file and make sure the vertex mappings are retained.\ndef readObjFile(path, filename):\n\tf = open(path + filename, \"r+\")\n\n\tOriginalsamples = np.array([])\n\tFlatsamples = np.array([])\n\tOriginalIndexMap = dict()\n\tFlatIndexMap = dict()\n\n\tOriginalfaces = np.array([], dtype=np.int)\n\t#Originalfaces = []\n\tFlatfaces = np.array([], dtype=np.int)\n\n\tfileContent = f.read().split(\"\\n\");\n\tf.close()\n\n\tglobalIndex = 0\n\toriginalIndex = 0\n\tflatIndex = 0\n\tfaceCount = 0\n\n\tfor line in fileContent:\n\t\t# print(line)\n\t\tlineSplit = line.split()\n\t\t# print(lineSplit)\n\t\tif (len(lineSplit) > 0):\n\t\t\tlineType = lineSplit[0]\n\t\t\tif (lineType == 'v'):\n\t\t\t\tglobalIndex += 1\n\t\t\t\toriginalIndex += 1\n\n\t\t\t\t# This is a vertex for original mesh.\n\t\t\t\t# Originalsamples = np.concatenate((Originalsamples, np.array(lineSplit[1:])))\n\t\t\t\tOriginalsamples = np.concatenate((Originalsamples, [float(i) for i in lineSplit[1:]] ))\n\n\n\t\t\telif (lineType == 'vt'):\n\t\t\t\tglobalIndex += 1\n\t\t\t\tflatIndex += 1\n\n\t\t\t\t# This is a vertex for flattened mesh.\n\t\t\t\t# Flatsamples = np.concatenate((Flatsamples,
np.array(lineSplit[1:])))\n\t\t\t\tFlatsamples = np.concatenate((Flatsamples, [float(i) for i in lineSplit[1:]] ))\n\n\n\t\t\telif (lineType == 'f'):\n\t\t\t\tfaceCount += 1\n\t\t\t\t# Now we need to create the faces.\n\t\t\t\t# The face entries f v/vt v/vt v/vt\n\t\t\t\tv1, vf1 = lineSplit[1].split(\"/\")\n\t\t\t\tv2, vf2 = lineSplit[2].split(\"/\")\n\t\t\t\tv3, vf3 = lineSplit[3].split(\"/\")\n\n\t\t\t\t# So the v and vt are interleaved throughout the file. We need a way to figure out the un-interleaved index of the vertices.\n\t\t\t\t# We need a way to map from f index to separated f index.\n\t\t\t\tnewFace = [int(v1)-1, int(v2)-1, int(v3)-1 ]\n\t\t\t\tnewFlatFace = [ int(vf1)-1, int(vf2)-1, int(vf3)-1 ]\n\n\t\t\t\tOriginalfaces = np.concatenate((Originalfaces, newFace))\n\t\t\t\t#Originalfaces.append( newFace )\n\t\t\t\tFlatfaces = np.concatenate((Flatfaces, newFlatFace))\n\n\t\t\t\t# Create index maps.\n\t\t\t\t# OriginalIndexMap[tuple(newFace)] = tuple(newFlatFace)\n\t\t\t\t# FlatIndexMap[tuple(newFlatFace)] = tuple(newFace)\n\n\t\t\telse:\n\t\t\t\traise(\"Unkown line type: \", line)\n\n\tOriginalsamples = np.reshape(Originalsamples, (originalIndex,3))\n\tFlatsamples = np.reshape(Flatsamples, (flatIndex, 2))\n\tOriginalfaces = np.reshape(Originalfaces, (faceCount, 3))\n\tFlatfaces = np.reshape(Flatfaces, (faceCount, 3))\n\n\tprint(\"GlobalIndex: \", globalIndex)\n\tprint(\"Originalsamples: \", len(Originalsamples))\n\t# Shift Original coordinates to positive quadrant.\n\txmin = abs(np.min(Originalsamples[:, 0]))\n\tymin = abs(np.min(Originalsamples[:, 1]))\n\tOriginalsamples[:, 0] = Originalsamples[:, 0] + xmin\n\tOriginalsamples[:, 1] = Originalsamples[:, 1] + ymin\n\n\tprint(\"Originalfaces: \", len(Originalfaces))\n\tprint(\"Max of faces: \", np.max(Originalfaces))\n\tprint(\"Flatfaces: \", len(Flatfaces))\n\n\n\tOriginalSampleMap = np.array([None] * len(Originalsamples))\n\tFlatSamplesMap = np.array([None]*len(Flatsamples))\n\n\t# Create Original Triangle <--> Flat Triangle map\n\tindexOri = 1\n\t# for triangle in Originalfaces:\n\t# \tprint(triangle)\n\t# \tFlattriangle = OriginalIndexMap[tuple(triangle)]\n\t# \tprint(\"Flat triangle: \", Flattriangle)\n\t# \tindexFlat = np.where(Flatfaces == OriginalIndexMap[tuple(triangle)])\n\t#\n\t# \tprint(\"IndexFlat: \", indexFlat)\n\t# \tprint(Flatfaces[indexFlat[:, 0]])\n\t#\n\t# \tOriginalSampleMap[indexOri] = indexFlat[0][0]\n\t#\n\t# \tindexOri += 1\n\t# \tbreak;\n\n\n\t\t# Line can be one of the following types: v, vt, or f.\n\t\t# v is a vertex in 3D\n\t\t# vt is a vertex in 2D\n\t\t# f is a facet definition. It will contain 3 vertices that form a triangle. There are pairings that map from the 3D vertex to the 2D vertex.\n\n\t\t# TODO:\n\t\t# Create arrays for the 3D and 2D vertices.\n\t\t# Create mapping 3D to 2D vertices by using the f lines.\n\n\t\t# The vertex references are combined.\n\n\n\t\t\t# 1. Create list of vertices. Either prefix of v or vt.\n\t\t\t#\n\t\t\t# What we need is a list of 3D coordinates and a list of 2D coordinates.\n\t\t\t# The 3D coordinates will represent the original mesh (v)\n\t\t\t# The 2D coordinates will represent the flattened mesh (vt)\n\t\t\t#\n\t\t\t# The vertices will be interleaved. So the indeces are common between the 3D and 2D coordinates.\n\t\t\t#\n\t\t\t#\n\t\t\t# 2. Create list of facets.\n\t\t\t# These will be prefixed with an f, then followed by three pairs of indeces in the format of /.\n\t\t\t#\n\t\t\t# These represent the triangles.\n\t\t\t#\n\t\t\t# 3. 
Create the Triangulation objects for the 3D Mesh and 2D mesh.\n\t\t\t# The Triangulation objects contain x, y, and triangle data structures: https://www.programcreek.com/python/example/91951/matplotlib.tri.Triangulation\n\t\t\t#\n\t\t\t#\n\n\tprint(\"Reading OBJ file ****************************************************\")\n\tValidateSamples(Flatsamples, Flatfaces)\n\tprint(\"Done OBJ file ****************************************************\")\n\treturn Originalsamples, Originalfaces, Flatsamples, Flatfaces\n\n\n\ndef ValidateSamples(samples, faces):\n\t# We will go through the list of points and confirm there are no duplicate values.\n\texistingPoints = {}\n\tduplicate = False\n\tfor point in samples:\n\t\tpoint = tuple(point)\n\t\t# print('Checking: ', point)\n\t\tif existingPoints.get(point, None) == None:\n\t\t\texistingPoints[point] = 1\n\t\telse:\n\t\t\tprint('duplicatePoint: ', point)\n\t\t\tduplicate = True\n\n\tif duplicate:\n\t\tprint(\"Failed point checking.\")\n\tprint(\"Done checking points.\")\n\n\tprint(\"Checking Faces: ***************************\")\n\texistingFaces = {}\n\tduplicate = False\n\tfailedFaces = []\n\tfor face in faces:\n\t\tface = tuple(np.sort(face))\n\t\t# print('Checking:', face)\n\t\tif existingFaces.get(face, None) == None:\n\t\t\texistingFaces[face] = 1\n\t\telse:\n\t\t\tprint('duplicateFace: ', face)\n\t\t\tduplicate = True\n\tif duplicate:\n\t\tprint(\"Failed Face checking.\")\n\tprint(\"Done Checking Faces: ***************************\")\n\ndef update_polygon(tri, polygon):\n\tif tri == -1:\n\t\tpoints = [0, 0, 0]\n\telse:\n\t\tpoints = triang.triangles[tri]\n\t# print(\"Update polygon Points: \", points)\n\txs = triang.x[points]\n\tys = triang.y[points]\n\tpolygon.set_xy(np.column_stack([xs, ys]))\n\n\ndef update_polygon2(tri, polygon):\n\tif tri == -1:\n\t\tpoints = [0, 0, 0]\n\telse:\n\t\tpoints = triang2.triangles[tri]\n\t# print(\"Update polygon2 Points: \", points)\n\txs = triang2.x[points]\n\tys = triang2.y[points]\n\tpolygon.set_xy(np.column_stack([xs, ys]))\n\n\ndef motion_notify(event):\n\tif event.inaxes == ax1:\n\t\ttri = trifinder(event.xdata, event.ydata)\n\telif event.inaxes == ax2:\n\t\ttri = trifinder2(event.xdata, event.ydata) # Make the event handler check both images.\n\telse:\n\t\ttri = -1\n\n\tupdate_polygon(tri, polygon1)\n\tupdate_polygon2(tri, polygon2) # alpha - force an update on the other mesh.\n\tplt.title('In triangle %i' % tri)\n\tfig = pylab.gcf()\n\tfig.canvas.set_window_title('In triangle %i' % tri)\n\tevent.canvas.draw()\n\ndef motion_notify2(event):\n\t# No longer required. 
Combinted into motion notify1.\n\tif event.inaxes is None:\n\t\ttri = -1\n\telse:\n\t\ttri = trifinder2(event.xdata, event.ydata)\n\n\tupdate_polygon(tri, polygon1) # alpha - force an update on the other mesh.\n\tupdate_polygon2(tri, polygon2)\n\tplt.title('In triangle %i' % tri)\n\tfig = pylab.gcf()\n\tfig.canvas.set_window_title('In triangle %i' % tri)\n\tevent.canvas.draw()\n\ndef on_click(event):\n\t# https://stackoverflow.com/questions/41824662/how-to-plot-a-dot-each-time-at-the-point-the-mouse-is-clicked-in-matplotlib\n\tif event.inaxes == ax1:\n\t\tprint('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' % (event.button, event.x, event.y, event.xdata, event.ydata))\n\t\tax1.plot(event.xdata, event.ydata, ',')\n\t\tevent.canvas.draw()\n\telif event.inaxes == ax2:\n\t\tax2.plot(event.xdata, event.ydata, ',')\n\t\tevent.canvas.draw()\n\n\n\nif __name__ == '__main__':\n\tpath = \"../../boundary-first-flattening/build/\"\n\tOriginalsamples, Originalfaces, Flatsamples, Flatfaces = readObjFile(path, \"test1_out.obj\")\n\n\n\t# exit(1)\n\t#Originalfaces = list(Originalfaces)\n\tmin_radius = .25\n\n\t# First subplot\n\ttriang = Triangulation(Originalsamples[:, 0], Originalsamples[:, 1], triangles=Originalfaces)\n\tax1 = plt.subplot(121, aspect='equal') # Create first subplot.\n\tplt.triplot(triang, 'b-')\n\n\ttriang.set_mask(np.hypot(Originalsamples[:, 0][triang.triangles].mean(axis=1), Originalsamples[:, 1][triang.triangles].mean(axis=1)) < min_radius)\n\ttrifinder = triang.get_trifinder()\n\n\tpolygon1 = Polygon([[0, 0], [0, 0]], facecolor='y') # dummy data for xs,ys\n\tupdate_polygon(-1, polygon1)\n\n\tplt.gca().add_patch(polygon1)\n\tplt.gcf().canvas.mpl_connect('motion_notify_event', motion_notify)\n\t# plt.gcf().canvas.mpl_connect('button_press_event', motion_notify1) # https://matplotlib.org/3.1.1/users/event_handling.html\n\tplt.gcf().canvas.mpl_connect('button_press_event',\n\t on_click) # https://matplotlib.org/3.1.1/users/event_handling.html\n\n\t# Second subplot\n\tprint(Flatfaces)\n\ttriang2 = Triangulation(Flatsamples[:, 0], Flatsamples[:, 1], triangles=Flatfaces)\n\tax2 = plt.subplot(122, aspect='equal') # Create first subplot.\n\tplt.triplot(triang2, 'b-')\n\n\ttriang2.set_mask(np.hypot(Flatsamples[:, 0][triang2.triangles].mean(axis=1), Flatsamples[:, 1][triang2.triangles].mean(axis=1)) < min_radius)\n\ttrifinder2 = triang2.get_trifinder()\n\n\n\tpolygon2 = Polygon([[0, 0], [0, 0]], facecolor='y') # dummy data for xs,ys\n\tupdate_polygon2(-1, polygon2)\n\n\tplt.gca().add_patch(polygon2)\n\t# plt.gcf().canvas.mpl_connect('motion_notify_event', motion_notify2)\n\t# plt.gcf().canvas.mpl_connect('button_press_event', on_click) # https://matplotlib.org/3.1.1/users/event_handling.html\n\n\tplt.show()\n\n","sub_path":"PycharmProjects/geodesic/readOBJFile.py","file_name":"readOBJFile.py","file_ext":"py","file_size_in_byte":9631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"485447294","text":"#! 
/usr/bin/env python\n\"\"\"\nSpace Traversal Matching: compute matches from rays projecting them into voxels.\n\nExample:\n\n export PATH_INPUT_DATA=\"../../Documentation/TestData/Processed_DATA/MyExperiment/Parallel/Matching/Rays/rays_1-10.dat\"\n ./stm.py $PATH_INPUT_DATA 1 2 2 0.2 400 400 250 2\n\nor (to specify the limits of the visualized region):\n\n ./stm.py $PATH_INPUT_DATA 1 2 2 0.2 400 400 250 2 \"[[-140, 140], [-150, 150], [5, 170]]\"\n\n\"\"\"\nimport os\nimport copy\nimport struct\nimport traceback\nfrom time import perf_counter\nimport argparse\nimport h5py\nfrom stm_util import save_hdf5\n\nUSE_UNOPTIMIZED = os.environ.get(\"STM_PYTHON_USE_UNOPTIMIZED\", False)\n\n\"\"\"\nFor xonsh::\n\n $PATH_INPUT_DATA=\"../../Documentation/TestData/Processed_DATA/MyExperiment/Parallel/Matching/Rays/rays_1-10.dat\"\n\nNote: the \"unoptimized\" code is actually slightly faster than the first\nPython code: 179.5 s versus 184 s, i.e. 2.4 % faster :-)\n\nTo be compared to the perf of the C++ code: 8.45 s!\n\n- The old Python code was 21.8 time slower than the optimized C++ one.\n\n- The unoptimized Python code is 21.2 time slower than the optimized C++ one.\n\nCommand to launch the same computation (?) with Python::\n\n STM_PYTHON_USE_UNOPTIMIZED=1 python stm.py $PATH_INPUT_DATA 1 2 2 0.2 400 400 250 2\n python stm.py $PATH_INPUT_DATA 1 2 2 0.2 400 400 250 2\n\nWith both versions, the same number of matches are found out of the same number\nof candidates: \"6754 matched found (out of 135271 candidates)\".\n\nCommand to launch the C++ code::\n\n ../STMCpp/STM -i $PATH_INPUT_DATA -f 1 -c 2 -d 0.2 -m 2 -x 400 -y 400 -z 250 -b -140 140 -150 150 5 170 --hdf5\n\nThe result of the C++ code is 7466 matched found (out of 135271 candidates)\n\"\"\"\n\nif not USE_UNOPTIMIZED:\n from stm_util import space_traversal_matching\nelse:\n print(\"Using stm_util_unoptimized\")\n from stm_util_unoptimized import space_traversal_matching\n\n\ndef compute_stm(\n filename,\n start_frame,\n stop_frames,\n cam_match,\n max_distance,\n nx,\n ny,\n nz,\n max_matches_per_ray,\n bounding_box=[[-140, 140], [-150, 150], [5, 170]],\n min_distance_matches_1ray=None,\n neighbours=6,\n):\n \"\"\"\n Compute matches from rays projecting them into voxels.\n\n See the output of ``./stm.py -h`` for the meaning of the arguments.\n\n - neighbours\n\n Number of illuminated voxels: due to noise, when a ray crosses a voxel, it\n is possible that in reality, the ray crosses a close voxel. neighbours\n indicates how many neighbours we consider in reality when a ray crosses a\n voxel. 
=6 by defaut.\n\n \"\"\"\n #############################################################################################################\n # Parameters to adjust\n tstart = perf_counter()\n\n #############################################################################################################\n\n fileout = copy.copy(filename).split(\".\")\n fileout = \".\".join(fileout[0 : len(fileout) - 1])\n fileout = (\n fileout.replace(\"rays\", \"matched\")\n + f\"cam{cam_match}_{start_frame}-{stop_frames-1}.h5\"\n )\n fileforlog = copy.copy(fileout).split(\".\")\n fileforlog = \".\".join(fileforlog[0 : len(fileforlog) - 1])\n filelog = fileforlog + \".log\"\n fin = open(filename, \"rb\")\n frameid = start_frame\n numpts = fin.read(4) # Read 4 bytes header\n with h5py.File(fileout, \"w\") as file:\n while len(numpts) > 0 and frameid < stop_frames: # If something is read\n numpts = struct.unpack(\"I\", numpts)[\n 0\n ] # Interpret header as 4 byte uint\n with open(filelog, \"a\") as flog:\n flog.write(\n f\"#######\\nFrame: {frameid}\\nNumber of rays: {numpts}\\n\"\n )\n\n print(\"Frame:\", frameid, \". # of rays:\", numpts)\n\n # Read rays\n raydata = fin.read(numpts * 27) # 27 bytes per line 2+1+6*4\n raydata = struct.unpack(\n \"=\" + (\"BH6f\" * numpts), raydata\n ) # Create string '=BHFFFFFFBHFFFFFFBHFFFFFF...BHFFFFFF'\n raydata = list(\n map(\n lambda i: list(raydata[8 * i : 8 * i + 8]),\n range(len(raydata) // 8),\n )\n ) # Reshape to 8*N np.arreyreshape converts everything to floats...\n # The actual call\n try:\n output = space_traversal_matching(\n raydata,\n bounding_box,\n nx=nx,\n nz=nz,\n ny=ny,\n cam_match=cam_match,\n neighbours=neighbours,\n logfile=filelog,\n max_distance=max_distance,\n min_distance_matches_1ray=min_distance_matches_1ray,\n )\n except ValueError:\n tb = traceback.format_exc()\n with open(filelog, \"a\") as flog:\n flog.write(tb + \"\\n\")\n\n raise\n\n # Prepare output\n print(\"Matches found:\", len(output))\n save_hdf5(output, frameid, file)\n numpts = fin.read(4) # Read next header\n frameid += 1\n fin.close()\n print(\"Finished\")\n\n elapsed = perf_counter() - tstart\n print(f\"Elapsed time: {elapsed:.2f} s\")\n print(f\"Elapsed time/frame: {elapsed / (stop_frames - start_frame):.2f} s\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument(\n \"path_file\",\n type=str,\n help=\"Path towards the file containing the ray data\",\n )\n\n parser.add_argument(\n \"start_frame\",\n type=int,\n help=\"Index of the first frame\",\n )\n\n parser.add_argument(\n \"stop_frame\",\n type=int,\n help=\"Index of the last frame + 1\",\n )\n\n parser.add_argument(\n \"cam_match\",\n type=int,\n help=\"Minimum number of rays crossing to get a match\",\n )\n\n parser.add_argument(\n \"max_distance\",\n type=float,\n help=\"Maximum distance allowed for a match\",\n )\n\n parser.add_argument(\n \"nx\",\n type=int,\n help=\"Number of voxels in the x direction\",\n )\n\n parser.add_argument(\n \"ny\",\n type=int,\n help=\"Number of voxels in the y direction\",\n )\n\n parser.add_argument(\n \"nz\",\n type=int,\n help=\"Number of voxels in the z direction\",\n )\n\n parser.add_argument(\n \"max_matches_per_ray\",\n type=int,\n help=\"Maximum number of matches/ray\",\n )\n\n parser.add_argument(\n \"bounding_box\",\n type=str,\n nargs=\"?\",\n help=(\n \"Corresponds to the volume visualized \"\n \"[[minX, maxX], [minY, maxY], [minZ, maxZ]]\"\n ),\n default=\"[[-140, 140], 
[-150, 150], [5, 170]]\",\n )\n\n parser.add_argument(\n \"-md1r\",\n \"--min-distance-matches-1ray\",\n type=float,\n default=None,\n help=\"Minimum distance for multiple matches per ray\",\n )\n\n args = parser.parse_args()\n bounding_box_as_str = args.bounding_box\n args.bounding_box = eval(bounding_box_as_str)\n\n return args\n\n\ndef main():\n\n args = parse_args()\n print(args)\n\n compute_stm(\n args.path_file,\n args.start_frame,\n args.stop_frame,\n args.cam_match,\n args.max_distance,\n args.nx,\n args.ny,\n args.nz,\n args.max_matches_per_ray,\n args.bounding_box,\n args.min_distance_matches_1ray,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Matching/STMPython/stm_hdf5.py","file_name":"stm_hdf5.py","file_ext":"py","file_size_in_byte":7663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"499313900","text":"import clr\n\nclr.AddReference('RevitAPI')\nfrom Autodesk.Revit.DB import *\n\nclr.AddReference('RevitServices')\nimport RevitServices\nfrom RevitServices.Persistence import DocumentManager\n\ndoc = DocumentManager.Instance.CurrentDBDocument\nelements = UnwrapElement(IN[0])\n\nWorksetIDList = list()\nWorksetNameList = list()\n\nfor e in elements:\n WorksetIDList.append(doc.GetWorksetTable().GetWorkset(e.WorksetId))\n\nfor ws in WorksetIDList:\n WorksetNameList.append(ws.Name)\n\nOUT = WorksetNameList","sub_path":"WorksetGetNames.py","file_name":"WorksetGetNames.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"207068722","text":"# Imports\nimport pandas as pd\nimport numpy as np\nimport scipy.stats as stats\n\n# Import plotting libraries\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom sklearn.model_selection import GridSearchCV\n\nfrom DMProj1 import *\n\n# Ignore Warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Define target variable and features\ny = data_OH['IsBadBuy']\nX = data_OH.drop(['IsBadBuy'], axis=1)\n\n\n# Split the data into test and train groups with a test size of 20%\nX_mat = X.values # Turn X into a matrix\nX_train, X_test, y_train, y_test = train_test_split(X_mat, y, test_size=0.2, \n stratify=y, random_state=rs)\n\n# Build the decision Tree\nmodel = DecisionTreeClassifier(random_state=rs) # Define the model\nmodel.fit(X_train, y_train) # Fit the data\ny_pred = model.predict(X_test)\n\n\nprint(\"With no parameter tuning\")\nprint(\"Train accuracy:\", model.score(X_train, y_train))\nprint(\"Test accuracy:\", model.score(X_test, y_test))\nprint(classification_report(y_test, y_pred))\n\n# grab feature importances from the model and feature name from the original X\nimportances = model.feature_importances_\nfeature_names = X.columns\n\n# sort them out in descending order\nindices = np.argsort(importances)\nindices = np.flip(indices, axis=0)\n\n# limit to 20 features, you can leave this out to print out everything\nindices = indices[:20]\n\nfor i in indices:\n print(feature_names[i], ':', importances[i])\n \n\n# Test the model on different depths and plot the accuracy\ntest_score = [] # Define empty set for test scores\ntrain_score = [] # Define empty set for train scores\n\n# check the model performance for max depth from 2-20\nfor max_depth in range(2, 21):\n model = 
DecisionTreeClassifier(max_depth=max_depth, random_state=rs) # Define the model\n    model.fit(X_train, y_train) # Fit the model to the data\n    \n    # Append the score into empty sets\n    test_score.append(model.score(X_test, y_test)) \n    train_score.append(model.score(X_train, y_train))\n    \n    \n# Plot the accuracy scores and see the best range for depth\nplt.plot(range(2, 21), train_score, 'b', range(2,21), test_score, 'r')\nplt.xlabel('max_depth\\nBlue = training acc. Red = test acc.')\nplt.ylabel('accuracy')\nplt.show()\n\n# Perform a grid search over the best hyperparameters\nparams = {'criterion': ['gini', 'entropy'], # What criterion to check\n          'max_depth': range(2, 7), # Check the depth, use the graph generated above\n          'min_samples_leaf': range(5, 26, 5)} # Define the min sample leafs\n\ncv = GridSearchCV(param_grid=params, estimator=DecisionTreeClassifier(random_state=rs), cv=10) # Define the model\ncv.fit(X_train, y_train) # Fit the data to the model\ny_pred = cv.predict(X_test) # test the best model\n\nprint(\"Using grid search the accuracy is\")\nprint(\"Train accuracy:\", cv.score(X_train, y_train))\nprint(\"Test accuracy:\", cv.score(X_test, y_test))\nprint(classification_report(y_test, y_pred))\n\n# print parameters of the best model\nprint(cv.best_params_)\n\n# Use these parameters to refine the model\nparams = {'max_depth': range(2, cv.best_params_['max_depth']+2),\n          'min_samples_leaf': range(cv.best_params_['min_samples_leaf']-4, \n                                    cv.best_params_['min_samples_leaf']+5)}\n\ncv = GridSearchCV(param_grid=params, estimator=DecisionTreeClassifier(criterion= cv.best_params_['criterion'], random_state=rs), cv=10)\ncv.fit(X_train, y_train)\n\nprint(\"Using the refined parameters\")\nprint(\"Train accuracy:\", cv.score(X_train, y_train))\nprint(\"Test accuracy:\", cv.score(X_test, y_test))\n\n# test the best model\ny_pred = cv.predict(X_test)\nprint(classification_report(y_test, y_pred))\n\n# print parameters of the best model\nprint(cv.best_params_)","sub_path":"Assignment_1/DTree.py","file_name":"DTree.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"41458596","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\nimport unittest\n\nfrom lab2 import RecursiveDict\n\n__author__ = 'asaskevich'\n\n\nclass TestCase(unittest.TestCase):\n    def test_general(self):\n        d = RecursiveDict()\n        d['a'] = 1\n        d['b']['a'] = 10\n        self.assertEqual(d['a'], 1)\n        self.assertEqual(d['b']['a'], 10)\n\n    def test_exceptions_set(self):\n        try:\n            d = RecursiveDict()\n            d['a'][{}] = 1\n        except Exception as e:\n            self.assertIsInstance(e, KeyError)\n\n    def test_exceptions_get(self):\n        try:\n            d = RecursiveDict()\n            d[d]\n        except Exception as e:\n            self.assertIsInstance(e, KeyError)\n\n    def test_contains(self):\n        d = RecursiveDict()\n        self.assertEqual('a' in d, False)\n        self.assertEqual('a' in d, False)\n        self.assertEqual('a' in d['a']['b']['c'], False)\n        self.assertEqual('a' in d, True)\n        d['a'] = 1\n        self.assertEqual('a' in d, True)\n\n    def test_delete(self):\n        d = RecursiveDict()\n        d['a']['b']['c'] = 10\n        del d['a']['b']\n        self.assertEqual('b' in d['a'], False)\n\n    def test_str(self):\n        d = RecursiveDict()\n        d['a']['b'] = 1\n        self.assertEqual(str(d), '{\"a\":{\"b\":1}}')\n        self.assertEqual(str(RecursiveDict()), '{}')\n","sub_path":"kurs_3/sem_1/IGI/lb/Laboratornaya_2/Лабораторная
2/tests/test_recursive_dict.py","file_name":"test_recursive_dict.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"140078587","text":"\"\"\"\nComputational Neurodynamics\nExercise 4 - Dynamical complexity\n\n(C) Chen Chen, Jiarou Fan, Yingjing Feng, 2015\n\"\"\"\n\nimport random as rd\nimport numpy as np\n\ndef GetWeight(i,j,N1,N2):\n \"\"\"\n GetWeight define by the type of connection\n \n Inputs:\n i,j -- connect from j neuron to i neuron.\n N1 -- layer 0 neurons number.\n N2 -- layer 1 neurons number.\n\n return:\n weight \n \"\"\"\n if 0<=i 0):\n i = rd.choice(range(MSize * k, MSize * (k + 1)))\n j = rd.choice(range(MSize * k, (1 + k) * MSize))\n if(i != j) and CIJ[i,j] != 1:\n CIJ[i,j] = 1 #connect\n times = times - 1\n\n ## Initialize matrix from e neuron to i neuro, if one excitatoty should connect to only one inhibitory\n mark = np.zeros(M) # Mark select each module to inhibitory neuron total times mark[8]\n for i in range(N1, N): # For each inhibitory neuron\n k = rd.choice(range(M)) # Select a module k\n while(mark[k] >= N2/M): k = rd.choice(range(M))# If the module have connect to 25 inhibitory, then re-select\n mark[k] = mark[k] + 1\n times = 4 # Each module, get 4 exbitatory neuron randomly\n while times > 0:\n j = rd.choice(range(MSize * k,MSize * (k + 1))) # random choose j from the k module\n if(np.sum(CIJ[N1:N,j]) == 0):# If haven't been connected to any inhibitory neuron\n CIJ[i,j] = 1 # Connect from j to i neuron\n times = times - 1\n\n ## Initialize matrix from i neuron to e neuro\n for i in range(0, N1):\n for j in range(N1, N):\n CIJ[i,j] = 1\n\n ## Initialize matrix from i neuron to i neuro\n for i in range(N1, N):\n for j in range(N1, N):\n CIJ[i,j] = 1\n\n ## Rewire\n CIJ = Rewire(CIJ, p, N1, N2)\n\n ## Get weight matrix for each connect\n SIJ = np.zeros((N, N))\n for i in range(0, N):\n for j in range (0, N):\n SIJ[i,j] = CIJ[i,j] * GetWeight(i, j, N1, N2)\n\n return SIJ\n\n","sub_path":"code/SMatrix.py","file_name":"SMatrix.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"457125082","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.urls import reverse_lazy\nfrom django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView\nfrom django.views.generic import TemplateView\n\nfrom .models import *\nfrom .forms import *\n\n\nclass CategoriaListView(ListView):\n model = Categoria\n template_name = 'categoria/listar.html'\n\n\nclass CategoriaCreateView(CreateView):\n model = Categoria\n template_name = 'categoria/formulario.html'\n fields = ('__all__')\n success_url = reverse_lazy('eventos:categoriaList')\n\n\nclass CategoriaUpdateView(UpdateView):\n model = Categoria\n template_name = 'categoria/formulario.html'\n fields = ('__all__')\n success_url = reverse_lazy('eventos:categoriaList')\n\n\nclass CategoriaDeleteView(DeleteView):\n model = Categoria\n template_name = 'categoria/eliminar.html'\n success_url = reverse_lazy('eventos:categoriaList')\n\n\nclass CategoriaDetailView(DetailView):\n model = Categoria\n template_name = 'categoria/detalle.html'\n\n\nclass EventoListView(ListView):\n model = Evento\n template_name = 'eventos/listar.html'\n\n def get_queryset(self):\n queryset = super(EventoListView, self).get_queryset()\n queryset = queryset.filter(organizador=self.request.user)\n\n return 
queryset\n\n\nclass EventoCreateView(CreateView):\n model = Evento\n template_name = 'eventos/formulario.html'\n form_class = EventoForm\n success_url = reverse_lazy('eventos:eventoList')\n\n def get_initial(self):\n return {\n 'organizador':self.request.user\n }\n\n\nclass EventoUpdateView(UpdateView):\n model = Evento\n template_name = 'eventos/formulario.html'\n form_class = EventoForm\n success_url = reverse_lazy('eventos:eventoList')\n\n\nclass EventoDeleteView(DeleteView):\n model = Evento\n template_name = 'eventos/eliminar.html'\n success_url = reverse_lazy('eventos:eventoList')\n\n\nclass EventoDetailView(DetailView):\n model = Evento\n template_name = 'eventos/detalle.html'\n\n\nclass HomeView(TemplateView):\n template_name = 'home.html'\n\n def get_context_data(self, **kwargs):\n context = super(HomeView, self).get_context_data(**kwargs)\n context['eventos'] = Evento.objects.all()\n context['categorias'] = Categoria.objects.all()\n context['organizadores'] = User.objects.all()\n\n return context\n\n","sub_path":"apps/eventos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"141355043","text":"\"\"\"\nMVP Test.py\nThe test parameters\n\"\"\"\nfrom ece163.Containers.Payload import Payload\nimport math\nimport ece163.Utilities.MatrixMath as mm\nimport ece163.Utilities.Rotations as Rotations\nimport ece163.Modeling.VehicleGeometry as VG\nimport ece163.Containers.Controls as Controls\nimport ece163.Modeling.VehicleAerodynamicsModel as VAM\nfrom ece163.Modeling import WindModel\nfrom ece163.Constants import VehiclePhysicalConstants as VPC\nfrom ece163.Containers import States\nfrom ece163.Containers import Inputs\nfrom ece163.Controls import VehicleTrim\nfrom ece163.Controls import VehiclePerturbationModels as Perturb\nfrom ece163.Controls import VehicleClosedLoopControl as VCLC\nfrom ece163.Controls import PayloadDeliveryControl as PDC\nfrom ece163.Sensors import SensorsModel\nfrom ece163.Containers import Target\nfrom ece163.Containers import Inputs\nfrom ece163 . Utilities import MatrixMath as mm\nfrom matplotlib import pyplot as plt\nimport math\nimport numpy as np\n\ntargetVa = 25\ntargetAlt = 100\n\nt_steps = np.arange(0,100, VPC.dT)\n\nn_steps = len(t_steps)\n\nflag = 0\n\nwhile flag != 1:\n northPosition = input(\"Enter a target Pn value: \")\n eastPosition = input(\"Enter a target Pe value: \")\n targetVa = int(input(\"Enter a target airspeed: \"))\n targetAlt = int(input(\"Enter a target altitude: \"))\n \n targetPn = int(northPosition)\n targetPe = int(eastPosition)\n\n if (abs(targetPe) < 100) or (abs(targetPn) < 100):\n print(\"Invalid target! Target is TOO CLOSE. \\nPlease input a Pn that is > 100 or < -100, and > 100 or < -100\")\n elif (targetPe > 5000) or (targetPn > 5000):\n print(\"Invalid target! Target is TOO FAR. \\nPlease input a Pn < 5,000 or > -5,000 and < 5,000 or > -5,000\")\n elif (targetVa > 35) or (targetVa < 25):\n print(\"Invalid target airspeed! \\nPlease input a Va > 25 and < 35\")\n elif (targetAlt > 200) or (targetAlt < 25):\n print(\"Invalid target altitude! 
\\nPlease input an altitude > 25 and < 200\") \n else:\n flag = 1\n\n\npdc = PDC.PayloadDeliveryControl(States.vehicleState(pn=targetPn,pe=targetPe))\n\n\nconGains = Controls.controlGains(kp_roll =3.0,\n kd_roll = 0.04,\n ki_roll = 0.001,\n kp_sideslip = 2.0,\n ki_sideslip = 2.0,\n kp_course = 5.0,\n ki_course = 2.0,\n kp_pitch = -10.0,\n kd_pitch = -0.8,\n kp_altitude = 0.08,\n ki_altitude = 0.03,\n kp_SpeedfromThrottle = 2.0,\n ki_SpeedfromThrottle = 1.0,\n kp_SpeedfromElevator = -0.5,\n ki_SpeedfromElevator = -0.1)\n\n\ninitState = States.vehicleState(pn=0.0,\n pe=0.0,\n pd=-100.0,\n u=20.0,\n v=0.0,\n w=0.0,\n yaw=math.pi/4,\n pitch=0.0,\n roll=0.0,\n p=0.0,\n q=0.0,\n r=0.0)\n\npdc.VCLC.setVehicleState(initState)\n\npdc.VCLC.setControlGains(conGains)\nPayloadPnVec = np.zeros(t_steps.shape)\nPayloadPeVec = np.zeros(t_steps.shape)\nPayloadPdVec = np.zeros(t_steps.shape)\n\nUAVPnVec = np.zeros(t_steps.shape)\nUAVPeVec = np.zeros(t_steps.shape)\nUAVPdVec = np.zeros(t_steps.shape)\n\ncommandedChi = np.zeros(t_steps.shape)\ncommandedAltitude = np.zeros(t_steps.shape)\ncommandedVa = np.zeros(t_steps.shape)\n\nUAVChi = np.zeros(t_steps.shape)\nUAVAltitude = np.zeros(t_steps.shape)\nUAVVa = np.zeros(t_steps.shape)\n\npdc.target.setDeliveryAirSpeed(targetVa)\npdc.target.setDeliveryAltitude(targetAlt)\n\n\npdc.Update(1)\n\nfor i in range(n_steps):\n \n payloadState = pdc.Payload.getState()\n UAVState = pdc.VCLC.VAM.getVehicleState()\n\n PayloadPnVec[i] = payloadState.pn\n PayloadPeVec[i] = payloadState.pe\n PayloadPdVec[i] = payloadState.pd\n\n UAVPnVec[i] = UAVState.pn\n UAVPeVec[i] = UAVState.pe\n UAVPdVec[i] = UAVState.pd\n\n commandedChi[i] = pdc.chi\n commandedAltitude[i] = pdc.target.deliveryAltitude\n commandedVa[i] = pdc.target.deliveryAirSpeed\n\n UAVChi[i] = UAVState.chi\n UAVAltitude[i] = -UAVState.pd\n UAVVa[i] = UAVState.Va\n pdc.Update(0)\n\n\nactualDistanceToTarget = math.sqrt((pdc.target.pn-pdc.Payload.aeroModel.getVehicleState().pn)**2 + (pdc.target.pe-pdc.Payload.aeroModel.getVehicleState().pe)**2)\nprint(\"Distance from target: %.3f meters\"%(actualDistanceToTarget))\n\npnPlot = plt.figure()\n\nplt.plot(t_steps,PayloadPnVec)\nplt.plot(t_steps,UAVPnVec)\nplt.xlabel('Time in seconds')\nplt.ylabel('Pn in meters')\nplt.title('Pn value of the UAV and the Payload')\nplt.legend([\"Payload Pn\",\"UAV Pn\"])\n\n\npePlot = plt.figure()\n\nplt.plot(t_steps,PayloadPeVec)\nplt.plot(t_steps,UAVPeVec)\nplt.xlabel('Time in seconds')\nplt.ylabel('Pe in meters')\nplt.title('Pe value of the UAV and the Payload')\nplt.legend([\"Payload Pe\",\"UAV Pe\"])\n\n\nchiPlot = plt.figure()\n\nplt.plot(t_steps,commandedChi)\nplt.plot(t_steps,UAVChi)\nplt.xlabel('Time in seconds')\nplt.ylabel('Course angle in radians')\nplt.title('Course angle of the UAV in relation to the commanded angle')\nplt.legend([\"Commanded Chi\",\"UAV Chi\"])\n\n\nplot3d = plt.figure()\n\nax = plt.axes(projection='3d')\n\n# Data for a three-dimensional line\n\nax.plot3D(PayloadPnVec, PayloadPeVec, -PayloadPdVec, 'blue')\nax.plot3D(UAVPnVec, UAVPeVec, -UAVPdVec, 'orange')\nax.legend([\"Payload Path\",\"UAV Path\",\"Target\"])\nax.scatter(targetPn,targetPe, 0, c=['red'])\nax.set_xlabel('N')\nax.set_ylabel('E')\nax.set_zlabel('Alt')\n\nplot3d.suptitle('Position of the UAV and Payload', 
fontsize=20)\n\nplt.show()\n\n\n\n\n","sub_path":"UAV_Payload_Delivery/CodeBase/FinalTestDemo.py","file_name":"FinalTestDemo.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"174212891","text":"import os\nimport sys\nimport numpy as np\nimport scipy.io as io\nimport theano\nimport theano.tensor as T\n\nsys.path.append('../nn')\nsys.path.append('../motion')\n\nfrom ActivationLayer import ActivationLayer\nfrom DropoutLayer import DropoutLayer\nfrom Pool1DLayer import Pool1DLayer\nfrom AdamTrainer import AdamTrainer\nfrom network import create_core, create_regressor, create_footstepper\n\nrng = np.random.RandomState(23455)\n\ndata = np.load('../data/processed/data_edin_locomotion.npz')['clips']\n\nI = np.arange(len(data))\nrng.shuffle(I)\n\ndata_train = data[I[:len(data)//2]]\ndata_valid = data[I[len(data)//2:]]\n\nX = data_train\nX = np.swapaxes(X, 1, 2).astype(theano.config.floatX)\n\npreprocess = np.load('preprocess_core.npz')\nX = (X - preprocess['Xmean']) / preprocess['Xstd']\n\nT = X[:,-7:-4]\nF = X[:,-4:]\n\nW = np.zeros((F.shape[0], 5, F.shape[2]))\n\nfor i in range(len(F)):\n \n w = np.zeros(F[i].shape)\n \n for j in range(F[i].shape[0]):\n last = -1\n for k in range(1, F[i].shape[1]):\n if last == -1 and F[i,j,k-1] < 0 and F[i,j,k-0] > 0: last = k; continue\n if last == -1 and F[i,j,k-1] > 0 and F[i,j,k-0] < 0: last = k; continue\n if F[i,j,k-1] > 0 and F[i,j,k-0] < 0:\n if k-last+1 > 10 and k-last+1 < 60:\n w[j,last:k+1] = np.pi/(k-last)\n else:\n w[j,last:k+1] = w[j,last-1]\n last = k\n continue\n if F[i,j,k-1] < 0 and F[i,j,k-0] > 0:\n if k-last+1 > 10 and k-last+1 < 60:\n w[j,last:k+1] = np.pi/(k-last)\n else:\n w[j,last:k+1] = w[j,last-1]\n last = k\n continue\n \n c = np.zeros(F[i].shape)\n \n for k in range(0, F[i].shape[1]):\n window = slice(max(k-100,0),min(k+100,F[i].shape[1]))\n ratios = (\n np.mean((F[i,:,window]>0).astype(np.float), axis=1) / \n np.mean((F[i,:,window]<0).astype(np.float), axis=1))\n ratios[ratios==np.inf] = 100\n c[:,k] = ((np.pi*ratios) / (1+ratios))\n \n w[w==0.0] = np.nan_to_num(w[w!=0.0].mean())\n \n W[i,0:1] = w.mean(axis=0)\n W[i,1:5] = c\n \n # import matplotlib.pyplot as plt\n # plt.plot(F[i,0])\n # plt.plot(np.sin(np.cumsum(W[i,0:1])))\n # plt.ylim([-1.1, 1.1])\n # plt.show()\n \nprint(T.shape, W.shape)\n\nWmean = W.mean(axis=2).mean(axis=0)[np.newaxis,:,np.newaxis]\nWstd = W.std(axis=2).mean(axis=0)[np.newaxis,:,np.newaxis]\nW = (W - Wmean) / Wstd\n\nnp.savez_compressed('preprocess_footstepper.npz', Wmean=Wmean, Wstd=Wstd)\n\nI = np.arange(len(T))\nrng.shuffle(I)\nT, F, W = T[I], F[I], W[I]\n\nbatchsize = 1\n\nT, W = theano.shared(T), theano.shared(W)\n\nnetwork = create_footstepper(batchsize=batchsize, window=X.shape[2], dropout=0.1)\ntrainer = AdamTrainer(rng=rng, batchsize=batchsize, epochs=100, alpha=0.0001)\ntrainer.train(network, T, W, filename='network_footstepper.npz')\n\n","sub_path":"synth/train_footstepper.py","file_name":"train_footstepper.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"223882208","text":"# from azure.servicebus import ServiceBusService, Message, Queue\n# import time\n# bus_service = ServiceBusService(\n# service_namespace='tripQueue',\n# shared_access_key_name='RootManageSharedAccessKey',\n# shared_access_key_value='AeyEpMBtdtQLdH8SxoWdzMfKx3CYrjlRGW4WEziB1bM=')\n#\n# #msg = Message(b'without CorelationID2')\n# 
#msg.custom_properties = {'CorelationID' : 'sdsdsdsdsa3333'}\n#\n# #bus_service.send_queue_message('presentation-request', msg)\n#\n#\n# while True:\n# msg = bus_service.receive_queue_message('presentation-request', peek_lock=True)\n# time.sleep(1)\n# print(str(msg.body))\n# #print(str(msg.custom_properties))\n# #print(dir(msg))\n# if 'corelationid' in msg.custom_properties.keys():\n# if msg.custom_properties['corelationid'] == 'sdsdsdsdsa3333':\n# print('found corelation')\n#\n# # msg = bus_service.receive_queue_message('presentation-request', peek_lock=False)\n# # print(str(msg.body))\n\nimport time\nfrom azure.storage.queue import QueueService\nimport json\nqueue_service = QueueService(account_name='tripappdisks435', account_key='goOmWqmWbUi6OvMMRuOBKeaGjYuBRI4J0UZGj7LUn4VmgiCGvdOuwvKTuJLdXJpAAm3u7SejQTeiaHUnx5ltHg==')\n\n#queue_service.create_queue('presentationgen')\nexample_response = {\n 'corelationId':'526f9aa4-8465-45b2-a5d2-70f091b15b32',\n 'status':'completed',\n 'progress':'100',\n 'content':'http://tripappdisks435.blob.core.windows.net/trip-media/Caption-This_10.jpg'\n}\n#queue_service.put_message('presentationresp', json.dumps(example_response))\n\n\nmessages = queue_service.get_messages('presentationgen',visibilitytimeout=1,numofmessages=16)\nprint('#####################################')\nprint('presentationgen:')\nfor message in messages:\n print(message.message_text)\n\nmessages = queue_service.get_messages('presentationresp',visibilitytimeout=1,numofmessages=16)\nprint('#####################################')\nprint('presentationresp:')\nfor message in messages:\n print(message.message_text)\n\nmessages = queue_service.get_messages('thumbnailgen',visibilitytimeout=1,numofmessages=16)\nprint('#####################################')\nprint('thumbnailgen:')\nfor message in messages:\n print(message.message_text)\n\nmessages = queue_service.get_messages('thumbnailresp',visibilitytimeout=1,numofmessages=16)\nprint('#####################################')\nprint('thumbnailresp:')\nfor message in messages:\n print(message.message_text)\n","sub_path":"code/TripEngine/trips/azuremsg.py","file_name":"azuremsg.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"248918675","text":"# Copyright (C) 2017 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\"\"\"Common aliases for applications.\"\"\"\n\n# boolean aliases.\nTRUE_VAL = \"True\"\nFALSE_VAL = \"False\"\nYES_VAL = \"Yes\"\nNO_VAL = \"No\"\n\n# aliases for operator\nAND_OP = \"AND\"\nOR_OP = \"OR\"\nEQUAL_OP = \"=\"\nCONTAINS_OP = \"~\"\n\n# aliases for size attrs\nWIDTH = \"width\"\nHEIGHT = \"height\"\nSCROLL_HEIGHT = \"scrollHeight\"\n\nDEFAULT = \"default\"\nDASHBOARD = \"Dashboard\"\n","sub_path":"test/selenium/src/lib/constants/value_aliases.py","file_name":"value_aliases.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"351035918","text":"import itertools\n\ndef validate_prefix(s, prefix):\n n = len(s)\n source = 0\n for i in range(prefix, n):\n if s[i] > s[source]:\n return prefix\n elif s[i] == s[source]:\n source += 1\n if source == prefix:\n source = 0\n continue\n assert s[i] < s[source]\n prefix = i + 1\n source = 0\n return prefix\n\n\ndef determine_initial_prefix(s):\n prefix = 1\n while True:\n new_prefix = validate_prefix(s, prefix)\n if new_prefix == prefix:\n break\n prefix = new_prefix\n 
return prefix\n\n\ndef solve():\n n, k = [int(x) for x in input().split()]\n s = list(input())\n pref = determine_initial_prefix(s)\n return ''.join(itertools.islice(itertools.cycle(s[:pref]), k))\n\n\nif __name__=='__main__':\n print(solve())","sub_path":"1537E1.py","file_name":"1537E1.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"606496362","text":"#%%\n\"\"\" Calculates the probability of an electron ionizing from an atomic core in an external electric field.\"\"\"\n\"\"\" The constant names for Landau are chosen to be the same as in Babuskhin. \"\"\"\n\"\"\" Landau wants as arguments an array with the E-field in plasma units at a specific t and the laser \"\"\"\n\"\"\" frequency for the simulation. \"\"\"\n\nimport numpy as np\nimport scipy.constants as const\nimport Plasmaunit\n\nm_e = const.value(\"electron mass\")\ne_e = const.value(\"elementary charge\")\nepsilon = const.value(\"electric constant\")\nhbar = const.value(\"Planck constant over 2 pi\")\nJ_to_eV = const.value(\"joule-electron volt relationship\")\n\nU_Ar = np.zeros(19)\nU_Ar[0] = 0\nU_Ar[1] = 15.7596117 # Taken from NIST\nU_Ar[2] = 27.62967\nU_Ar[3] = 40.735\nU_Ar[4] = 59.58\nU_Ar[5] = 74.84\nU_Ar[6] = 91.290\nU_Ar[7] = 124.41\nU_Ar[8] = 143.4567\nU_Ar[9] = 422.60\nU_Ar[10] = 479.76\nU_Ar[11] = 540.4\nU_Ar[12] = 619.0\nU_Ar[13] = 685.5\nU_Ar[14] = 755.13\nU_Ar[15] = 855.5\nU_Ar[16] = 918.375\nU_Ar[17] = 4120.6656\nU_Ar[18] = 4426.2228\nU_H = 13.59843449\n\nfyra_epsilon_pi = 4*np.pi*epsilon\nOMEGA_A = (m_e*e_e**4)/(fyra_epsilon_pi**2*hbar**3)\nE_a = (m_e**2*e_e**5)/(fyra_epsilon_pi**3*hbar**4)\nr_H = U_Ar/U_H\n\ndef Landau_element(E,OMEGA_0,Z,dt):\n if np.abs(E) == 0:\n W = 0\n else:\n Ereal = Plasmaunit.Ereal(E,OMEGA_0)\n W = (4*OMEGA_A*r_H[Z]**(5/2)*(E_a/np.abs(Ereal))*np.exp(-2*r_H[Z]**(3/2)*(E_a/(3*np.abs(Ereal)))))/OMEGA_0\n if dt*W > 1:\n W = 1\n return W\n \ndef Landau_array(E,W,OMEGA_0,Z,dt):\n for z in range(len(E)):\n if np.abs(E[z]) == 0:\n W[z] = 0\n else:\n Ereal = Plasmaunit.Ereal(E[z],OMEGA_0)\n W[z] = (4*OMEGA_A*r_H[Z]**(5/2)*(E_a/np.abs(Ereal))*np.exp(-2*r_H[Z]**(3/2)*(E_a/(3*np.abs(Ereal)))))/OMEGA_0\n if dt*W[z] > 1:\n W[z] = 1\n","sub_path":"Energy_conservation4.0/Ionization.py","file_name":"Ionization.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"420290215","text":"from __future__ import annotations\nimport random\nfrom datetime import timedelta\nfrom typing import List\nfrom unittest import mock\nfrom unittest.mock import Mock\n\nfrom django.utils import timezone\nfrom rest_framework import status\nfrom rest_framework.test import force_authenticate\n\nfrom accesses.models import Access, Profile, Role\nfrom accesses.views import ProfileViewSet\nfrom admin_cohort.models import User\nfrom admin_cohort.settings import MANUAL_SOURCE\nfrom admin_cohort.tests_tools import random_str, \\\n new_user_and_profile, CaseRetrieveFilter, ViewSetTestsWithBasicPerims, \\\n ListCase, CreateCase, DeleteCase, PatchCase, RequestCase\nfrom admin_cohort.tools import prettify_json, prettify_dict\nfrom admin_cohort.types import IdResp\n\nPROFILES_URL = \"/profiles\"\n\n\nclass ObjectView(object):\n def __init__(self, d):\n self.__dict__ = d\n\n\nclass CheckedProfile:\n def __init__(self, o: dict):\n errs = {}\n for f in ['firstname', 'lastname', 'user_id', 'email']:\n if f not in o or not isinstance(o.get(f), str):\n 
errs.setdefault(f, f'Missing or wrong type : {o.get(f, \"\")}')\n if len(errs) > 0:\n raise Exception(prettify_dict(errs))\n\n self.user: dict = o.get('user', None)\n self.manual_profile: dict = o.get('manual_profile', None)\n\n def assert_match_id_resp(self, other: CheckedProfile):\n errs: dict = {}\n for s in ['firstname', 'lastname', 'user_id', 'email']:\n if getattr(self, s, \"\") != getattr(other, s, \"\"):\n errs.setdefault(\n s, f\"Different: expected is {getattr(self, s)}, \"\n f\"got is {getattr(other, s)}\")\n\n if self.user is not None and \\\n self.user.get('provider_username') \\\n != other.user.get('provider_username'):\n errs['user'] = f\"Different: \" \\\n f\"expected {self.user.get('displayed_name')}, \" \\\n f\"got {other.user.get('displayed_name')}\"\n\n if self.manual_profile is not None \\\n and self.manual_profile.get('id') \\\n != other.manual_profile.get('id'):\n errs['manual_profile'] = f\"Different: \" \\\n f\"expected {str(self.manual_profile)}, \" \\\n f\"got {str(other.manual_profile)}\"\n if len(errs):\n raise Exception(prettify_dict(errs))\n\n def __str__(self):\n return prettify_dict({\n **self.__dict__,\n 'user': str(self.user), 'manual_profile': str(self.manual_profile)})\n\n\nclass CheckCase(RequestCase):\n def __init__(self, mocked_value: IdResp, mock_called: bool = False,\n to_find: CheckedProfile = None,\n checked_id: str = '', **kwargs):\n super(CheckCase, self).__init__(**kwargs)\n self.mocked_value = mocked_value\n self.to_find = to_find\n self.checked_id = checked_id\n self.mock_called = mock_called\n\n\nclass ProfileTests(ViewSetTestsWithBasicPerims):\n unupdatable_fields = [\"is_active\", \"valid_start_datetime\",\n \"valid_end_datetime\", \"id\"]\n unsettable_default_fields = dict(source=MANUAL_SOURCE,)\n unsettable_fields = [\"id\"]\n manual_dupplicated_fields = ['valid_start_datetime', 'valid_end_datetime',\n 'is_active']\n\n objects_url = \"/profiles/\"\n retrieve_view = ProfileViewSet.as_view({'get': 'retrieve'})\n list_view = ProfileViewSet.as_view({'get': 'list'})\n create_view = ProfileViewSet.as_view({'post': 'create'})\n delete_view = ProfileViewSet.as_view({'delete': 'destroy'})\n update_view = ProfileViewSet.as_view({'patch': 'partial_update'})\n model = Profile\n model_objects = Profile.objects\n model_fields = Profile._meta.fields\n\n def setUp(self):\n super(ProfileTests, self).setUp()\n\n # ROLES\n self.role_full: Role = Role.objects.create(**dict([\n (f, True) for f in self.all_rights\n ]), name='FULL')\n\n # user with all the rights\n self.user_full_admin, self.prof_full_admin = new_user_and_profile(\n email='full@admin.us')\n Access.objects.create(role=self.role_full, profile=self.prof_full_admin,\n perimeter_id=self.aphp.id)\n\n\nclass ProfileCaseRetrieveFilter(CaseRetrieveFilter):\n def __init__(self, user_id: str, source: str, exclude: dict = None):\n self.user_id = user_id\n self.source = source\n\n super(ProfileCaseRetrieveFilter, self).__init__(exclude=exclude)\n\n\nclass ProfileGetListTests(ProfileTests):\n def setUp(self):\n super(ProfileGetListTests, self).setUp()\n # can_read_users\n self.user_that_can_read_users, self.prof_that_can_read_users = \\\n new_user_and_profile(email=\"can@read.users\")\n role_read_users = Role.objects.create(right_read_users=True)\n Access.objects.create(\n perimeter_id=self.hospital3.id,\n profile=self.prof_that_can_read_users,\n role=role_read_users\n )\n\n # cannot_read_users\n self.user_that_cannot_read_users, self.prof_that_cannot_read_users = \\\n 
new_user_and_profile(email=\"cannot@read.users\")\n role_all_but_read_users = Role.objects.create(\n **dict([(r, True) for r in self.all_rights\n if r != 'right_read_users'])\n )\n Access.objects.create(\n perimeter_id=self.aphp.id,\n profile=self.prof_that_cannot_read_users,\n role=role_all_but_read_users\n )\n\n self.name_pattern = \"pat\"\n self.id_pattern = 34\n\n nb_providers = 500\n\n self.provider_firstnames = [\n random_str(random.randint(4, 8)) for _ in\n range(nb_providers - 110)\n ] + [\n random_str(random.randint(1, 3))\n + self.name_pattern\n + random_str(random.randint(0, 3)) for _\n in range(110)\n ]\n\n self.provider_lastnames = [\n random_str(random.randint(4, 8)) for _ in\n range(nb_providers - 110)\n ] + [\n random_str(random.randint(1, 3))\n + self.name_pattern\n + random_str(random.randint(0, 3)) for _\n in range(110)\n ]\n\n self.users_provider_usernames = list(set(\n [str(random.randint(0, 10000000))\n for _ in range(nb_providers - 110)] + [\n f\"{random.randint(0, 100)}{self.id_pattern}\"\n f\"{random.randint(0, 1000)}\" for _ in range(110)\n ]))\n\n self.list_users: List[User] = User.objects.bulk_create([User(\n provider_username=sv,\n firstname=fn,\n lastname=ln,\n email=f\"{fn}.{ln}@aphp.fr\",\n provider_id=int(sv)\n ) for (sv, fn, ln) in zip(\n self.users_provider_usernames,\n self.provider_firstnames,\n self.provider_lastnames\n )])\n\n self.sources = [\n random_str(random.randint(0, 3))\n + self.name_pattern\n + random_str(random.randint(0, 3)) for _ in range(2)\n ]\n\n self.list_profs: List[Profile] = Profile.objects.bulk_create(sum([[\n Profile(\n provider_id=u.provider_id,\n provider_name=f\"{u.firstname} {u.lastname}\",\n firstname=u.firstname,\n lastname=u.lastname,\n email=u.email,\n source=s,\n user=u,\n is_active=random.random() < 0.8\n ) for s in random.sample(self.sources, 2)]\n for u in self.list_users], [])) + [\n self.prof_that_cannot_read_users,\n self.prof_that_can_read_users,\n self.prof_full_admin]\n\n def test_admin_get_all_ph(self):\n # As a user with read_users right, I can get all profiles\n case = ListCase(\n to_find=[*self.list_profs],\n success=True,\n status=status.HTTP_200_OK,\n user=self.user_that_can_read_users\n )\n self.check_get_paged_list_case(case)\n\n def test_err_admin_get_all_ph(self):\n # As a user with all the rights but not read_users one,\n # I cannot see get profile\n case = ListCase(\n to_find=[],\n success=False,\n status=status.HTTP_403_FORBIDDEN,\n user=self.user_that_cannot_read_users\n )\n self.check_get_paged_list_case(case)\n\n def test_get_list_with_params(self):\n # As a user with read_users right,\n # I can get profiles given query parameters\n basic_case_dict = dict(success=True, status=status.HTTP_200_OK,\n user=self.user_that_can_read_users)\n cases = [\n ListCase(\n **basic_case_dict,\n title=f\"provider_id={self.id_pattern}\",\n to_find=[\n prof for prof in self.list_profs\n if str(self.id_pattern) == str(prof.provider_id)],\n params=dict(provider_id=self.id_pattern)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"source={self.id_pattern}\",\n to_find=[\n prof for prof in self.list_profs\n if str(self.id_pattern) == prof.source],\n params=dict(source=self.id_pattern)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"cdm_source={self.id_pattern}\",\n to_find=[\n prof for prof in self.list_profs\n if str(self.id_pattern) == prof.source],\n params=dict(cdm_source=self.id_pattern)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"user={self.list_users[0].pk}\",\n 
to_find=list(self.list_users[0].profiles.all()),\n params=dict(user=self.list_users[0].pk)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"provider_source_value={self.list_users[0].pk}\",\n to_find=list(self.list_users[0].profiles.all()),\n params=dict(provider_source_value=self.list_users[0].pk)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"provider_name={self.name_pattern}\",\n to_find=[prof for prof in self.list_profs\n if str(self.name_pattern)\n in str(prof.provider_name)],\n params=dict(provider_name=self.name_pattern)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"lastname={self.name_pattern}\",\n to_find=[prof for prof in self.list_profs\n if str(self.name_pattern) in str(prof.lastname)],\n params=dict(lastname=self.name_pattern)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"firstname={self.name_pattern}\",\n to_find=[prof for prof in self.list_profs\n if str(self.name_pattern) in str(prof.firstname)],\n params=dict(firstname=self.name_pattern)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"email={self.name_pattern}\",\n to_find=[prof for prof in self.list_profs\n if str(self.name_pattern) in str(prof.email)],\n params=dict(email=self.name_pattern)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"provider_history_id={self.list_profs[0].id}\",\n to_find=[self.list_profs[0]],\n params=dict(provider_history_id=self.list_profs[0].id)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"id={self.list_profs[0].id}\",\n to_find=[self.list_profs[0]],\n params=dict(id=self.list_profs[0].id)\n ),\n ListCase(\n **basic_case_dict,\n title=f\"is_active={False}\",\n to_find=list(filter(lambda p: not p.is_active,\n self.list_profs)),\n params=dict(is_active=False)\n ),\n ]\n [self.check_get_paged_list_case(case) for case in cases]\n\n\nclass ProfileCreateTests(ProfileTests):\n def setUp(self):\n super(ProfileCreateTests, self).setUp()\n # USERS\n # empty_user\n self.user_empty: User = User.objects.create(\n provider_username=str(random.randint(0, 10000000)),\n lastname=\"empty-last\",\n firstname=\"empty-last\",\n email=\"em@pty.user\",\n )\n\n # can_add_users\n self.user_that_can_add_users, self.prof_that_can_add_users = \\\n new_user_and_profile(email=\"can@mng.users\")\n role_add_users = Role.objects.create(right_add_users=True)\n Access.objects.create(\n perimeter_id=self.hospital3.id,\n profile=self.prof_that_can_add_users,\n role=role_add_users\n )\n\n self.creation_data = dict(\n provider_id=self.user_empty.provider_id,\n user=self.user_empty.pk,\n firstname=self.user_empty.firstname,\n lastname=self.user_empty.lastname,\n email=self.user_empty.email\n )\n self.basic_create_case = CreateCase(\n data=self.creation_data,\n retrieve_filter=ProfileCaseRetrieveFilter(\n user_id=self.user_empty.pk, source=MANUAL_SOURCE),\n user=None, status=None, success=None,\n )\n\n\nclass ProfileCheckTests(ProfileCreateTests):\n objects_url = \"/profiles/check\"\n check_view = ProfileViewSet.as_view({'post': 'check_existing_user'})\n\n @mock.patch('admin_cohort.conf_auth.check_id_aph')\n def check_check_case(self, case: CheckCase, mock_check: Mock):\n mock_check.return_value = case.mocked_value\n\n request = self.factory.post(\n self.objects_url, data=dict(user_id=case.checked_id), format='json')\n force_authenticate(request, case.user)\n\n response = self.__class__.check_view(request)\n response.render()\n\n self.assertEqual(\n response.status_code, case.status,\n msg=(f\"{case.description}\"\n + (f\" -> {prettify_json(response.content)}\"\n if response.content else \"\")),\n )\n if case.success:\n if 
case.to_find is None:\n self.assertIsNone(response.data)\n else:\n res = CheckedProfile(response.data)\n try:\n res.assert_match_id_resp(case.to_find)\n except Exception as e:\n self.fail(f\"{case.description} - {e}\")\n mock_check.assert_called() if case.mock_called \\\n else mock_check.assert_not_called()\n\n def setUp(self):\n super(ProfileCheckTests, self).setUp()\n\n # cannot_add_users\n self.user_that_cannot_add_users, self.prof_that_cannot_add_users = \\\n new_user_and_profile(email=\"cannot@mng.users\")\n role_all_but_add_users = Role.objects.create(\n **dict([(r, True) for r in self.all_rights\n if r != 'right_add_users']))\n Access.objects.create(\n perimeter_id=self.aphp.id,\n profile=self.prof_that_cannot_add_users,\n role=role_all_but_add_users\n )\n self.user_random, self.prof_random = \\\n new_user_and_profile(email=\"ran@do.m\")\n\n self.unexisting_user_id = str(random.randint(0, 10000000))\n\n while self.unexisting_user_id in [\n u.provider_username for u in User.objects.all()\n ]:\n self.unexisting_user_id = str(random.randint(0, 10000000))\n\n self.base_id_resp: IdResp = IdResp(\n firstname='testFn',\n lastname='testLn',\n user_id=self.user_random.provider_username,\n email='em@ai.l',\n )\n self.base_case = CheckCase(\n success=True,\n mock_called=True,\n mocked_value=self.base_id_resp,\n status=status.HTTP_200_OK,\n user=self.user_that_can_add_users,\n )\n\n def test_check_profile(self):\n # As a user with right_add_users,\n # I can check the existence of a user on the control API,\n # and it returns User and Manual profile if it exists\n self.check_check_case(self.base_case.clone(\n mocked_value=self.base_id_resp,\n to_find=CheckedProfile(dict(\n firstname=self.base_id_resp.firstname,\n lastname=self.base_id_resp.lastname,\n email=self.base_id_resp.email,\n user_id=self.base_id_resp.user_id,\n user=self.user_random.__dict__,\n manual_profile=self.prof_random.__dict__,\n )),\n checked_id=random_str(1),\n ))\n\n def test_check_profile_not_existing(self):\n # As a user with right_add_users,\n # I can check the existence of a user on the control API,\n # and it returns None if the API's response is empty\n self.check_check_case(self.base_case.clone(\n mocked_value=None,\n to_find=None,\n status=status.HTTP_204_NO_CONTENT,\n checked_id=random_str(1),\n ))\n\n def test_check_profile_not_existing_user(self):\n # As a user with right_add_users,\n # I can check the existence of a user on the control API,\n # and it returns with empty user and profile if user is not in database\n self.check_check_case(self.base_case.clone(\n mocked_value=IdResp(**{**self.base_id_resp.__dict__,\n 'user_id': self.unexisting_user_id}),\n to_find=CheckedProfile(dict(\n firstname=self.base_id_resp.firstname,\n lastname=self.base_id_resp.lastname,\n email=self.base_id_resp.email,\n user_id=self.base_id_resp.user_id,\n )),\n checked_id=random_str(1),\n ))\n\n def test_check_profile_not_existing_profile(self):\n # As a user with right_add_users,\n # I can check the existence of a user on the control API,\n # and it returns with empty profile if user has no manual profile\n user_random_no_profile: User = User.objects.create(\n provider_username=self.unexisting_user_id,\n email=''\n )\n self.check_check_case(self.base_case.clone(\n mocked_value=IdResp(**{**self.base_id_resp.__dict__,\n 'user_id': self.unexisting_user_id}),\n to_find=CheckedProfile(dict(\n firstname=self.base_id_resp.firstname,\n lastname=self.base_id_resp.lastname,\n email=self.base_id_resp.email,\n user_id=self.base_id_resp.user_id,\n 
user=user_random_no_profile.__dict__,\n )),\n checked_id=random_str(1),\n ))\n\n def test_err_check_profile_missing_param(self):\n # As a user with all the rights, I cannot call it\n # without providing 'user_id' parameter\n self.check_check_case(self.base_case.clone(\n mocked_value=self.base_id_resp,\n mock_called=False,\n checked_id=None,\n status=status.HTTP_400_BAD_REQUEST,\n success=False,\n to_find=None,\n ))\n\n def test_err_check_profile_unauthorized(self):\n # As a user with everything but right_add_users,\n # I cannot check the existence of a user on the control API\n self.check_check_case(self.base_case.clone(\n mocked_value=self.base_id_resp,\n mock_called=False,\n checked_id=None,\n status=status.HTTP_403_FORBIDDEN,\n user=self.user_that_cannot_add_users,\n success=False,\n to_find=None,\n ))\n\n\nclass ProfileCreateWithUserTests(ProfileCreateTests):\n def setUp(self):\n super(ProfileCreateWithUserTests, self).setUp()\n\n # cannot_add_users\n self.user_that_cannot_add_users, self.prof_that_cannot_add_users = \\\n new_user_and_profile(email=\"cannot@mng.users\")\n role_all_but_add_users = Role.objects.create(\n **dict([(r, True) for r in self.all_rights\n if r != 'right_add_users']))\n Access.objects.create(\n perimeter_id=self.aphp.id,\n profile=self.prof_that_cannot_add_users,\n role=role_all_but_add_users\n )\n\n def test_create_as_user_admin(self):\n # As a user with right_add_users, I can create a new profile for a user\n # that has no manual profile yet\n case = self.basic_create_case.clone(\n user=self.user_that_can_add_users,\n success=True,\n status=status.HTTP_201_CREATED,\n )\n self.check_create_case(case)\n\n def test_error_create_as_simple_user(self):\n # As a user with everything but right_add_users,\n # I cannot create a new profile\n case = self.basic_create_case.clone(\n user=self.user_that_cannot_add_users,\n success=False,\n status=status.HTTP_403_FORBIDDEN,\n )\n self.check_create_case(case)\n\n def test_error_create_when_existing_profile(self):\n # As a user with right_add_users, I cannot create a new profile to a\n # user that already has a manual profile\n existing_profile: Profile = Profile.objects.create(\n source=MANUAL_SOURCE, user=self.user_empty, manual_is_active=True,\n )\n\n case = self.basic_create_case.clone(\n user=self.user_full_admin,\n success=False,\n status=status.HTTP_400_BAD_REQUEST,\n retrieve_filter=ProfileCaseRetrieveFilter(\n user_id=self.user_empty.pk, source=MANUAL_SOURCE,\n exclude=dict(id=existing_profile.id)\n ),\n )\n self.check_create_case(case)\n\n def test_error_create_with_forbidden_fields(self):\n # As a user with right_add_users, when creating a new manual profile\n # specifying a source will return 400.\n cases = [self.basic_create_case.clone(\n user=self.user_full_admin,\n success=False,\n status=status.HTTP_400_BAD_REQUEST,\n data={\n **self.creation_data,\n k: v,\n }\n ) for (k, v) in dict(source=\"not_manual\").items()]\n [self.check_create_case(case) for case in cases]\n\n def test_create_manual_fields_replacing_fields(self):\n # As a user with right_add_users, when creating a new manual profile\n # the fields valid_start_datetime, valid_end_datetime and is_active will\n # actually fill manual_valid_start_datetime, etc.\n case = self.basic_create_case.clone(\n user=self.user_that_can_add_users,\n success=True,\n status=status.HTTP_201_CREATED,\n data={\n **self.creation_data,\n 'valid_start_datetime': timezone.now() - timedelta(days=10),\n 'valid_end_datetime': timezone.now() + timedelta(days=10),\n 'is_active': 
False,\n })\n self.check_create_case(case)\n\n def test_error_create_with_both_field_and_manual_version(self):\n # As a user with right_add_users, when creating a new manual profile\n # specifying a value to one of the previous fields AND to its manual_\n # version will return 400.\n\n cases = [self.basic_create_case.clone(\n user=self.user_full_admin,\n success=False,\n status=status.HTTP_400_BAD_REQUEST,\n data={\n **self.creation_data,\n k: v,\n f\"manual_{k}\": v,\n }\n ) for (k, v) in dict(\n valid_start_datetime=(timezone.now() - timedelta(days=10),\n timezone.now() - timedelta(days=10)),\n valid_end_datetime=(timezone.now() + timedelta(days=10),\n timezone.now() + timedelta(days=10)),\n is_active=(False, True),\n ).items()]\n [self.check_create_case(case) for case in cases]\n\n\nclass ProfileCreateWithoutUserTests(ProfileCreateTests):\n def setUp(self):\n super(ProfileCreateWithoutUserTests, self).setUp()\n\n self.test_prov_id = random.randint(0, 100000)\n self.test_username = str(random.randint(0, 10000000))\n self.test_email = \"new@empty.user\"\n\n def check_create_case_without_user(self, case: CreateCase):\n self.check_create_case(case.clone(\n retrieve_filter=ProfileCaseRetrieveFilter(\n user_id=self.test_username, source=MANUAL_SOURCE),\n data=dict(\n user_id=self.test_username,\n firstname=self.user_empty.firstname,\n lastname=self.user_empty.lastname,\n email=self.test_email\n ),\n ))\n\n inst = User.objects.filter(\n firstname=self.user_empty.firstname,\n lastname=self.user_empty.lastname,\n email=self.test_email,\n provider_username=self.test_username,\n provider_id=self.test_prov_id,\n ).first()\n\n if case.success:\n self.assertIsNotNone(inst)\n else:\n self.assertIsNone(inst)\n\n @mock.patch('accesses.serializers.check_id_aph')\n @mock.patch('accesses.serializers.get_provider_id')\n def test_create_as_user_admin_without_user(\n self, mock_get_prov: Mock, mock_check_id_aph: Mock):\n # As a user with right_add_users, I can create a profile for a\n # non existing user, this will also create a User\n mock_check_id_aph.return_value = dict()\n mock_get_prov.return_value = self.test_prov_id\n\n case = self.basic_create_case.clone(\n user=self.user_that_can_add_users,\n success=True,\n status=status.HTTP_201_CREATED,\n )\n self.check_create_case_without_user(case)\n\n mock_check_id_aph.assert_called_once()\n mock_get_prov.assert_called_once()\n\n @mock.patch('accesses.serializers.check_id_aph')\n @mock.patch('accesses.serializers.get_provider_id')\n def test_err_create_with_forbidden_id(\n self, mock_get_prov: Mock, mock_check_id_aph: Mock):\n # As a user with right_add_users, I cannot create a profile for a\n # non existing user if id is not validated with check_id_aph\n mock_check_id_aph.side_effect = Exception()\n mock_get_prov.return_value = self.test_prov_id\n\n case = self.basic_create_case.clone(\n user=self.user_full_admin,\n success=False,\n status=status.HTTP_400_BAD_REQUEST,\n )\n self.check_create_case_without_user(case)\n mock_check_id_aph.assert_called_once()\n mock_get_prov.assert_not_called()\n\n @mock.patch('accesses.serializers.check_id_aph')\n @mock.patch('accesses.serializers.get_provider_id')\n def test_err_create_provider_id_not_found(\n self, mock_get_prov: Mock, mock_check_id_aph: Mock):\n # As a user with right_add_users, I cannot create a profile for a\n # non existing user if provider_id is not found by get_provider_id\n mock_check_id_aph.return_value = dict()\n mock_get_prov.side_effect = Exception()\n\n case = self.basic_create_case.clone(\n 
user=self.user_full_admin,\n success=False,\n status=status.HTTP_400_BAD_REQUEST,\n )\n self.check_create_case_without_user(case)\n mock_check_id_aph.assert_called_once()\n mock_get_prov.assert_called_once()\n\n\nclass ProfilePatchTests(ProfileTests):\n def setUp(self):\n super(ProfilePatchTests, self).setUp()\n # USERS\n # empty_user\n self.user_empty: User = User.objects.create(\n provider_username=str(random.randint(0, 10000000)),\n lastname=\"empty-last\",\n firstname=\"empty-last\",\n email=\"em@pty.user\",\n )\n\n # can_edit_users\n self.user_that_can_edit_users, self.prof_that_can_edit_users = \\\n new_user_and_profile(email=\"can@mng.users\")\n role_edit_users = Role.objects.create(right_edit_users=True)\n Access.objects.create(\n perimeter_id=self.hospital3.id,\n profile=self.prof_that_can_edit_users,\n role=role_edit_users\n )\n\n # cannot_edit_users\n self.user_that_cannot_edit_users, self.prof_that_cannot_edit_users = \\\n new_user_and_profile(email=\"cannot@mng.users\")\n role_all_but_edit_users = Role.objects.create(\n **dict([(r, True) for r in self.all_rights\n if r != 'right_edit_users']))\n Access.objects.create(\n perimeter_id=self.aphp.id,\n profile=self.prof_that_cannot_edit_users,\n role=role_all_but_edit_users\n )\n\n self.created_data = dict(\n provider_id=self.user_empty.provider_id,\n user=self.user_empty,\n firstname=self.user_empty.firstname,\n lastname=self.user_empty.lastname,\n email=self.user_empty.email,\n is_active=True,\n )\n self.base_data_to_update = dict(\n provider_name='new',\n firstname='new',\n lastname='new',\n email='new',\n provider_id=random.randint(0, 1000000),\n is_active=False,\n valid_start_datetime=timezone.now() - timedelta(days=2),\n valid_end_datetime=timezone.now() + timedelta(days=2),\n )\n self.basic_patch_case = PatchCase(\n initial_data=self.created_data,\n data_to_update=self.base_data_to_update,\n user=None, status=None, success=None,\n )\n\n def test_patch_as_user_admin(self):\n # As a user with right_edit_users, I can edit a profile\n case = self.basic_patch_case.clone(\n user=self.user_that_can_edit_users,\n success=True,\n status=status.HTTP_200_OK,\n )\n self.check_patch_case(case)\n\n def test_error_patch_as_simple_user(self):\n # As a user with everything but right_edit_users,\n # I cannot edit a profile\n case = self.basic_patch_case.clone(\n user=self.user_that_cannot_edit_users,\n success=False,\n status=status.HTTP_403_FORBIDDEN,\n )\n self.check_patch_case(case)\n\n def test_error_patch_with_forbidden_fields(self):\n # As a user with all the rights,\n # I cannot edit a profile with certain fields\n other_user_empty = User.objects.create(email=\"other_em@pty.user\")\n cases = [self.basic_patch_case.clone(\n user=self.user_full_admin,\n success=False,\n status=status.HTTP_400_BAD_REQUEST,\n data_to_update={k: v}\n ) for (k, v) in dict(user=other_user_empty.pk, source='other').items()]\n [self.check_patch_case(case) for case in cases]\n\n\nclass ProfileDeleteTests(ProfileTests):\n def setUp(self):\n super(ProfileDeleteTests, self).setUp()\n # empty_user\n self.user_empty: User = User.objects.create(\n provider_username=str(random.randint(0, 10000000)),\n lastname=\"empty-last\",\n firstname=\"empty-last\",\n email=\"em@pty.user\",\n )\n\n def test_error_delete_user_as_main_admin(self):\n # As a user will all the rights,\n # I cannot delete a profile\n cases = [DeleteCase(\n user=self.user_full_admin,\n success=False,\n status=status.HTTP_403_FORBIDDEN,\n data_to_delete=dict(user=self.user_empty, source=s)\n ) for s in 
[MANUAL_SOURCE, 'RandomSource']]\n [self.check_delete_case(case) for case in cases]\n","sub_path":"accesses/tests/tests_view_profiles.py","file_name":"tests_view_profiles.py","file_ext":"py","file_size_in_byte":32662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"305755640","text":"##\n## For each key in column 5 (a three-letter string), get\n## the smallest and the largest value associated with that key.\n##\n## aaa,0,6\n## bbb,4,7\n## ccc,0,1\n## ddd,5,5\n## eee,0,0\n## fff,4,9\n## ggg,3,3\n## hhh,6,8\n## iii,2,7\n## jjj,2,5\n##\n\nimport pandas as pd\nfrom itertools import chain\n# read file\nfile = pd.read_csv('data.csv',sep='\\t',header=None)\n\n# create needed data\nfile['data'] = file[4].str.split(',')\nprueba = [[b.split(',') for b in a] for a in list(file['data'])]\nprueba2 = list(chain(*list(chain(*prueba))))\n\n# creates dataframe with needed data\nprueba3 = pd.DataFrame([a.split(':') for a in prueba2], columns = ['dic','value'])\n\n# creates max and mins\n\nmaxim = prueba3.groupby('dic').max()['value']\nminim = prueba3.groupby('dic').min()['value']\n\nprueba4 = []\nfor a,x in enumerate(maxim):\n prueba4.append(list(maxim.index)[a] + \",\" \n + str(list(minim)[a]) + ',' \n + str(list(maxim)[a])\n )\n\nprint(\"\\n\".join(prueba4))","sub_path":"q06.py","file_name":"q06.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"51750607","text":"import matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import pyplot\nfrom pymongo import MongoClient\n\nmongo_uri = \"mongodb://admin:admin@ds021182.mlab.com:21182/c4e\"\nclient = MongoClient (mongo_uri)\ndb = client.get_default_database()\ncustomers = db['customers']\n\nwom = customers.find({\"ref\":\"wom\"}).count()\nevents = customers.find({\"ref\":\"events\"}).count()\nads = customers.find({\"ref\":\"ads\"}).count() \n\nlabels = ['wom','events','ads']\nvalues = [wom,events,ads]\nexplode = [0.02,0.02,0.02]\ncolors = ['blue','red','yellow']\n\nprint ('Customers are acquired from wom:' ,wom,\n '\\nCustomers are acquired from events:' ,events,\n '\\nCustomers are acquired from ads:',ads)\n\npyplot.title('Customers grouped by reference')\npyplot.pie(values,\n labels=labels,\n explode=explode,\n colors=colors,\n autopct='%1.1f%%',\n shadow=True)\npyplot.axis (\"equal\")\npyplot.show()","sub_path":"lab01/homework/chocopie.py","file_name":"chocopie.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"523557007","text":"class Solution:\n def trap(self, height) -> int:\n n = len(height)\n if n < 2:\n return 0\n result = 0\n stack = []\n for i in range(n):\n if not stack:\n stack.append(i)\n elif height[i] < height[stack[-1]]:\n stack.append(i)\n else:\n while stack and height[stack[-1]] <= height[i]:\n x = height[stack.pop()]\n if stack:\n result += (min(height[stack[-1]], height[i]) - x) * (i - stack[-1] - 1)\n stack.append(i)\n return result\n\n\ns = Solution()\nprint(s.trap([0, 1, 0, 3, 2, 1, 0, 1, 3, 2, 2, 2, 1]))\n# print(s.trap([0,1,0,2,1,0,1,3,2,1,2,1]))\n","sub_path":"leetcode/2020/trapping-rain-water.py","file_name":"trapping-rain-water.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"216767886","text":"#Final Year Project\n#Group No.: 09\n\n'''\n Solving Travelling Salesman 
Problem(TSP) using PSO(Particle Swarm Optimization)\n Discrete PSO for TSP\n\n References:\n http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.258.7026&rep=rep1&type=pdf\n\n'''\n\n\nimport pandas as pd\nimport numpy as np\n\ndef read_tsp(filename):\n\n '''\n Read a .tsp file into a pandas DataFrame\n\n The .tsp files can be found in the TSPLIB project. Currently, the library\n only considers the possibility of a 2D map.\n '''\n\n with open(filename) as f:\n\n dimension = None\n node_coord_start = None\n lines = f.readlines()\n\n i = 0\n while not dimension or not node_coord_start:\n line = lines[i]\n if line.startswith('DIMENSION :'):\n dimension = int(line.split()[-1])\n if line.startswith('NODE_COORD_SECTION'):\n node_coord_start = i\n i += 1\n\n f.seek(0)\n\n cities = pd.read_csv(f, skiprows = node_coord_start + 1,\n sep = ' ', names = ['city', 'x', 'y'],\n dtype = {'city': str ,'x': np.float64, 'y': np.float64},\n header = None, nrows = dimension)\n\n return cities\n","sub_path":"My_Project/inputoutput.py","file_name":"inputoutput.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"196548443","text":"from unittest import TestCase\n\nfrom primers.offtargets import offtargets\n\n\nclass TestOfftargets(TestCase):\n \"\"\"Test offtarget detection.\"\"\"\n\n def test_offtargets(self):\n \"\"\"Find and cache offtarget binding sites.\"\"\"\n\n # GTGGCTAGCC is one base removed from GTGGCTAGGC in seq\n parent = \"CTGACTCTACTTGGAAATGTGGCTAGGCCTTTGCCCACGCACCTGATCGGTCCTGTGGCTAGCCTCGTTTGCTTTTTAGGACCGGATGAACTACAGAGCATTGCAAGAATC\"\n seq = \"CTGACTCTACTTGGAAATGTGGCTAGGCCTT\"\n\n ot = offtargets(seq, parent)\n\n self.assertEqual(0, ot[0])\n self.assertEqual(len(seq), len(ot))\n self.assertTrue(any(o for o in ot))\n","sub_path":"tests/offtargets_test.py","file_name":"offtargets_test.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"214488663","text":"#!/share/pkg.7/python3/3.6.10/install/bin/python3\nimport sys\nimport os\n#import random\nimport sqlite3\nimport cgi\nimport cgitb\ncgitb.enable()\nform = cgi.FieldStorage()\n#form = \"y\"\nif form:\n # Connect to the database.\n connection = sqlite3.connect(\"/restricted/projectnb/casa/_jychung/_gene2protein/db_g2p/gene2protein_v1.db\")\n cursor = connection.cursor()\n submit = form.getvalue(\"submit\")\n #submit = \"y\"\n if submit:\n plot_file1 = form.getvalue(\"plot_file1\")\n if plot_file1:\n print('Content-type: text/html\\n')\n os.system(\"rm -f %s\" % plot_file1)\n #os.system(\"rm -f %s\" % plot_file2)\n\nelse:\n print('Content-type: text/html\\n')\n","sub_path":"_gene2protein/web/cgi-bin/Delete_IsoPlot.py","file_name":"Delete_IsoPlot.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"279991864","text":"'''\n This file contains the code for the bubble plot.\n'''\n\nimport plotly.express as px\nimport plotly.io as pio\nimport hover_template\n\n\ndef get_plot(my_df, gdp_range, co2_range):\n '''\n Generates the bubble plot.\n\n The x and y axes are log scaled, and there is\n an animation between the data for years 2000 and 2015.\n\n The markers' maximum size is 30 and their minimum\n size is 5.\n\n Args:\n my_df: The dataframe to display\n gdp_range: The range for the x axis\n co2_range: The range for the y axis\n Returns:\n The generated figure\n '''\n fig 
= px.scatter(my_df, x=\"GDP\", y=\"CO2\",\n size=\"Population\", color=\"Continent\",\n log_x=True, log_y=True,\n range_x=gdp_range, range_y=co2_range,\n animation_frame=\"Year\", animation_group=\"Country Name\",\n custom_data=['Country Name'],\n size_max=30)\n fig.update_traces(marker=dict(sizemin=5))\n return fig\n\n\ndef update_animation_hover_template(fig):\n '''\n Sets the hover template of the figure,\n as well as the hover template of each\n trace of each animation frame of the figure\n\n Args:\n fig: The figure to update\n Returns:\n The updated figure\n '''\n \n fig.update_traces(hovertemplate=hover_template.get_bubble_hover_template())\n for x, nframe in enumerate(fig.frames):\n for y, ndata in enumerate(nframe.data):\n fig.frames[x].data[y].hovertemplate = hover_template.get_bubble_hover_template() \n return fig\n\n\ndef update_animation_menu(fig):\n '''\n Updates the animation menu to show the current year, and to remove\n the unnecessary 'Stop' button.\n\n Args:\n fig: The figure containing the menu to update\n Returns\n The updated figure\n '''\n fig.update_layout(updatemenus=[dict(\n buttons=[\n {\n \"label\": \"Animate\",\n \"method\": \"animate\"\n },\n {\"visible\": False}\n ], )])\n\n fig.update_layout(sliders=[dict(currentvalue={\"prefix\": \"Date for year: \"}, )])\n return fig\n\n\ndef update_axes_labels(fig):\n '''\n Updates the axes labels with their corresponding titles.\n\n Args:\n fig: The figure to be updated\n Returns:\n The updated figure\n '''\n fig.update_yaxes(title=\"CO2 emissions per capita (metric tonnes)\")\n fig.update_xaxes(title=\"GDP per capita ($ USD)\")\n return fig\n\n\ndef update_template(fig):\n '''\n Updates the layout of the figure, setting\n its template to 'simple_white'\n\n Args:\n fig: The figure to update\n Returns\n The updated figure\n '''\n fig.update_layout(template=pio.templates['simple_white'])\n return fig\n\n\ndef update_legend(fig):\n '''\n Updated the legend title\n\n Args:\n fig: The figure to be updated\n Returns:\n The updated figure\n '''\n fig.update_layout(legend_title=\"Legend\")\n return fig\n","sub_path":"TP4/bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"481023245","text":"import click\nimport string\n\nfrom hypothesis import given\nfrom hypothesis.strategies import text, lists, integers\nimport pytest\n\nfrom pierone.validators import validate_incident_id, validate_team\n\n@given(\n valid_incident_number=integers(0, 100000),\n invalid_incident_id=text(string.ascii_letters)\n)\ndef test_validate_incident_id(valid_incident_number, invalid_incident_id):\n valid_incident_id = \"INC-{}\".format(valid_incident_number)\n assert validate_incident_id(None, None, valid_incident_id) == valid_incident_id\n with pytest.raises(click.BadParameter):\n validate_incident_id(None, None, invalid_incident_id)\n\n@given(\n lower_case_letter=text(string.ascii_lowercase, min_size=1, max_size=1),\n upper_case_letters=text(string.ascii_uppercase, min_size=1),\n lower_case_letters=text(string.ascii_lowercase, min_size=10),\n digits=text(string.digits, min_size=10),\n)\ndef test_validate_team(lower_case_letter, upper_case_letters, lower_case_letters, digits):\n assert validate_team(None, None, lower_case_letters) == lower_case_letters\n assert validate_team(None, None, lower_case_letter + digits) == lower_case_letter + digits\n\n with pytest.raises(click.BadParameter):\n # Team names need at least two chars\n 
validate_team(None, None, lower_case_letter)\n\n with pytest.raises(click.BadParameter):\n # Team names cannot start with a digit\n validate_team(None, None, digits + lower_case_letter)\n\n with pytest.raises(click.BadParameter):\n # Team names cannot contain upper case chars\n validate_team(None, None, upper_case_letters)","sub_path":"tests/test_validators.py","file_name":"test_validators.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"120152798","text":"def max_and_min_values(values):\n\tmax_min = [] #declare an empty list\n\tif values != []: #check if the list is empty\n\t\tvalue_max = max(values) #declare a variable with the max value in the list\n\t\tvalue_min = min(values)#declare a variable with the min value in the list\n\n\t\tmax_min.append(value_max) #add both values to the list\n\t\tmax_min.append(value_min)\n\t\treturn max_min #return the new list with the values\n\telse:\n\t\treturn \"Empty list\" #return a message if the list is empty\n","sub_path":"max_and_min_value.py","file_name":"max_and_min_value.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"59638487","text":"import sys\nimport json\nimport itertools\nimport re\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport click\n\n\nIdent = namedtuple(\"Ident\", [\"ident\", \"type\"])\n\nBASE = Path(__file__).parents[0]\nIDENT_FILE = BASE / Path('idents_clean.txt')\n#IDENT_FILE = BASE / Path('identer_test.txt')\n\nJSON_FILE = BASE / Path(\"identer.json\")\nREFNR_FILE = BASE / Path(\"refnr.txt\")\n\nREGIONS = (\"DK\", \"EE\", \"GL\", \"SJ\", \"FO\", \"SE\", \"FI\")\nREGIONS_REGEX = \"|\".join(REGIONS)\n\n\ndef has_region(ident: str) -> bool:\n if ident[0:3].strip() in REGIONS:\n return True\n else:\n return False\n\n\ndef parse_ident_type(ident: str, region: str) -> str:\n ident = ident.strip() # We don't want unwanted whitespace polluting the search\n\n if re.match(r\"^\\D\\w\\w\\w$\", ident):\n return \"GNSS\"\n\n if re.match(r\"^\\w{0,2}[\\w\\s]-\\d{2}-[\\w|\\s|\\.]{1,6}([.]\\d{1,4})?$\", ident):\n return \"landsnr\"\n\n if re.match(r\"^81\\s?\\d{3}$\", ident) and region not in REGIONS:\n return \"jessen\"\n\n if re.match(r\"^G\\.[IM]\\.\\d{1,4}(/\\d{2,4})?(.\\d)?$\", ident):\n return \"GI\"\n\n if re.match(r\"^\\d{4}/\\w\", ident):\n return \"ekstern\"\n\n # matches idents like \"201 051.2010\", 3 423.1 and \"10 001\"\n if re.match(r\"^\\d{1,6}(([.]\\d{1,4})|([a-zA-Z]\\d?))?$\", ident):\n return \"station\"\n\n return \"diverse\"\n\n\ndef parse_idents():\n\n # get a list of refnr\n with open(REFNR_FILE) as f:\n refnumre = [int(l) for l in f.readlines()]\n\n identer = []\n\n with open(IDENT_FILE, \"r\") as ident_fil:\n with click.progressbar(\n ident_fil, label=\"Parsing idents\", length=len(refnumre)\n ) as identer_progress:\n for line, refnr in zip(identer_progress, refnumre):\n\n # Split before region when only one space between idents\n line = re.sub(\"(\\w)(\\s{1,})\" + f\"({REGIONS_REGEX})\", r\"\\1~\\3\", line)\n #print(line)\n\n # Reduce to only one space between region and ident\n line = re.sub(fr\"({REGIONS_REGEX}) (\\s*)(\\w)\", r\"\\1 \\3\", line)\n #print(line)\n\n # Split on two or more spaces\n line = re.sub(r\"(\\w)(\\s{2,})(\\w)\", r\"\\1~\\3\", line)\n #print(line)\n\n # list(dict.fromkeys()) removes duplicate entries and preserves order\n idents = 
list(dict.fromkeys(line.strip().split(\"~\")))\n #print(idents)\n\n temp_idents = []\n\n for text in idents:\n if has_region(text):\n code = text[2:].strip()\n region = text[0:3].strip()\n else:\n code = text.strip()\n region = \"\"\n\n # Strip all singular spaces\n # code = re.sub(\"(\\S)(\\s)(\\S)\", r'\\1\\3', code).strip()\n code = re.sub(r\"(\\s)(\\S)\", r\"\\2\", code).strip()\n\n # Add country code if present\n ident_type = parse_ident_type(code, region)\n\n # A few GNSS ident duplicates exists, i.e. \"DK NORD\" and\n # \"FO NORD\". We don't want to include the region for Danish\n # GNSS idents\n if ident_type == \"GNSS\" and region == \"DK\":\n region = \"\"\n ident = (region + \" \" + code).strip()\n\n if ident == \"\":\n continue\n\n idt = {\"1\": refnr, \"2\": ident_type, \"3\": ident}\n if idt in temp_idents:\n continue\n temp_idents.append(idt)\n\n identer.extend(temp_idents)\n\n\n with open(JSON_FILE, \"w\") as out:\n json.dump(identer, out, indent=4)\n\ndef test():\n test_data = {\n (\"EE\", \"872 S\"): \"diverse\",\n (\"DK\", \"872.461\"): \"station\",\n (\"GL\", \"1 049\"): \"diverse\",\n (\"DK\", \"1 049.461\"): \"diverse\",\n (\"SJ\", \"50 280 068\"): \"diverse\",\n (\"DK\", \"TERN\"): \"GNSS\",\n (\"DK\", \"K -01-06663\"): \"landsnr\",\n (\"DK\", \"K.K.663\"): \"diverse\",\n (\"DK\", \"K -01-06742\"): \"landsnr\",\n (\"DK\", \"1-00-06266\"): \"landsnr\",\n (\"DK\", \"F.K.266\"): \"diverse\",\n (\"DK\", \"K -01-09003\"): \"landsnr\",\n (\"DK\", \"G.M.1405/1406.1\"): \"GI\",\n (\"DK\", \"G.M.1404\"): \"GI\",\n (\"DK\", \"9904/14 496\"): \"ekstern\",\n (\"DK\", \"8025/827-6015\"): \"ekstern\",\n (\"DK\", \"3 985 K 1\"): \"diverse\",\n (\"DK\", \"K -18- A\"): \"landsnr\",\n (\"DK\", \"G.P.245\"): \"diverse\",\n (\"DK\", \"K -41- A.21\"): \"landsnr\",\n (\"DK\", \"K -41- V.5\"): \"landsnr\",\n (\"GL\", \"F1001\"): \"diverse\",\n (\"DK\", \"68-09-09011.1981\"): \"landsnr\",\n (\"DK\", \"69-01- V.4\"): \"landsnr\",\n (\"DK\", \"4078/13 611 H\"): \"ekstern\",\n (\"DK\", \"G.M.902\"): \"GI\",\n (\"DK\", \"G.I.1452\"): \"GI\",\n (\"DK\", \"G.M.1166.1\"): \"GI\",\n (\"DK\", \"G.M.110\"): \"GI\",\n (\"DK\", \"24-01-00032.1\"): \"landsnr\",\n (\"DK\", \"G.M.90\"): \"GI\",\n (\"DK\", \"G.M.1\"): \"GI\",\n (\"DK\", \"G.M.901/902\"): \"GI\",\n (\"DK\", \"G.M.35/36.1\"): \"GI\",\n (\"SE\", \"22-45-100501\"): \"ekstern\"\n\n }\n\n for (region, ident), identtype in test_data.items():\n determined_type = parse_ident_type(ident, region)\n assert identtype == determined_type, f\"{identtype} != {determined_type} ({region}, {ident})\"\n\n print(\"All tests passed!\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1 and sys.argv[1] == 'test':\n test()\n else:\n parse_idents()","sub_path":"pre-migration/identer/parse_idents.py","file_name":"parse_idents.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"305755640","text":"import pulp\nimport datetime\nfrom peak_forecaster.thermal_util import get_sst, get_charge_cop\nfrom optimizer_engine.cop import farenheit_to_celsius, celsius_to_farenheit\nfrom peak_forecaster.lt_optimizer import Optimizer\nfrom peak_forecaster import savings\n\ndef run_iterative_optimizer(loads, config, verbose=False):\n optimizer_config = config['optimizer_config']\n\n bill_calculator = savings.Savings(config['site_id']).bill_calculator\n optimize_energy = True\n if 'optimize_energy' in config:\n optimize_energy = config['optimize_energy']\n targets = []\n time_frame = 
f\"{config['start']} - {config['end']}\"\n for load in loads:\n\n previous_total_savings = 0\n previous_diff = 0\n iteration_limit = 10\n optimizer_config['start'] = load.iloc[0]['timestamp']\n optimizer_config['end'] = load.iloc[-1]['timestamp']\n count = 0\n\n print(f'Starting optimization for {time_frame}')\n while True:\n count += 1\n\n optimizer = Optimizer(optimizer_config,\n optimize_energy=optimize_energy)\n target = optimizer.solve(load)\n\n print(\n f'Iteration {count} - {pulp.LpStatus[optimizer.frame.status]}')\n\n baseline_demand = bill_calculator.calculate_demand_bill(\n target, load_column='baseline')\n baseline_energy = bill_calculator.calculate_energy_bill(\n target, load_column='baseline')\n\n ideal_demand = bill_calculator.calculate_demand_bill(target)\n ideal_energy = bill_calculator.calculate_energy_bill(target)\n\n demand_savings = baseline_demand - ideal_demand\n energy_savings = baseline_energy - ideal_energy\n\n total_savings = demand_savings + energy_savings\n\n savings_diff = abs(total_savings - previous_total_savings)\n\n if verbose:\n print(\"Solved for count {}\".format(count))\n print(\"Total Savings {}\".format(total_savings))\n print(\"Delta in savings {}\".format(savings_diff))\n\n if count == iteration_limit:\n print(\"Warning: Optimizer iteration limit reached\")\n break\n\n if (savings_diff < 10 or abs(savings_diff - previous_diff) < 1):\n break\n\n previous_total_savings = total_savings\n previous_diff = savings_diff\n\n sst_df = get_sst(target.soc, config)\n oat_df = farenheit_to_celsius(target.temperature)\n\n load = load.assign(cop_charge=get_charge_cop(sst_df, oat_df))\n\n target.drop(columns=['timestamp'], inplace=True)\n target['sst'] = list(\n map(celsius_to_farenheit, get_sst(target['soc'], config)))\n targets.append(target)\n\n print(f\"Completed optimizing for {time_frame}\\n\")\n\n return targets\n","sub_path":"peak_forecaster/iterative_optimizer.py","file_name":"iterative_optimizer.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"473447890","text":"\nimport leaderboard\nimport os\nimport gameplayer\nimport multiprocessing as mp\nclass Worker:\n def __init__(self,id_,server):\n m = mp.Manager()\n self.outgoing = m.Queue()\n self.incoming = m.Queue()\n self.process = mp.Process(target = gameplayer.Client,args = (id_,self.incoming,self.outgoing))\n self.server = server\n self.id = id_\n self.status = 'stopped'\n def wait_message(self):\n while(self.incoming.empty()):\n pass\n return self.incoming.get()\n def start(self):\n if(self.status!='stopped'):\n raise Exception(\"Attempted to start an already running worker\")\n self.process.start()\n m = self.wait_message()\n if(m[0] == 'ready'):\n self.status = 'idle'\n \n def play_match(self,match):\n p1,p2 = match\n self.match = match\n self.outgoing.put(('play',p1.id,p2.id))\n x =self.wait_message()\n if(x[0] == 'started'):\n self.status = 'playing'\n else:\n self.status = 'N/A'\n def process_queue(self):\n if(not self.incoming.empty()):\n command = self.incoming.get()\n flag = command[0]\n if(flag == 'result'):\n self.server.match_complete(self,self.match,command[1])\n self.status='idle' \n\nclass Server:\n on_match_complete = set()\n def __init__(self,worker_count,create_model_function,name):\n self.models = {}\n self.name = name\n self.gene_size = leaderboard.cal_model_wlen(create_model_function())\n self.workers = []\n self.worker_count = worker_count\n 
self.create_model_function=create_model_function\n def match_complete(self,worker,match,result):\n for i in self.on_match_complete:\n i(match,result)\n def start(self):\n self.spawn_workers()\n print(\"Prediction Server Started\")\n def spawn_workers(self):\n leaderboard.printProgressBar(0,self.worker_count,prefix='Starting Workers..'.ljust(17),suffix=\"%d/%d\"%(0,self.worker_count))\n for i in range(self.worker_count):\n self.workers.append( Worker(i,self))\n self.workers[i].start()\n self.workers[i].outgoing.put(('gs',self.gene_size))\n self.workers[i].outgoing.put(('lb',self.name))\n leaderboard.printProgressBar(i+1,self.worker_count,prefix='Starting Workers..'.ljust(17),suffix=\"%d/%d\"%(i+1,self.worker_count))\n def play_matches(self,matches):\n mq = list(matches)\n while(len(mq)>0):\n for i in self.workers:\n if(len(mq)==0):\n break\n if(i.status == 'idle'):\n i.play_match(mq.pop())\n leaderboard.printProgressBar(len(matches)-len(mq),len(matches),prefix='Playing...'.ljust(17),suffix=\"%d/%d\"%(len(matches)-len(mq),len(matches)))\n else:\n i.process_queue()\n self.wait_all()\n def wait_all(self):\n x=0\n while(True):\n\n x+=1\n active =False\n for i in self.workers:\n i.process_queue()\n if(i.status!='idle'):\n active=True\n if(not active):\n break\nif __name__ == '__main__':\n import keras","sub_path":"predection_server.py","file_name":"predection_server.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"317606781","text":"import tensorflow as tf\n\n\ndef build_summaries():\n '''\n Makes TensorFlow Summary Ops.\n Written for DDPG. Can extend for others.\n '''\n episode_reward = tf.Variable(0.)\n tf.compat.v1.summary.scalar(\"Reward\", episode_reward)\n episode_average_max_q = tf.Variable(0.)\n tf.compat.v1.summary.scalar(\"Qmax Value\", episode_average_max_q)\n\n summary_vars = [episode_reward, episode_average_max_q]\n summary_ops = tf.compat.v1.summary.merge_all()\n\n return summary_ops, summary_vars\n","sub_path":"utils/tf_utils.py","file_name":"tf_utils.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"502583629","text":"import io\nimport os.path\nimport re\nfrom setuptools import setup\n\n\n# https://packaging.python.org/en/latest/single_source_version\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get('encoding', 'utf8')\n ) as fp:\n return fp.read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError('Unable to find version string.')\n\n\nwith open('README.adoc') as f:\n long_description = f.read()\n\n\nsetup(\n name='pynote',\n version=find_version('note'),\n description='Manage notes on the commandline',\n long_description=long_description,\n author='Stefan Tatschner',\n author_email='rumpelsepp@sevenbyte.org',\n url='https://github.com/rumpelsepp/pynote',\n license='MIT',\n install_requires=['arrow', 'tabulate'],\n scripts=['note'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Natural Language :: English',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 
],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"292546874","text":"#!/usr/bin/env python\n# encoding: utf-8\n__author__ = 'Wayne'\n__date__ = '2017/5/8'\n\n\nimport time\nfrom celery_app import app\n\n@app.task\ndef multiply(x, y):\n time.sleep(2)\n return x * y","sub_path":"tools/celery_demo/celery_app/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"644664873","text":"# -*- coding: utf-8 -*-\n\n# 结论\n# 函数声明/定义时,形参的顺序是位置形参、可变形参、关键词形参、可变关键词形参\n# 函数调用时\n# 参数的输入顺序必须是位置实参在前,关键词实参在后\n# 位置形参也可以用关键词实参来填\n\ndef foo(a, b=1, c=2, *var_params, x, **keyword_params):\n print('a={0}, b={1}, c={2}, var_args={3}, x={5}, keyword_params={4}'.format(a,b,c,var_params,keyword_params, x))\n\nif __name__ == '__main__':\n foo(10, 11, 'a', 'b', 'c', 'd', y=2, z=3, x=1)\n _ = '%.3f, %d, %s' % (1.1234, 10, 1234)\n print(_)\n\n\n","sub_path":"parameter_demo.py","file_name":"parameter_demo.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"308893459","text":"from flask import Flask, request\nfrom flask import make_response\nfrom flask import jsonify\nimport json\n\nfrom lib import indexer\nfrom lib.column import Column\nfrom lib.source import Source\nfrom lib.utils import get_new_index_name\nfrom main.semantic_labeler import SemanticLabeler\n\nimport logging\nimport os\nfrom shutil import copyfile\n\n\n# logging\nlogFormatter = logging.Formatter(\"%(asctime)s [%(levelname)-10.10s] %(module)s: %(message)s\")\nrootLogger = logging.getLogger()\nrootLogger.setLevel(logging.INFO)\n\nfileHandler = logging.handlers.RotatingFileHandler('karma-server.log', mode='w',\n maxBytes=0.5 * 10 ** 9,\n backupCount=5)\nfileHandler.setFormatter(logFormatter)\nfileHandler.setLevel(logging.INFO)\nrootLogger.addHandler(fileHandler)\n\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setLevel(logging.WARNING)\nconsoleHandler.setFormatter(logFormatter)\nrootLogger.addHandler(consoleHandler)\n\n# logging for elasticsearch\nes_logger = logging.getLogger('elasticsearch')\nes_logger.propagate = False\nes_logger.setLevel(logging.ERROR)\nes_logger_handler = logging.handlers.RotatingFileHandler('karma-elasticsearch-base.log',\n maxBytes=0.5*10**9,\n backupCount=3, mode='w')\nes_logger.addHandler(es_logger_handler)\n\nes_tracer = logging.getLogger('elasticsearch.trace')\nes_tracer.propagate = False\nes_tracer.setLevel(logging.ERROR)\nes_tracer_handler = logging.handlers.RotatingFileHandler('karma-elasticsearch-full.log',\n maxBytes=0.5*10**9,\n backupCount=3, mode='w')\nes_tracer.addHandler(es_tracer_handler)\n\n# logging for py4j\npy4j_logger = logging.getLogger('py4j')\npy4j_logger.propagate = False\npy4j_logger.setLevel(logging.ERROR)\npy4j_logger_handler = logging.handlers.RotatingFileHandler('karma-py4j.log',\n maxBytes=0.5*10**9,\n backupCount=3, mode='w')\npy4j_logger.addHandler(py4j_logger_handler)\n\n# logger = logging.getLogger('mainLog')\n# logger.propagate = False\n# logger.setLevel(logging.DEBUG)\n# # create file handler\n# fileHandler = logging.handlers.RotatingFileHandler('elasticsearclk.log',\n# maxBytes=10**6,\n# backupCount=3)\n# fileHandler.setLevel(logging.INFO)\n# # create console handler\n# consoleHandler = logging.StreamHandler()\n# 
consoleHandler.setLevel(logging.INFO)\n# # create formatter and add it to the handlers\n# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# consoleHandler.setFormatter(formatter)\n# fileHandler.setFormatter(formatter)\n# # add the handlers to logger\n# logger.addHandler(consoleHandler)\n# logger.addHandler(fileHandler)\n\n\n# logging.basicConfig(filename='app.log',\n# level=logging.DEBUG, filemode='w',\n# format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')\n\n__author__ = 'alse'\n\nservice = Flask(__name__)\n\nSEMANTIC_TYPE_URL = \"/semantic_type\"\nCOLUMN_URL = \"/column\"\nFIRST_TIME_URL = \"/ftu\"\nUPLOAD_FOLDER = \"/data/\" # TODO change this to model folder\nTEST_URL = \"/test\"\nRESET_URL = \"/reset\"\nsemantic_labeler = SemanticLabeler()\ndomains = [\"soccer\", \"dbpedia\", \"museum\", \"weather\", \"weapons\"]\n\nservice.config['JSON_AS_ASCII'] = False\n\n@service.errorhandler(404)\ndef not_found(error=None):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n }\n resp = jsonify(message)\n resp.status_code = 404\n return resp\n\n\n@service.errorhandler(414)\ndef bad_uri(err=None):\n logging.error(\"Bad uri {}: {}.\".format(request.url, err))\n message = {\n 'status': 414,\n 'message': err + \" \" + request.url,\n }\n resp = jsonify(message)\n resp.status_code = 414\n return resp\n\n\ndef error(message=\"\"):\n with service.app_context():\n print(\"Error message: \", message)\n response = make_response()\n response.status_code = 500\n response.headers = {\n \"X-Status-Reason\": message,\n \"message\": message\n }\n return response\n\n\ndef allowed_file(filename, extensions):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in extensions\n\n\n@service.route('/')\ndef hello():\n resp = jsonify(\"Karma DSL running here!\")\n resp.status_code = 200\n return resp\n\n\n@service.route(SEMANTIC_TYPE_URL, methods=['POST', 'PUT'])\ndef add_semantic_type(column=None, semantic_type=None):\n try:\n if not (column and semantic_type):\n column = request.json[\"column\"]\n semantic_type = request.json[\"semantic_type\"]\n logging.info(\"Adding semantic type: {}\".format(semantic_type))\n column_name = column.keys()[0]\n\n if column and semantic_type and column_name:\n source = Source(column_name)\n source.read_data_from_dict(column)\n source.set_semantic_type(semantic_type, column_name)\n _id = get_new_index_name(semantic_type, column_name)\n source.save(index_config={\"name\": _id, \"size\": 0})\n resp = jsonify({\"index_name\": _id})\n resp.status_code = 200\n return resp\n except Exception as e:\n return error(\"Semantic type adding failed: {}\".format(e.args))\n\n\n@service.route(SEMANTIC_TYPE_URL, methods=[\"DELETE\"])\ndef delete_semantic_type():\n semantic_type = request.json[\"semantic_type\"]\n _id = get_new_index_name(semantic_type, \"*\")\n if not indexer.delete_column(index_config={\"name\": _id, \"size\": 0}):\n logging.error(\"Unable to delete semantic type.\")\n return error(\"Unable to delete semantic type.\")\n resp = jsonify(\"Deleted semantic type \" + str(semantic_type))\n resp.status_code = 200\n return resp\n\n\n# NOTE. 
This is to add many columns to a given semantic type in a bulk\n@service.route(SEMANTIC_TYPE_URL + \"/bulk\", methods=['POST'])\ndef add_semantic_type_bulk():\n semantic_type = request.json[\"semantic_type\"]\n columns = request.json[\"columns\"]\n\n for column in columns:\n add_semantic_type(column, semantic_type)\n\n resp = jsonify(\"Bulk adding semantic type \" + str(semantic_type))\n resp.status_code = 200\n return resp\n\n\n@service.route(COLUMN_URL, methods=[\"DELETE\"])\ndef delete_column():\n logging.info(\"Deleting column...\")\n semantic_type = request.json[\"semantic_type\"]\n column_name = request.json[\"column_name\"]\n _id = get_new_index_name(semantic_type, column_name)\n if not indexer.delete_column(index_config={\"name\": _id, \"size\": 0}):\n logging.error(\"Unable to delete semantic type.\")\n return error(\"Unable to delete semantic type.\")\n resp = jsonify(\"Column deleted: \" + str(column_name))\n resp.status_code = 200\n return resp\n\n\n@service.route(COLUMN_URL, methods=[\"POST\"])\ndef get_semantic_type():\n logging.info(\"Getting semantic type...\")\n # jsonContent = json.dumps(request.json, ensure_ascii=False)\n if \"header\" not in request.json or \"values\" not in request.json:\n logging.error(\"Either header or values not in request.\")\n return bad_uri(\"Either header or values not in request.\")\n header = request.json[\"header\"]\n values = request.json[\"values\"]\n if not(isinstance(values, list)):\n logging.error(\"values must be a list\")\n return bad_uri(\"values must be a list\")\n if \"source\" not in request.json:\n source = None\n else:\n source = request.json[\"source\"]\n\n try:\n column = Column(header, source)\n for element in values:\n # logging.debug(\"Add element: {}\".format(element))\n # logging.debug(\"column: {}\".format(column))\n column.add_value(element)\n\n result = semantic_labeler.predict_semantic_type_for_column(column)\n resp = jsonify(result)\n resp.status_code = 200\n return resp\n except Exception as e:\n logging.error(\"Get semantic type: {}\".format(e))\n return error(\"Getting semantic type failed: {}\".format(e.args))\n\n\n@service.route(FIRST_TIME_URL, methods=[\"GET\"])\ndef first_time():\n logging.info(\"First time setup...\")\n try:\n semantic_labeler.reset()\n semantic_labeler.read_data_sources([\"weapons\"])\n semantic_labeler.train_semantic_types([\"weapons\"])\n semantic_labeler.train_random_forest([11], [\"weapons\"])\n\n # semantic_labeler.read_data_sources([\"soccer\", \"dbpedia\", \"museum\", \"weather\"])\n # semantic_labeler.train_semantic_types([\"soccer\", \"dbpedia\", \"museum\", \"weather\"])\n # semantic_labeler.train_random_forest([11], [\"soccer\"])\n\n # semantic_labeler.read_data_sources([\"soccer\", \"dbpedia\", \"museum\",\"flights\", \"weather\", \"phone\"])\n # semantic_labeler.train_semantic_types([\"soccer\", \"dbpedia\", \"museum\", \"flights\", \"weather\", \"phone\"])\n # semantic_labeler.train_random_forest([11], [\"soccer\"])\n # semantic_labeler.write_data_sources(limit=None, filter_unknown=False)\n resp = jsonify(\"Training complete.\")\n resp.status_code = 200\n return resp\n except Exception as e:\n logging.error(\"First time setup: {}\".format(e))\n return error(str(e.args[0]) + \" \"+str(e.args))\n\n\n@service.route(RESET_URL, methods=[\"POST\"])\ndef reset_semantic_labeler():\n \"\"\"\n This endpoint is needed to clean elastic search server, reset model and all read in data sources.\n It is crucial to reset elastic search before retraining.\n \"\"\"\n logging.info(\"Resetting the 
labeler\")\n try:\n semantic_labeler.reset()\n resp = jsonify(\"DSL reset!\")\n resp.status_code = 200\n return resp\n except Exception as e:\n logging.error(\"Semantic labeler reset: {}\".format(e.args))\n return error(\"Semantic labeler reset failed: {}\".format(e.args))\n\n\n@service.route(TEST_URL, methods=[\"GET\"])\ndef test_service():\n logging.info(\"Running test\")\n try:\n semantic_labeler.read_data_sources([\"soccer\", \"weather\"])\n semantic_labeler.train_random_forest([5], [\"soccer\"])\n semantic_labeler.test_semantic_types(\"weather\", [3])\n logging.info(\"Test complete\")\n resp = jsonify(\"Tests complete\")\n resp.status_code=200\n return resp\n except Exception as e:\n logging.error(\"Test: {}\".format(e.args))\n return error(\"Test failed due to: \"+str(e.args[0])+\" \"+str(e.args))\n\n\n@service.route('/domain', methods=[\"POST\"])\ndef read_folder():\n \"\"\"\n Index domain with data sources with semantic labeler.\n :return:\n \"\"\"\n if \"folder\" not in request.json:\n return bad_uri(\"missing parameter: 'folder' not in request\")\n folder_name = request.json[\"folder\"]\n logging.info(\"Indexing data sources from folder {}\".format(folder_name))\n try:\n semantic_labeler.read_data_sources([folder_name])\n logging.info(\"Listing folders for response.\")\n return list_folder()\n except Exception as e:\n logging.error(\"Indexing data sources: {}\".format(e))\n return error(\"Folder indexing failed due to: \"+str(e.args[0])+\" \"+str(e.args))\n\n\n@service.route('/folder', methods=[\"GET\"])\ndef list_folder():\n \"\"\"\n List available folders with data sources on the server.\n :return:\n \"\"\"\n logging.info(\"Listing folders\")\n resp = jsonify({\"folder_names\": list(semantic_labeler.dataset_map.keys())})\n resp.status_code = 200\n # TODO: implement\n return resp\n\n\n@service.route(SEMANTIC_TYPE_URL, methods=[\"GET\"])\ndef train_semantic_types():\n \"\"\"\n Train semantic types for a list of folders.\n :return:\n \"\"\"\n if request.json is None:\n return bad_uri(\"JSON missing\")\n if \"folder\" not in request.json:\n return bad_uri(\"missing parameter: 'folder' not in request\")\n folders = request.json[\"folder\"]\n if not(isinstance(folders, list)):\n return bad_uri(\"wrong parameter: 'folder' not list\")\n logging.info(\"Training semantic types for {}\".format(folders))\n semantic_labeler.train_semantic_types(folders)\n resp = jsonify(\"Semantic types trained.\")\n resp.status_code = 200\n # TODO: implement\n return resp\n\n\n@service.route('/train', methods=[\"POST\"])\ndef train_logistic_regression():\n \"\"\"\n Train logistic regression:\n train_sizes\n folder_names\n :return:\n \"\"\"\n if request.json is None:\n return bad_uri(\"JSON missing\")\n if \"folder\" not in request.json or \"size\" not in request.json:\n return bad_uri(\"missing parameter: 'folder' or/and 'size' not in request\")\n folders = request.json[\"folder\"]\n if not(isinstance(folders, list)):\n return bad_uri(\"wrong parameter: 'folder' not list\")\n train_sizes = request.json[\"size\"]\n if not(isinstance(train_sizes, list)):\n return bad_uri(\"wrong parameter: 'size' not list\")\n try:\n logging.info(\"Training logistic regression for {}\".format(folders))\n semantic_labeler.train_random_forest(train_sizes, folders)\n resp = jsonify(\"Logistic regression trained.\")\n resp.status_code = 200\n # TODO: implement\n return resp\n except Exception as e:\n logging.error(\"Training: {}\".format(e))\n return error(\"Training failed due to: {}\".format(e))\n\n\n@service.route('/predict', 
methods=[\"GET\"])\ndef predict_logistic_regression():\n \"\"\"\n Predict for all sources in the folder specified in the request json:\n folder\n The specified folder must be indexed by the semantic labeler already.\n :return:\n \"\"\"\n if request.json is None:\n return bad_uri(\"JSON missing\")\n if \"folder\" not in request.json:\n return bad_uri(\"missing parameter: 'folder' not in request\")\n folders = request.json[\"folder\"]\n logging.info(\"Predicting semantic types for {}\".format(folders))\n ## rather run predict per each data source separately!!!\n try:\n result = semantic_labeler.predict_folder_semantic_types(folders)\n resp = jsonify(result)\n resp.status_code = 200\n return resp\n except Exception as e:\n logging.error(\"Prediction for the folder failed: {}\".format(e))\n return error(\"Prediction for the folder failed due to: {}\".format(e))\n\n\n@service.route('/copy', methods=[\"POST\"])\ndef copy_data():\n \"\"\"\n Create folder with the specified name which holds sources specified in the request.\n Sources to be put into the folder should already exist on the server within a domain folder.\n Domain folders are fixed and listed in the global variable domains.\n We just copy necessary files from the domain folders into the specified folder.\n This method is useful to create train and test data folders.\n :return:\n \"\"\"\n logging.info(\"Creating new folder\")\n if request.json is None:\n return bad_uri(\"JSON missing\")\n if \"folder\" not in request.json or \"files\" not in request.json:\n return bad_uri(\"missing parameter: 'folder' or/and 'files' not in request\")\n requested_folder_name = request.json[\"folder\"]\n requested_file_names = request.json[\"files\"]\n if not(isinstance(requested_file_names, list)):\n return bad_uri(\"wrong parameter: 'files' not list\")\n try:\n\n # creating folder: it should contain subfolders data and model\n new_folder = os.path.join(semantic_labeler.data_folder, requested_folder_name)\n folder_sources = os.path.join(new_folder, \"data\")\n folder_models = os.path.join(new_folder, \"model\")\n if not(os.path.exists(new_folder)):\n logging.info(\"Creating folder: {}\".format(new_folder))\n os.makedirs(new_folder)\n if not(os.path.exists(folder_sources)):\n os.makedirs(folder_sources)\n logging.info(\"Creating folder: {}\".format(folder_sources))\n else:\n # delete all sources from the folder\n logging.info(\"Cleaning {}\".format(folder_sources))\n [os.remove(os.path.join(folder_sources, f)) for f in os.listdir(folder_sources)]\n if not(os.path.exists(folder_models)):\n logging.info(\"Creating folder: {}\".format(folder_models))\n os.makedirs(folder_models)\n else:\n # delete all models from the folder\n [os.remove(os.path.join(folder_models, f)) for f in os.listdir(folder_models)]\n logging.info(\"Cleaning {}\".format(folder_models))\n\n copied_files = 0\n for folder_name in domains:\n folder_path = os.path.join(semantic_labeler.data_folder, folder_name)\n data_folder_path = os.path.join(folder_path, \"data\")\n model_folder_path = os.path.join(folder_path, \"model\")\n for filename in os.listdir(data_folder_path):\n if filename in requested_file_names:\n src = os.path.join(data_folder_path, filename)\n dst = os.path.join(folder_sources, filename)\n logging.info(\"Coping source {}\".format(src))\n # we copy source to the train folder\n copyfile(src, dst)\n if os.path.exists(model_folder_path):\n src = os.path.join(model_folder_path, filename+\".model.json\")\n dst = os.path.join(folder_models, filename+\".model.json\")\n 
logging.info(\"Coping model {}\".format(src))\n # we copy model to the train folder\n copyfile(src, dst)\n copied_files += 1\n logging.info(\"Indexing new folder: {}\".format(requested_folder_name))\n semantic_labeler.read_data_sources([requested_folder_name])\n\n resp = jsonify({\"new_folder\": requested_folder_name, \"copied_sources\": copied_files})\n resp.status_code = 200\n return resp\n except Exception as e:\n logging.error(\"Creating new folder: {}\".format(e))\n return error(\"Folder creation failed due to: {}\".format(e))\n\n\nif __name__ == \"__main__\":\n service.run(debug=True, port=8000, host=\"0.0.0.0\")\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":18195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"192077941","text":"# coding:utf8\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def maxDepth(self, root: TreeNode) -> int:\n #return self.maxDepth_v1(root)\n return self.maxDepth_v2(root)\n def maxDepth_v1(self, root: TreeNode) -> int:\n '''\n 递归\n '''\n if not root:\n return 0\n return max(self.maxDepth_v1(root.left), self.maxDepth_v1(root.right)) + 1\n\n def maxDepth_v2(self, root: TreeNode) -> int:\n '''\n bfs\n '''\n queue = [root] if root else []\n res = 0\n while queue:\n for _ in range(len(queue)):\n curNode = queue.pop(0)\n if curNode.left:\n queue.append(curNode.left)\n if curNode.right:\n queue.append(curNode.right)\n res += 1\n\n\n return res\n\n","sub_path":"leetcode_everyday/pastqing_104.py","file_name":"pastqing_104.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"544297478","text":"#!/usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n\r\n#\r\n# Project: heartbeat\r\n#\r\n# File: log.py\r\n# Use: Keeps track of the log\r\n#\r\n# Copyright (C) 2012 KHH\r\n# Some Rights Reserved\r\n#\r\n# Author: KHH , (C) 2012\r\n#\r\n# Please consult the LICENSE and COPYING files\r\n# in the doc directory for further information\r\n#\r\n\r\nfrom __future__ import print_function\r\nimport time\r\nimport sys\r\n\r\nimport main\r\n\r\n# Log levels\r\nNONE = 0x00 \r\nFATAL_ERROR = 0x01\r\nERROR = 0x02\r\nVERBOSE = 0x10\r\nDEBUG = 0x20\r\nLEVELS = {\r\n\tNONE: 'NONE',\r\n\tFATAL_ERROR: 'FATAL_ERROR',\r\n\tERROR: 'ERROR',\r\n\tVERBOSE: 'VERBOSE',\r\n\tDEBUG: 'VERBSE',\r\n}\r\nRLEVELS = {\r\n\t'NONE': NONE,\r\n\t'FATAL_ERROR': FATAL_ERROR,\r\n\t'FATAL': FATAL_ERROR,\r\n\t'ERROR': ERROR,\r\n\t'VERBOSE': VERBOSE,\r\n\t'DEBUG': DEBUG,\r\n}\r\n\r\n# Message codes\r\nCODE_MASK = 0xFF0\r\nCODE_TODO = 0x000\r\nCODE_LOG = 0x100\r\nCODE_LOG_ERROR = 0x100\r\nCODE_OPTS = 0x110\r\nCODE_OPTS_ERROR = 0x110\r\nCODE_OPTS_DEBUG = 0x111\r\nCODE_OPTS_UNKNOWN_ARGUMENT = 0x112\r\nCODE_OPTS_INVALID_VALUE = 0x113\r\nCODE_OPTS_MALFORMED = 0x114\r\nCODE_SETTINGS = 0x120\r\nCODE_SETTINGS_INVALID = 0x120\r\nCODE_SETTINGS_DEBUG = 0x121\r\nCODE_GUI = 0x200\r\nCODE_GUI_DEBUG = 0x202\r\nCODE_NET = 0x300\r\nCODE_NET_DEBUG = 0x302\r\n\r\n\r\nl = [] \r\n\r\n# Find prefix function\r\ndef prefix(level, code):\r\n\tif level == FATAL_ERROR:\r\n\t\tr = \"Fatal error: \"\r\n\telif level == ERROR:\r\n\t\tr = \"Error: \"\r\n\telse:\r\n\t\tr = \"\"\r\n\t\r\n\tc = code&CODE_MASK\r\n\tif c == CODE_LOG:\r\n\t\tr += 'Log: '\r\n\telif c == CODE_OPTS:\r\n\t\tr += 'Opts: '\r\n\telif c == CODE_TODO:\r\n\t\tr += 'Todo: '\r\n\telif c 
== CODE_SETTINGS:\r\n\t\tr += 'Settings: '\r\n\telif c == CODE_GUI:\r\n\t\tr += \"GUI: \"\r\n\telif c == CODE_NET:\r\n\t\tr += \"Net: \"\r\n\telse:\r\n\t\tr += '?: '\r\n\treturn r\r\n\r\n# Logging function\r\ndef log(level, code, info):\r\n\ttry:\r\n\t\tLEVELS[level]\r\n\texcept KeyError:\r\n\t\tlog(ERROR, CODE_LOG_ERROR, 'Unknown log level {}'.format(level))\r\n\t\treturn\r\n\tl.append((time.time(), level, code, info))\r\n\ttry:\r\n\t\tif RLEVELS[main.settings['console']['verbosity'].upper()] >= level:\r\n\t\t\tif level <= ERROR:\r\n\t\t\t\tprint('{}{}'.format(prefix(level, code), info), file=sys.stderr)\r\n\t\t\t\tsys.stderr.flush()\r\n\t\t\telse:\r\n\t\t\t\tprint('{}{}'.format(prefix(level, code), info))\r\n\t\t\t\tsys.stdout.flush()\r\n\texcept KeyError:\r\n\t\tlog(FATAL_ERROR, CODE_SETTINGS_INVALID, \"console.verbosity value '{}'\".format(main.settings['console']['verbosity']))\r\n\t\tsys.exit(1)\r\n\r\n# Aliases\r\ndef error(code, info):\r\n\tlog(ERROR, code, info)\r\ndef verbose(code, info):\r\n\tlog(VERBOSE, code, info)\r\ndef debug(code, info):\r\n\tlog(DEBUG, code, info)\r\ndef todo(info):\r\n\tlog(VERBOSE, CODE_TODO, info)\r\n","sub_path":"src/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"347922227","text":"class showError(Exception):  # Exception is the top-level parent class of all exceptions\n    def __init__(self, name):\n        super().__init__()  # be sure to call the parent initializer\n        self.name = name\n        self.error = \"input %s is invalid\" % (self.name)\n\n\n\ntry:\n    name = input(\"Please enter a name: \")\n    if name == \"老王\":\n        raise showError(name)\nexcept showError as ret:\n    print(ret.error)\n    # entering 老王 (Lao Wang) raises the custom error\n","sub_path":"07day/04-自定义异常.py","file_name":"04-自定义异常.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"374900025","text":"import random\n\ncoloured_balls = [\"green\", \"yellow\", \"white\", \"black\", \"orange\", \"red\", \"purple\", \"blue\"]\n# functions to be imported\ndef random_cpu():\n    \"\"\"Randomly choose a combination of 4 coloured balls for the computer, i.e. the Mastercode.\n\n    @Input : the random module generates a random 4-coloured-ball combination\n    @Output : the Mastercode for the game, to be cracked by the player\"\"\"\n\n    return random.choices(coloured_balls, k=4)\n\ndef player_select():\n    \"\"\"Each round - ask the player to make a 4-colour choice.\n\n    @Input : the player has to input 4 coloured balls\n    @Output : 4 ball combination\"\"\"\n\n    print(\"choose 4 coloured balls (among green, yellow, white, black, orange, red, purple, blue)\")\n    print(\"Do or Do not. There is no try. good luck padawan\")\n\n    player_input_ball_list = []\n\n    for i in range(4):\n        player_input_ball = input()\n        while player_input_ball not in coloured_balls:\n            player_input_ball = input(\"\\n Wait a minute ! please choose a colour among the 8 coloured balls. \\\n i.e. green, yellow, white, black, orange, red, purple, blue. 
\\\n No more, No less \\n\")\n player_input_ball_list.append(player_input_ball)\n \n print(\"\\ncongrats, you have chosen\", player_input_ball_list, \"a wise choice \\n\")\n \n return player_input_ball_list\n\ndef each_round_check(computer_Mastercode,player_guess):\n \"\"\"check whether the player has guessed correctly the Master code\n \n @Input : round's player guess\n @Output : Computer gives feedback whether the player's getting close or not\"\"\"\n \n comparing_list_temp = [] \n \n for i,j in zip(computer_Mastercode,player_guess):\n if i == j:\n print(\"That ball\",j,\"is in the right place\")\n elif j in computer_Mastercode:\n comparing_list_temp.append(j)\n else:\n print(\"not even close mate ! There is no\",j,\"in my code\")\n if comparing_list_temp != []:\n print(\"\\nThose items\", comparing_list_temp, \"are in the Mastercode but in different location\")\n\ndef finale_result(player_guess,computer_Mastercode):\n \"\"\"Check the finale result of the game\n \n @Input : each round check whether the player is right OR after 10 rounds, the game ends\n @Output : gives the name of the winner - the Codebreaker OR the Mastercode.\n There is no middle ground\"\"\"\n \n if player_guess == computer_Mastercode:\n print(\"\\ncongrats, you have broken the code, you're a Grand Master !\")\n else:\n print(\"I knew you did not have what it takes to break the Mastercode. Try again, my very young padawan\") ","sub_path":"Refactored version with updates/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"549635701","text":"\n\nfrom xai.brain.wordbase.nouns._saucepan import _SAUCEPAN\n\n#calss header\nclass _SAUCEPANS(_SAUCEPAN, ):\n\tdef __init__(self,): \n\t\t_SAUCEPAN.__init__(self)\n\t\tself.name = \"SAUCEPANS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"saucepan\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_saucepans.py","file_name":"_saucepans.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"622580756","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom copy import copy, deepcopy\r\nfrom openpyxl.worksheet.cell_range import CellRange, MultiCellRange\r\n\r\nclass Merge():\r\n\r\n def __init__(self, rdsheet, wtsheet):\r\n self.rdsheet = rdsheet\r\n self.wtsheet = wtsheet\r\n self.prepare()\r\n\r\n @classmethod\r\n def get_sheet_maps(cls, rdsheet):\r\n pass\r\n\r\n def prepare(self):\r\n pass\r\n\r\n def merge_cell(self, rdrowx, rdcolx, wtrowx, wtcolx):\r\n rdcoords2d = (rdrowx, rdcolx)\r\n if rdcoords2d in self._top_left_map:\r\n if self._merge_ranges.get(rdcoords2d):\r\n crange = self._merge_ranges.get(rdcoords2d)\r\n self.add_cell_range(rdcoords2d, crange)\r\n self._merge_ranges[rdcoords2d] = (wtrowx, wtrowx, wtcolx, wtcolx)\r\n else:\r\n _top_left = self._already_set.get(rdcoords2d)\r\n if _top_left:\r\n rlo, rhi, clo, chi = self._merge_ranges.get(_top_left)\r\n self._merge_ranges[_top_left] = (rlo, max(rhi, wtrowx), clo, max(chi, wtcolx))\r\n\r\n def add_cell_range(self, rdcoords2d, crange):\r\n pass\r\n\r\n def merge_mcell(self, rdrowx, rdcolx, wtrowx, wtcolx, wt_top):\r\n rdcoords2d = (rdrowx, rdcolx)\r\n if rdcoords2d in self._top_left_map:\r\n rlo, rhi, clo, chi = self._merge_ranges.get(rdcoords2d)\r\n self._merge_ranges[rdcoords2d] = (rlo, max(rhi, wtrowx), clo, max(chi, wtcolx))\r\n else:\r\n _top_left = 
self._already_set.get(rdcoords2d)\r\n if _top_left:\r\n rlo, rhi, clo, chi = self._merge_ranges.get(_top_left)\r\n self._merge_ranges[_top_left] = (rlo, max(rhi, wtrowx), clo, max(chi, wtcolx))\r\n else:\r\n self.merge_single_cell(wtrowx, wtcolx, wt_top, rdcoords2d)\r\n\r\n def merge_single_cell(self, wtrowx, wtcolx, wt_top, rdcoords2d):\r\n pass\r\n\r\n def finish(self):\r\n pass\r\n\r\nclass MergeCell(Merge):\r\n\r\n @classmethod\r\n def get_sheet_maps(cls, rdsheet):\r\n _map = {}\r\n _nfa = {}\r\n for crange in rdsheet.merged_cells.ranges:\r\n rlo, rhi, clo, chi = crange.min_row, crange.max_row, crange.min_col, crange.max_col\r\n _map[(rlo, clo)] = (rlo, rhi, clo, chi)\r\n for rowx in range(rlo, rhi + 1):\r\n for colx in range(clo, chi + 1):\r\n _nfa[(rowx, colx)] = (rlo, clo)\r\n rdsheet.mc = (_map, _nfa)\r\n\r\n def prepare(self):\r\n self._merge_ranges = {}\r\n self._top_left_map, self._already_set = self.rdsheet.mc\r\n\r\n def add_cell_range(self, rdcoords2d, crange):\r\n rlo, rhi, clo, chi = crange\r\n cr = CellRange(min_row=rlo, max_row=rhi, min_col=clo, max_col=chi)\r\n self.wtsheet.merged_cells.add(cr)\r\n\r\n def merge_single_cell(self, wtrowx, wtcolx, wt_top, rdcoords2d):\r\n key = (wt_top, rdcoords2d)\r\n single_cell_cr = self._merge_ranges.get(key)\r\n if single_cell_cr:\r\n rlo, rhi, clo, chi = single_cell_cr\r\n else:\r\n rlo, rhi, clo, chi = wt_top, wtrowx, wtcolx, wtcolx\r\n self._merge_ranges[key] = (rlo, max(rhi, wtrowx), clo, max(chi, wtcolx))\r\n\r\n def finish(self):\r\n for key, crange in self._merge_ranges.items():\r\n self.add_cell_range(key, crange)\r\n self._merge_ranges.clear()\r\n\r\nclass DataValidation(Merge):\r\n\r\n @classmethod\r\n def get_sheet_maps(cls, rdsheet):\r\n _map = {}\r\n _nfa = {}\r\n _orig_map = {}\r\n for dv in rdsheet.data_validations.dataValidation:\r\n for crange in dv.ranges:\r\n rlo, rhi, clo, chi = crange.min_row, crange.max_row, crange.min_col, crange.max_col\r\n _map[(rlo, clo)] = (rlo, rhi, clo, chi)\r\n _orig_map[(rlo, clo)] = dv\r\n for rowx in range(rlo, rhi + 1):\r\n for colx in range(clo, chi + 1):\r\n _nfa[(rowx, colx)] = (rlo, clo)\r\n rdsheet.dv = _map, _nfa, _orig_map\r\n\r\n def prepare(self):\r\n self._merge_ranges = {}\r\n self.dv_copies = {}\r\n self._top_left_map, self._already_set, self._orig_map = self.rdsheet.dv\r\n\r\n def add_cell_range(self, rdcoords2d, crange):\r\n rlo, rhi, clo, chi = crange\r\n cr = CellRange(min_row=rlo, max_row=rhi, min_col=clo, max_col=chi)\r\n dv = self.dv_copies.get(rdcoords2d)\r\n if not dv:\r\n dv = copy(self._orig_map.get(rdcoords2d))\r\n dv.ranges = MultiCellRange()\r\n self.dv_copies[rdcoords2d] = dv\r\n dv.ranges.add(cr)\r\n\r\n def finish(self):\r\n for key,crange in self._merge_ranges.items():\r\n self.add_cell_range(key, crange)\r\n for key,dv in self.dv_copies.items():\r\n self.wtsheet.data_validations.append(dv)\r\n self.dv_copies.clear()\r\n self._merge_ranges.clear()\r\n\r\nfrom collections import defaultdict\r\n\r\nclass Image(Merge):\r\n\r\n def __init__(self, image, image_list):\r\n self.image = image\r\n self.image_list = image_list\r\n self.prepare()\r\n\r\n @classmethod\r\n def get_sheet_maps(cls, rdsheet, image, image_dict):\r\n _nfa = {}\r\n _from = image.anchor._from\r\n _to = image.anchor.to\r\n rlo = _from.row + 1\r\n clo = _from.col + 1\r\n rhi = _to.row + 1\r\n chi = _to.col + 1\r\n _top_left = (rlo, clo)\r\n for rowx in range(rlo, rhi + 1):\r\n for colx in range(clo, chi + 1):\r\n _nfa[(rowx, colx)] = (rlo, clo)\r\n count = image_dict[_top_left]\r\n 
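# editor's note (added): the running count keeps image_key unique when several images share one top-left anchor cell\r\n        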
image_dict[_top_left] += 1\r\n image._top_left = _top_left\r\n image._bottom_right = (rhi, chi)\r\n image._already_set = _nfa\r\n image.image_key = (rlo, clo, count)\r\n\r\n def prepare(self):\r\n self._merge_range = None\r\n self._top_left = self.image._top_left\r\n self._already_set = self.image._already_set\r\n #self.image_key = self.image.image_key\r\n self.image_ref_map = {}\r\n self.image_copy_map = {}\r\n\r\n def merge_cell(self, rdrowx, rdcolx, wtrowx, wtcolx):\r\n rdcoords2d = (rdrowx, rdcolx)\r\n if rdcoords2d == self._top_left:\r\n if self._merge_range:\r\n self.add_image_copy(self._merge_range)\r\n self._merge_range = (wtrowx, wtrowx, wtcolx, wtcolx)\r\n else:\r\n _top_left = self._already_set.get(rdcoords2d)\r\n if _top_left and self._merge_range:\r\n rlo, rhi, clo, chi = self._merge_range\r\n self._merge_range = (rlo, max(rhi, wtrowx), clo, max(chi, wtcolx))\r\n\r\n def merge_mcell(self, rdrowx, rdcolx, wtrowx, wtcolx, wt_top):\r\n rdcoords2d = (rdrowx, rdcolx)\r\n if rdcoords2d == self._top_left:\r\n if self._merge_range:\r\n rlo, rhi, clo, chi = self._merge_range\r\n self._merge_range = (rlo, max(rhi, wtrowx), clo, max(chi, wtcolx))\r\n else:\r\n _top_left = self._already_set.get(rdcoords2d)\r\n if _top_left and self._merge_range:\r\n rlo, rhi, clo, chi = self._merge_range\r\n self._merge_range = (rlo, max(rhi, wtrowx), clo, max(chi, wtcolx))\r\n\r\n def set_image_ref(self, image_ref, key):\r\n if image_ref:\r\n self.image_ref_map[key] = image_ref\r\n\r\n def add_image_copy(self, crange):\r\n rlo, rhi, clo, chi = crange\r\n image = deepcopy(self.image)\r\n _from = image.anchor._from\r\n _to = image.anchor.to\r\n _from.row = rlo - 1\r\n _from.col = clo - 1\r\n _to.row = rhi - 1\r\n _to.col = chi - 1\r\n self.image_copy_map[(rlo,clo)] = image\r\n\r\n def finish(self):\r\n if self._merge_range:\r\n self.add_image_copy(self._merge_range)\r\n self._merge_range = None\r\n for key,image in self.image_copy_map.items():\r\n ref = self.image_ref_map.get(key)\r\n if ref:\r\n image.ref = ref\r\n self.image_list.add_image(image)\r\n self.image_copy_map.clear()\r\n self.image_ref_map.clear()\r\n\r\nclass ImageList(Merge):\r\n\r\n @classmethod\r\n def get_sheet_maps(cls, rdsheet):\r\n image_dict = defaultdict(int)\r\n for image in rdsheet._images:\r\n Image.get_sheet_maps(rdsheet, image, image_dict)\r\n\r\n def prepare(self):\r\n self.image_map = {}\r\n self.image_rhi = 0\r\n for image in self.rdsheet._images:\r\n self.image_map[image.image_key] = Image(image, self)\r\n rhi, chi = image._bottom_right\r\n self.image_rhi = max(self.image_rhi, rhi)\r\n self.images = []\r\n\r\n def set_image_ref(self, image_ref, image_key):\r\n rowx, colx, image_key = image_key\r\n image = self.image_map.get(image_key)\r\n\r\n if not image:\r\n return\r\n image.set_image_ref(image_ref, (rowx, colx))\r\n\r\n def add_image(self, image):\r\n self.images.append(image)\r\n\r\n def merge_cell(self, rdrowx, rdcolx, wtrowx, wtcolx):\r\n for img in self.image_map.values():\r\n img.merge_cell(rdrowx, rdcolx, wtrowx, wtcolx)\r\n\r\n def merge_mcell(self, rdrowx, rdcolx, wtrowx, wtcolx, wt_top):\r\n for img in self.image_map.values():\r\n img.merge_mcell(rdrowx, rdcolx, wtrowx, wtcolx, wt_top)\r\n\r\n def fix(self):\r\n wtsheet_rhi = self.wtsheet.max_row\r\n wtsheet_chi = self.wtsheet.max_column\r\n rdsheet_rhi = self.rdsheet.max_row\r\n for row in range(self.image_rhi - rdsheet_rhi):\r\n wtrowx = wtsheet_rhi + row + 1\r\n rdrowx = rdsheet_rhi + row + 1\r\n for colx in range(1, wtsheet_chi+1):\r\n self.merge_cell(rdrowx, 
colx, wtrowx, colx)\r\n\r\n    def finish(self):\r\n        self.fix()\r\n        for img in self.image_map.values():\r\n            img.finish()\r\n        self.wtsheet._images = self.images\r\n\r\nclass Merger(object):\r\n\r\n    def __init__(self, rdsheet, wtsheet):\r\n        mc = MergeCell(rdsheet, wtsheet)\r\n        dv = DataValidation(rdsheet, wtsheet)\r\n        image_list = ImageList(rdsheet, wtsheet)\r\n        self.image_list = image_list\r\n        self.mergers = [mc, dv, image_list]\r\n\r\n    @classmethod\r\n    def get_sheet_maps(cls, rdsheet):\r\n        MergeCell.get_sheet_maps(rdsheet)\r\n        DataValidation.get_sheet_maps(rdsheet)\r\n        ImageList.get_sheet_maps(rdsheet)\r\n\r\n    def merge_cell(self, rdrowx, rdcolx, wtrowx, wtcolx):\r\n        for merger in self.mergers:\r\n            merger.merge_cell(rdrowx, rdcolx, wtrowx, wtcolx)\r\n\r\n    def merge_mcell(self, rdrowx, rdcolx, wtrowx, wtcolx, wt_top):\r\n        for merger in self.mergers:\r\n            merger.merge_mcell(rdrowx, rdcolx, wtrowx, wtcolx, wt_top)\r\n\r\n    def set_image_ref(self, image_ref, image_key):\r\n        self.image_list.set_image_ref(image_ref, image_key)\r\n\r\n    def finish(self):\r\n        for merger in self.mergers:\r\n            merger.finish()","sub_path":"xltpl/merger.py","file_name":"merger.py","file_ext":"py","file_size_in_byte":10851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"181721967","text":"from pico2d import *\nfrom random import *\nKPU_WIDTH, KPU_HEIGHT = 800, 600\n\ndef hand_events():\n    global running\n    global dir\n    global x, y\n    global m_x, m_y\n    events = get_events()\n    # relocate the hand once the character has reached it\n    if m_x == x and y == m_y:\n        m_x = randint(0, KPU_WIDTH)\n        m_y = randint(1, KPU_HEIGHT)\n\n    if x < m_x or m_x < x:\n        if x < m_x:\n            dir = 100\n            x += 1\n            if y < m_y:\n                y += 1\n            else:\n                y -= 1\n        else:\n            dir = 1\n            x -= 1\n            if y < m_y: y += 1\n            else: y -= 1\n\n    if y < m_y or y > m_y:\n\n        if y < m_y: y += 1\n        else: y -= 1\n\n        if x < m_x or x > m_x:\n            if x < m_x:\n                dir = 100\n                x += 1\n            else:\n                dir = 1\n                x -= 1\n    for event in events:\n        if event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n            running = False\n\n    pass\n\n\nopen_canvas(KPU_WIDTH, KPU_HEIGHT)\n\nhand = load_image('hand_arrow.png')\nkpu_ground = load_image('KPU_GROUND.png')\ncharacter = load_image('animation_sheet.png')\n\nrunning = True\n\nx, y = KPU_WIDTH // 2, KPU_HEIGHT // 2\nframe = 0\ndir = 0\nhide_cursor()\nm_x = randint(0, KPU_WIDTH)\nm_y = randint(0, KPU_HEIGHT)\nhand.draw(m_x, m_y)\nevents = get_events()\n\nwhile running:\n    clear_canvas()\n    kpu_ground.draw(KPU_WIDTH // 2, KPU_HEIGHT // 2)\n    character.clip_draw(frame * 100, dir, 100, 100, x, y)\n    hand.draw(m_x, m_y)\n    update_canvas()\n    frame = (frame + 1) % 8\n    hand_events()  # was running_events(), which is undefined; hand_events() also handles the ESC key\n    delay(0.0001)\n\n\nclose_canvas()\n","sub_path":"DRILL07/hand_arrow.py","file_name":"hand_arrow.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"49499887","text":"# Post_title_scorer class scores each chunk of text from the html\n# based on the patterns of a post-title\n# returns the total score of the chunk\n\nfrom difflib import SequenceMatcher\nfrom urllib.parse import urlsplit\nimport re\n\nclass post_title_scorer:\n\n    def __init__(self, response, text_node, text, node, node_depth):\n        self.response = response\n        self.text_node = text_node\n        self.text = text\n        self.node = node\n        self.node_depth = node_depth\n    \n    def total_score(self):\n\n        # checking if the parent node is h1\n        h1_score = 1 if self.node == 'h1' else 0\n\n\n        # checking if the attributes of the parent node contain 'title' or any 
similar words\n        attributes = ''.join(self.text_node.xpath('@*').extract())\n        title_words = ['title', 'headline', 'caption']\n        flag = 0\n        for title_word in title_words:\n            if attributes.find(title_word) != -1:\n                flag = 1\n        attr_score = 1 if flag == 1 else 0\n\n\n        # matching similarity with text between title tags\n        title_text = self.response.xpath('//title//text()').extract_first()\n        title_score = SequenceMatcher(None, self.text, title_text).ratio()\n\n\n        # matching similarity with texts in the resource path of the url\n        path = urlsplit(self.response.url).path\n        path_text = re.sub(r'-|/', ' ', path)\n        path_score = SequenceMatcher(None, self.text, path_text).ratio()\n\n\n        # checking the depth of the node in the html tree\n        pos_score = 1 - self.node_depth\n\n\n        # checking if the text ends with a full stop (.)\n        if self.text:\n            punc_score = 0 if self.text[-1] == '.' else 1\n        else:\n            punc_score = 0\n\n\n        # checking the character length\n        len_score = 1 if len(self.text) <= 200 else 0\n\n\n\n        patterns = [h1_score, attr_score, title_score, path_score, pos_score, punc_score, len_score]\n        weights = [3, 3, 2, 2, 1, 1, 1]\n\n        # calculating the total score of the text as a weighted sum of the pattern scores\n        total_score = 0\n\n        for i in range(len(patterns)):\n            total_score += patterns[i] * weights[i]\n\n        return total_score\n","sub_path":"WebContent/anol_project/crawlermachines/crawlermachine7/metadata/post_title.py","file_name":"post_title.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"297579241","text":"### This program intends to mark a certain journal for GSHF;\n### Author: Ye Gao\n### Date: 2017-11-1\n\n\nimport os\n\n\n\nfile = open('RootPath.dat', 'r')\nroot = (file.read()).replace(\"\\n\", \"\")  # read root path from RootPath.dat;\nfile.close()\n\n\npath = root.replace('root', '') + 'root.html'\nfile = open(path, 'r')\nJournalName = 'edu.hk'\nweb = (file.read()).replace(JournalName + \"
\", JournalName + \" dl.acm.org\") \nfile.close()\n\nfile = open(path,'wb')\nfile.write(web)\nfile.close()\n\n\n","sub_path":"markjournal.py","file_name":"markjournal.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36815527","text":"import time\n\n\nclass Anchor(set):\n \"\"\"An anchor is where a new word can be placed; has a set of allowable letters.\"\"\"\n\n\n# -----------------------------------------------------------------------------------------------------------------\n\nLETTERS = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\nANY = Anchor(LETTERS) # The anchor that can be any letter\n\nPOINTS = dict(A=1, B=3, C=3, D=2, E=1, F=4, G=2, H=4, I=1, J=8, K=5, L=1, M=3, N=1, O=1, P=3, Q=10, R=1, S=1,\n T=1, U=1, V=4, W=4, X=8, Y=4, Z=10, _=0)\n\nHAND_PREFIXES_CACHE = dict()\n\nACROSS, DOWN = (1, 0), (0, 1) # Directions that words can go\n\nNOPLAY = None\n\n\ndef bonus_template(quadrant):\n \"\"\"Make a board from the upper-left quadrant.\"\"\"\n return mirror(map(mirror, quadrant.split()))\n\n\ndef mirror(sequence):\n return sequence + sequence[-2:: -1]\n\n\nSCRABBLE = bonus_template(\"\"\"\n|||||||||\n|3..:...3\n|.2...;..\n|..2...:.\n|:..2...:\n|....2...\n|.;...;..\n|..:...:.\n|3..:...*\n\"\"\")\n\nWWF = bonus_template(\"\"\"\n|||||||||\n|...3..;.\n|..:..2..\n|.:..:...\n|3..;...2\n|..:...:.\n|.2...3..\n|;...:...\n|...:...*\n\"\"\")\n\nBONUS = WWF\n\nDW, TW, DL, TL = '23:;'\n\n\ndef show(board):\n \"\"\"Print the board and the BONUS[j][i] entries if no letter in the board.\"\"\"\n for (row_index, row) in enumerate(board):\n squares = []\n for (col_index, square) in enumerate(row):\n if is_letter(square):\n squares.append(square)\n else:\n squares.append(BONUS[row_index][col_index])\n print(\" \".join(squares))\n\n\n# -----------------------------------------------------------------------------------------------------------------\n\n\ndef readwordlist(filename):\n \"\"\"Read the words from a file and return a set of the words and a set of the prefixes.\"\"\"\n with open(filename) as file:\n text = file.read().upper()\n words = text.split()\n wordset = set()\n prefixset = set()\n for word in words:\n wordset.add(word)\n prefixset.update(set(prefixes(word)))\n return wordset, prefixset\n\n\ndef prefixes(word):\n \"\"\"A list of the initial sequences of a word, not including the complete word.\"\"\"\n return [word[:i] for i in range(len(word))]\n\n\nWORDS, PREFIXES = readwordlist('words4k.txt')\n\n\ndef find_words(letters, prefix='', results=None):\n \"\"\"Return all words in the dictionary WORDS that can be constructed from the specified letters.\n\n Note: This function ignores the game board\n \"\"\"\n if results is None:\n results = set()\n if prefix in WORDS:\n results.add(prefix)\n if prefix not in PREFIXES:\n return\n for L in letters:\n find_words(letters.replace(L, '', 1), prefix + L, results)\n return results\n\n# -----------------------------------------------------------------------------------------------------------------\n\n\ndef best_play(hand, board):\n \"\"\"Return the highest-scoring play, or None.\"\"\"\n plays = all_plays(hand, board)\n if not plays:\n return NOPLAY\n return sorted(plays, reverse=True)[0]\n\n\ndef make_play(play, board):\n \"\"\"Put the word down on the board.\"\"\"\n (score, (i, j), (di, dj), word) = play\n for letter in word:\n board[j][i] = letter\n i += di\n j += dj\n return board\n\n\n# 
-----------------------------------------------------------------------------------------------------------------\n\n\ndef all_plays(hand, board):\n \"\"\"Find all word plays in both directions.\n\n A play is a (score, pos, dir, word) tuple, where pos is an (i, j) pair, and dir is ACROSS or DOWN.\n \"\"\"\n hplays = horizontal_plays(hand, board) # set of ((i, j), word)\n vplays = horizontal_plays(hand, transpose(board)) # set of ((j, i), word)\n results = set()\n for (score, position, word) in hplays:\n results.add((score, position, ACROSS, word))\n for (score, position, word) in vplays:\n results.add((score, (position[1], position[0]), DOWN, word))\n return results\n\n\ndef transpose(matrix):\n \"\"\"Transpose the board.\n\n e.g. [[1,2,3], [4,5,6]] is transposed to [[1, 4], [2, 5], [3, 6]]\n \"\"\"\n # or [[M[j][i] for j in range(len(M))] for i in range(len(M[0]))]\n return map(list, zip(*matrix))\n\n\n# -----------------------------------------------------------------------------------------------------------------\n\n\ndef calculate_score(board, pos, direction, hand, word):\n \"\"\"Return the total score for this play.\"\"\"\n total, cross_total, word_multiplier = 0, 0, 1\n starti, startj = pos\n di, dj = direction\n other_direction = DOWN if direction == ACROSS else ACROSS\n for (n, L) in enumerate(word):\n i, j = starti + n * di, startj + n * dj\n sq = board[j][i]\n b = BONUS[j][i]\n word_multiplier *= (1 if is_letter(sq) else\n 3 if b == TW else 2 if b in (DW, '*') else 1)\n letter_mult = (1 if is_letter(sq) else\n 3 if b == TL else 2 if b == DL else 1)\n total += POINTS[L] * letter_mult\n if isinstance(sq, set) and sq is not ANY and direction is not DOWN:\n cross_total += cross_word_score(board, L, (i, j), other_direction)\n return cross_total + word_multiplier * total\n\n\ndef cross_word_score(board, L, pos, direction):\n \"\"\"Return the score of a word made in the other direction from the main word.\"\"\"\n i, j = pos\n (j2, word) = find_cross_word(board, i, j)\n return calculate_score(board, (i, j2), DOWN, L, word.replace('.', L))\n\n\n# -----------------------------------------------------------------------------------------------------------------\n\n\ndef horizontal_plays(hand, board):\n \"\"\"Find all horizontal plays -- (score, (i, j), word) triplets -- across all rows.\"\"\"\n results = set()\n for (j, row) in enumerate(board[1:-1], 1):\n set_anchors(row, j, board)\n for (i, word) in row_plays(hand, row):\n score = calculate_score(board, (i, j), ACROSS, hand, word)\n results.add((score, (i, j), word))\n return results\n\n\ndef set_anchors(row, j, board):\n \"\"\"Update the board with available anchor squares for horizontal plays in row j.\n\n Anchors are empty squares with a neighboring letter. Some are restricted\n by cross-words to be only a subset of letters in the alphabet.\n \"\"\"\n for (i, sq) in enumerate(row[1: -1], start=1):\n neighbor_list = (N, S, E, W) = neighbors(board, i, j)\n # Anchors are squares adjacent to a letter. 
Plus the '*' square.\n if sq == '*' or (is_empty(sq) and any(map(is_letter, neighbor_list))):\n if is_letter(N) or is_letter(S):\n # Find letters that fit with the cross (vertical) word\n (j2, w) = find_cross_word(board, i, j)\n row[i] = Anchor(L for L in LETTERS if w.replace('.', L) in WORDS)\n # Unrestricted empty square -- any letter will fit.\n else:\n row[i] = ANY\n\n\ndef neighbors(board, i, j):\n \"\"\"Return a list of the contents of the four neighboring squares, in the order N,S,E,W.\"\"\"\n return [board[j-1][i], board[j+1][i], board[j][i+1], board[j][i-1]]\n\n\ndef find_cross_word(board, i, j):\n \"\"\"Find the vertical word that crosses board[j][i].\n\n Return (j2, w), where j2 is the starting row, and w is the word\n \"\"\"\n sq = board[j][i]\n w = sq if is_letter(sq) else '.'\n for j2 in range(j, 0, -1):\n sq2 = board[j2 - 1][i]\n if is_letter(sq2):\n w = sq2 + w\n else:\n break\n for j3 in range(j + 1, len(board)):\n sq3 = board[j3][i]\n if is_letter(sq3):\n w = w + sq3\n else:\n break\n return j2, w\n\n\n# -----------------------------------------------------------------------------------------------------------------\n\n\ndef row_plays(hand, row):\n \"\"\"Return the set of legal plays in the specified row.\n\n A row play is a (start, 'WORD') pair,\n \"\"\"\n results = set()\n # for each anchor and for each legal prefix, add all legal suffixes and save any valid words in results\n for (i, square) in enumerate(row[1: -1], start=1):\n if isinstance(square, Anchor):\n prefix, max_size = legal_prefix(i, row)\n # there are already letters in the board, to the left of this anchor\n if prefix:\n start = i - len(prefix)\n add_suffixes(hand, prefix, start, row, results, anchored=False)\n # the board is empty to the left of this anchor\n else:\n for prefix in find_prefixes(hand):\n if len(prefix) <= max_size:\n start = i - len(prefix)\n add_suffixes(removed(hand, prefix), prefix, start, row, results, anchored=False)\n return results\n\n\ndef legal_prefix(i, row):\n \"\"\"A legal prefix of an anchor at row[i] is either a string of letters already on the board,\n or new letters that fit into an empty space.\n\n Return the tuple (prefix_on_board, maxsize) to indicate this.\n \"\"\"\n start = i\n while is_letter(row[start - 1]):\n start -= 1\n # There is a prefix consisting of letters from the board\n if start < i:\n return ''.join(row[start: i]), i - start\n while is_empty(row[start - 1]) and not isinstance(row[start - 1], set):\n start -= 1\n return '', i - start\n\n\ndef is_letter(sq):\n return isinstance(sq, str) and sq in LETTERS\n\n\ndef is_empty(sq):\n \"\"\"Is this an empty square (no letters, but a valid position on board; excludes the border).\"\"\"\n return sq == '.' 
or sq == '*' or isinstance(sq, set)\n\n\ndef add_suffixes(hand, pre, start, row, results, anchored=True):\n    \"\"\"Add all possible suffixes, and accumulate (start, word) pairs in results.\"\"\"\n    i = start + len(pre)\n    if pre in WORDS and anchored and not is_letter(row[i]):\n        results.add((start, pre))\n    if pre in PREFIXES:\n        sq = row[i]\n        if is_letter(sq):\n            add_suffixes(hand, pre + sq, start, row, results)\n        elif is_empty(sq):\n            possibilities = sq if isinstance(sq, set) else ANY\n            for L in hand:\n                if L in possibilities:\n                    add_suffixes(hand.replace(L, '', 1), pre + L, start, row, results)\n    return results\n\n\n# -----------------------------------------------------------------------------------------------------------------\n\n\ndef longest_words(hand, board_letters):\n    \"\"\"Return all word plays, longest first.\"\"\"\n    words = word_plays(hand, board_letters)\n    return sorted(words, key=len, reverse=True)\n\n\ndef topn(hand, board_letters, n=10):\n    \"\"\"Return a list of the top n words that hand can play, sorted by word score.\"\"\"\n    words = word_plays(hand, board_letters)\n    return sorted(words, key=word_score, reverse=True)[: n]\n\n\ndef word_score(word):\n    \"\"\"The sum of the individual letter point scores for this word.\"\"\"\n    return sum(POINTS[letter] for letter in word)\n\n\ndef word_plays(hand, board_letters):\n    \"\"\"Find all word plays from hand that can be made to abut with a letter on board.\n\n    Find prefix + L + suffix; L from board_letters, rest from hand\n    \"\"\"\n    results = set()\n    for prefix in find_prefixes(hand, '', set()):\n        for L in board_letters:\n            extend_prefix(removed(hand, prefix), prefix + L, results)\n    return results\n\n\ndef removed(letters, remove):\n    \"\"\"Return a string of letters, but with each letter in remove removed once.\"\"\"\n    for L in remove:\n        letters = letters.replace(L, '', 1)\n    return letters\n\n\ndef find_prefixes(hand, prefix='', results=None):\n    \"\"\"Find all prefixes (of words) that can be made from letters in hand.\n\n    The set of prefixes from the last hand is cached to speed-up searching of valid word plays\n    across the set of anchor positions in the board.\n    \"\"\"\n    global HAND_PREFIXES_CACHE\n\n    if results is None:  # guard added: calls like find_prefixes(hand) previously crashed on results.add\n        results = set()\n    if prefix == '' and (hand in HAND_PREFIXES_CACHE):\n        return HAND_PREFIXES_CACHE[hand]\n    if prefix in PREFIXES:\n        results.add(prefix)\n    for L in hand:\n        find_prefixes(hand.replace(L, '', 1), prefix + L, results)\n    if prefix == '':\n        HAND_PREFIXES_CACHE = {hand: results}\n    return results\n\n\ndef extend_prefix(hand, prefix, results):\n    \"\"\"Return the set of words that can be formed by extending prefix with letters in hand.\n\n    Renamed from add_suffixes: a second definition under that name shadowed the\n    board-aware add_suffixes above and broke row_plays.\n    \"\"\"\n    if prefix in WORDS:\n        results.add(prefix)\n    if prefix not in PREFIXES:\n        return\n    for L in hand:\n        extend_prefix(hand.replace(L, '', 1), prefix + L, results)\n    return results\n\n","sub_path":"src/Scrabble.py","file_name":"Scrabble.py","file_ext":"py","file_size_in_byte":12435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"475056448","text":"textData = \"0btw50.txt\"\r\n\r\nwith open(textData) as f:\r\n    lines = f.readlines()\r\n\r\n\r\nchar = ['?', ',', '!', 'amp;', '-', '...', '\\\\n', '\\\\', ' ', '.']\r\nfor index in range(0, len(lines)):\r\n    for i in range(len(char)):  # the original range(0, 9) skipped the final '.' entry\r\n        lines[index] = lines[index].replace(char[i], ' ')\r\n\r\n'''\r\nfor i in range(0,len(lines)):\r\n    lines[i].append(':')\r\n'''\r\n\r\nfile = open(\"new.txt\", \"w\")\r\nfor i in range(0, len(lines)):\r\n    
file.write(lines[i])\r\nfile.close()","sub_path":"processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"23835710","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\n\nheaders = ['Column1', 'Column2', 'Column3', 'Column4', 'Column5', 'Column6', 'Column7', 'Column8', 'Column9', 'Column10', 'Column11', 'Column12', 'Column13', 'Column14', 'Column15', 'Column16', 'Column17', 'Column18', 'Column19', 'Column20', 'Column21', 'Column22', 'Column23', 'Column24', 'Column25', 'Column26', 'Column27', 'Column28', 'Column29', 'Column30', 'Column31', 'Column32', 'Column33', 'Column34', 'Class']\ndata = pd.read_csv(\"Ionosphere.csv\", header=None, names=headers)\n\n# select the 34 feature columns; the original data['Column1', ...] tuple indexing raises a KeyError\nx = data[headers[:-1]]\nx = np.array(x)\nx[:5]\n\none_hot_encoder = OneHotEncoder(sparse=False)\ny = data.Class\ny = one_hot_encoder.fit_transform(np.array(y).reshape(-1, 1))\ny[:5]\nMAX_ITER = 20\n\ndef sigmoid_util(x):\n    return 1 / (1 + np.exp(-x))\n\ndef calc_util(x):\n    return np.multiply(x, 1-x)\n\ndef randomWeightInitialize(nodes):\n    layers, weights = len(nodes), []\n    for i in range(1, layers):\n        w = [[np.random.uniform(-1, 1) for k in range(nodes[i-1] + 1)] for j in range(nodes[i])]\n        weights.append(np.matrix(w))\n    return weights\n\ndef forwardPropagation(x, weights, layers):\n    activation, layerInput = [x], x\n    for j in range(layers):\n        outputVal = sigmoid_util(np.dot(layerInput, weights[j].T))\n        activation.append(outputVal)\n        layerInput = np.append(1, outputVal)\n    return activation\n\n\ndef backPropagation(y, activation, weights, layers):\n    outputFinal = activation[-1]\n    error = np.matrix(y - outputFinal)\n    for j in range(layers, 0, -1):\n        currr = activation[j]\n        if(j > 1):\n            prev = np.append(1, activation[j-1])\n        else:\n            prev = activation[0]\n        delta = np.multiply(error, calc_util(currr))\n        weights[j-1] += lr * np.multiply(delta.T, prev)  # note: relies on the module-level lr, not the lr passed to adjustWeights\n        w = np.delete(weights[j-1], [0], axis=1)\n        error = np.dot(delta, w)\n    return weights\n\ndef adjustWeights(X, Y, lr, weights):\n    layers = len(weights)\n    for i in range(len(X)):\n        x, y = X[i], Y[i]\n        x = np.matrix(np.append(1, x))\n        activations = forwardPropagation(x, weights, layers)\n        weights = backPropagation(y, activations, weights, layers)\n    return weights\n\ndef MLP(X_train, Y_train, epochs=20, nodes=[], lr=0.13):\n    hidden_layers = len(nodes) - 1\n    weights = randomWeightInitialize(nodes)\n    for epoch in range(1, epochs+1):\n        weights = adjustWeights(X_train, Y_train, lr, weights)\n        if(epoch % 20 == 0):\n            x = 1  # placeholder for periodic progress reporting\n    return weights\n\ndef predictClass(item, weights):\n    layers = len(weights)\n    item = np.append(1, item)\n    activation = forwardPropagation(item, weights, layers)\n    outputFinal = activation[-1].A1\n    index = findMaxActivation(outputFinal)\n    y = [0 for i in range(len(outputFinal))]\n    y[index] = 1\n    return y\n\ndef findMaxActivation(output):\n    m, index = output[0], 0\n    for i in range(1, len(output)):\n        if(output[i] > m):\n            m, index = output[i], i\n    return index\n\ndef calcAccuracy(X, Y, weights):\n    
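# editor's note (added): exact-match accuracy - the share of one-hot predictions equal to their labels\n    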
correct = 0\n for i in range(len(X)):\n x, y = X[i], list(Y[i])\n guess = predictClass(x, weights)\n if(y == guess):\n correct += 1\n return correct / len(X)\n\ninput_layer = len(x[0]) \noutput_layer = len(y[0]) \n\nlayers = [input_layer, 4, output_layer]\nlr, epochs = 0.13, 100\n\nresults=[]\nfor i in range(MAX_ITER):\n x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.7,shuffle=True)\n weights = MLP(x_train, y_train, epochs=epochs, nodes=layers, lr=lr);\n results.append(calcAccuracy(x_test, y_test, weights))\n\nprint(\"Results \\n\" + str(results))\nprint(\"\\n\")\nprint(sum(results)/20*100)","sub_path":"mlp_ionosphere.py","file_name":"mlp_ionosphere.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"388205932","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/martijndevos/Documents/anydex-core/pyipv8/ipv8/peerdiscovery/churn.py\n# Compiled at: 2019-05-16 09:27:10\nfrom __future__ import absolute_import\nfrom random import sample\nfrom time import time\nfrom .discovery import DiscoveryStrategy\n\nclass RandomChurn(DiscoveryStrategy):\n \"\"\"\n Select random peers, ping them if inactive, remove them if unresponsive.\n \"\"\"\n\n def __init__(self, overlay, sample_size=8, ping_interval=10.0, inactive_time=27.5, drop_time=57.5):\n \"\"\"\n Random peer removal strategy.\n\n :param overlay: the overlay to sample peers from\n :param sample_size: the amount of peers to check at once\n :param ping_interval: time between pings in the range of inactive_time to drop_time\n :param inactive_time: time before pings are sent to check liveness\n :param drop_time: time after which a peer is dropped\n \"\"\"\n super(RandomChurn, self).__init__(overlay)\n self._pinged = {}\n self.sample_size = sample_size\n self.ping_interval = ping_interval\n self.inactive_time = inactive_time\n self.drop_time = drop_time\n\n def should_drop(self, peer):\n \"\"\"\n Have we passed the time before we consider this peer to be unreachable.\n \"\"\"\n if peer.last_response == 0:\n return False\n return time() > peer.last_response + self.drop_time\n\n def is_inactive(self, peer):\n \"\"\"\n Have we passed the time before we consider this peer to be inactive.\n \"\"\"\n if peer.last_response == 0:\n return False\n return time() > peer.last_response + self.inactive_time\n\n def take_step(self):\n \"\"\"\n Select a new (set of) peer(s) to investigate liveness for.\n \"\"\"\n with self.walk_lock:\n sample_size = min(len(self.overlay.network.verified_peers), self.sample_size)\n if sample_size:\n window = sample(self.overlay.network.verified_peers, sample_size)\n for peer in window:\n if self.should_drop(peer) and peer.address in self._pinged:\n self.overlay.network.remove_peer(peer)\n del self._pinged[peer.address]\n elif self.is_inactive(peer):\n if peer.address in self._pinged and time() > self._pinged[peer.address] + self.ping_interval:\n del self._pinged[peer.address]\n if peer.address not in self._pinged:\n self._pinged[peer.address] = time()\n packet = self.overlay.create_ping()\n self.overlay.endpoint.send(peer.address, packet)\n\n\nclass PingChurn(DiscoveryStrategy):\n\n def __init__(self, overlay, ping_interval=25):\n super(PingChurn, self).__init__(overlay)\n self.ping_interval = ping_interval\n\n def take_step(self):\n with self.walk_lock:\n 
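# Sweep the routing table: nodes already flagged as bad are dropped first,\n # then every node whose last response is older than ping_interval is pinged;\n # the errback swallows failures so a dead node cannot break the sweep.\n 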
self.overlay.routing_table.remove_bad_nodes()\n pinged = []\n now = time()\n for bucket in self.overlay.routing_table.trie.values():\n for node in bucket.nodes.values():\n if node.last_response + self.ping_interval <= now:\n self.overlay.ping(node).addErrback(lambda _: None)\n pinged.append(node)\n\n return pinged","sub_path":"pycfiles/anydex-0.1.0-py3-none-any/churn.py","file_name":"churn.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"414524600","text":"import torch\nimport torchvision\nfrom PIL import Image\nimport helper_code.transforms as T\nimport cv2 as cv\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nimport time\nfrom math import floor\nimport pickle\n\n\ndef center_of_box(box):\n return (box[0] + box[2]) // 2, (box[1] + box[3]) // 2\n\n\ndef floor_lists(boxes):\n result = []\n for box in boxes:\n result.append(list(map(lambda x: floor(float(x)), box)))\n return result\n\n\ndef calc_area(box):\n return (box[2]-box[0])*(box[3]-box[1])\n\n\n\ndef adjusted_iou(box1, box2):\n iou_b = [max(box1[0], box2[0]),\n max(box1[1], box2[1]),\n min(box1[2], box2[2]),\n min(box1[3], box2[3])]\n # NOTE: the original source is garbled from here down to the filtering loop\n # in main_filter below; the remainder of this function, split_arrays and the\n # head of main_filter are reconstructed from their call sites and are best\n # guesses rather than the author's original code.\n if iou_b[2] < iou_b[0] or iou_b[3] < iou_b[1]:\n return 0 # boxes do not overlap\n # 'adjusted' IoU: intersection area over the area of the smaller box\n return calc_area(iou_b) / min(calc_area(box1), calc_area(box2))\n\n\ndef split_arrays(boxes, scores, labels):\n # assumed label mapping of the 3-class detector: 1 = head, 2 = mask\n boxes_h, scores_h, boxes_m, scores_m = [], [], [], []\n for box, score, label in zip(boxes, scores, labels):\n if label == 1:\n boxes_h.append(box)\n scores_h.append(score)\n elif label == 2:\n boxes_m.append(box)\n scores_m.append(score)\n return boxes_h, scores_h, boxes_m, scores_m\n\n\ndef main_filter(boxes, scores, min_score):\n good_boxes = []\n co = []\n for i, box in enumerate(boxes):\n add_im = False\n if scores[i] > min_score:\n add_im = True\n\n # setup while loop\n # deleting images in loop from array doesn't allow for a forloop\n j = 0\n run = True\n if len(good_boxes) == 0:\n run = False\n\n while run:\n if adjusted_iou(box, good_boxes[j]) > 0.8:\n if calc_area(box) < calc_area(good_boxes[j]):\n del good_boxes[j]\n del co[j]\n j -= 1\n else:\n add_im = False\n break\n j += 1\n if len(good_boxes) == j:\n run = False\n\n if add_im:\n good_boxes.append(box)\n co.append(center_of_box(box))\n return good_boxes, co\n\n\nclass Detector:\n \"\"\"\n Custom class for implementing self trained model.\n \"\"\"\n\n def __init__(self, path_model=\"./data/model/training_49_full.pt\"):\n # define device (gpu/cpu)\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print('Evaluate on GPU.') if torch.cuda.is_available() else print('No GPU available, evaluating on CPU.')\n\n # loading model from dict\n self.model = torchvision.models.detection.fasterrcnn_resnet50_fpn()\n in_features = self.model.roi_heads.box_predictor.cls_score.in_features\n self.model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 3)\n self.model.load_state_dict(torch.load(path_model))\n self.model.to(self.device)\n\n # OLD (can produce multiple errors!)\n # self.model = torch.load(path_model)\n\n # evaluation mode\n self.model.eval()\n torch.no_grad() # note: a bare torch.no_grad() call has no effect; inference should run inside 'with torch.no_grad():'\n\n # define transformation\n self.transform = T.Compose([T.ToTensor()])\n\n def detect_both_frames(self, left_frame, right_frame, min_score_h=0.9, min_score_m=0.9):\n # convert images to right format\n img_L = Image.fromarray(left_frame).convert(\"RGB\")\n img_R = Image.fromarray(right_frame).convert(\"RGB\")\n img_L, _ = self.transform(img_L, dict())\n img_R, _ = self.transform(img_R, dict())\n\n # imgs on gpu if available\n imgs = [img_L.to(self.device), img_R.to(self.device)]\n\n # evaluate images\n torch.cuda.synchronize()\n outputs = self.model(imgs) # this takes the most time\n\n # extracting from\n output_L, output_R = [{k: v.to(torch.device(\"cpu\")) for k, v in t.items()} for t in outputs]\n boxes_L, scores_L, labels_L = (floor_lists(output_L[\"boxes\"].tolist()),\n output_L[\"scores\"].tolist(),\n output_L[\"labels\"].tolist())\n\n boxes_R, scores_R, labels_R = (floor_lists(output_R[\"boxes\"].tolist()),\n 
output_R[\"scores\"].tolist(),\n output_R[\"labels\"].tolist())\n\n # split heads and masks\n boxes_L_h, scores_L_h, boxes_L_m, scores_L_m = split_arrays(boxes_L, scores_L, labels_L)\n boxes_R_h, scores_R_h, boxes_R_m, scores_R_m = split_arrays(boxes_R, scores_R, labels_R)\n\n # filter\n g_boxes_L_h, co_L_h = main_filter(boxes_L_h, scores_L_h, min_score_h)\n g_boxes_L_m, co_L_m = main_filter(boxes_L_m, scores_L_m, min_score_m)\n\n g_boxes_R_h, co_R_h = main_filter(boxes_R_h, scores_R_h, min_score_h)\n g_boxes_R_m, co_R_m = main_filter(boxes_R_m, scores_R_m, min_score_m)\n\n\n return [(co_L_h, co_R_h, g_boxes_L_h, g_boxes_R_h),\n (co_L_m, co_R_m, g_boxes_L_m, g_boxes_R_m)]\n\n\nif __name__ == \"__main__\":\n print(\"Extracting data from video and saving it.\")\n cap_1 = cv.VideoCapture('./data/videos/output_more_person_1.avi')\n cap_2 = cv.VideoCapture('./data/videos/output_more_person_0.avi')\n\n detector = Detector()\n\n all_info = []\n total_frames = int(cap_1.get(cv.CAP_PROP_FRAME_COUNT))\n print(\"Frames:\", total_frames)\n print(\"Est. time in min:\", total_frames*0.57/60)\n\n start = time.time()\n i = 0\n while cap_1.isOpened():\n ret_1, frame_1 = cap_1.read()\n if not ret_1:\n print(\"failed to grab frame_1\")\n break\n\n ret_2, frame_2 = cap_2.read()\n if not ret_2:\n print(\"failed to grab frame_2\")\n break\n # frame_1, frame_2 = cv.resize(frame_1, (480, 270)), cv.resize(frame_2, (480, 270))\n data = detector.detect_both_frames(frame_1, frame_2)\n boxes_h = data[0][2]\n boxes_m = data[1][2]\n\n all_info.append(detector.detect_both_frames(frame_1, frame_2))\n i += 1\n if i%10 == 0:\n print(i/total_frames*100)\n end = time.time()\n print(\"Total time:\", end-start)\n print(\"Total frames:\", len(all_info))\n print(\"Time/frame:\", (end-start)/len(all_info))\n\n print(all_info)\n pickle_out = open(\"data/video_data/more_person_new.pckl\", \"wb\")\n pickle.dump(all_info, pickle_out)\n pickle_out.close()\n\n cap_1.release()\n cap_2.release()","sub_path":"Method_Intersection/Detector_new.py","file_name":"Detector_new.py","file_ext":"py","file_size_in_byte":6478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"373395681","text":"from tkinter import *\r\n\r\nOPTIONS = [\r\n \"Hard\",\r\n \"Medium\",\r\n \"Easy\",\r\n ]\r\n\r\nbgColor = [\"#0A1B2E\", \"#77A0A9\", \"#6F7D8C\"] # Blue shades\r\ndifficultyColor = [\"#FF4242\", \"#FFC857\", \"#4DAA57\"] # Red, Yellow, Green\r\n\r\nroot = Tk()\r\nroot.title(\"Task Destroyer 9001\")\r\nroot.geometry(\"1100x500\")\r\n\r\n# Instantiate an empty master task list\r\ntask_items = []\r\ntask_descriptions = []\r\ntask_priorities = []\r\n\r\n# String Variable for difficulty dropdown\r\nv = StringVar(root)\r\nv.set(OPTIONS[0])\r\n\r\n\r\ndef addItem():\r\n input_text = taskEntry.get() # Getting the text from the text box input\r\n input_description = taskBody.get(\"1.0\", 'end')\r\n input_priority = v.get()\r\n task_items.append(input_text) # Appends new item to master task list\r\n task_descriptions.append(input_description.rstrip(\"\\n\"))\r\n task_priorities.append(input_priority)\r\n taskBox.insert('end', input_text[0:20].upper() + \"... 
//Priority: \"\r\n + input_priority) # Insert the text variable in the listbox\r\n taskEntry.delete(0, 'end') # Destroys text inside taskEntry\r\n taskBody.delete(\"1.0\", 'end')\r\n\r\n print(\"\\nitem added\")\r\n print(\"Current items in task_items:\", task_items)\r\n print(\"Current items in task_descriptions:\", task_descriptions)\r\n print(\"Current items in task_priorities:\", task_priorities)\r\n\r\n\r\ndef deleteItem():\r\n selectedItem = taskBox.curselection()\r\n taskBox.delete(selectedItem)\r\n item_index = ''.join(str(x) for x in selectedItem) # tuple->string\r\n task_items.pop(int(item_index))\r\n task_descriptions.pop(int(item_index))\r\n task_priorities.pop(int(item_index))\r\n\r\n print(\"\\nitem deleted\", selectedItem)\r\n print(\"Current items in task_items:\", task_items)\r\n print(\"Current items in task_descriptions:\", task_descriptions)\r\n print(\"Current items in task_priorities:\", task_priorities)\r\n\r\n\r\ndef deleteAll():\r\n taskBox.delete(0, len(task_items))\r\n task_items.clear()\r\n task_descriptions.clear()\r\n task_priorities.clear()\r\n\r\n print(\"\\nlist cleared\")\r\n print(\"Current items in task_items:\", task_items)\r\n print(\"Current items in task_descriptions:\", task_descriptions)\r\n print(\"Current items in task_priorities:\", task_priorities)\r\n\r\n\r\ndef taskDetails():\r\n taskDetailsFrame = Frame(root, width=350, height=300, pady=10,\r\n bg=bgColor[2])\r\n taskDetailsFrame.grid(row=0, column=2, sticky=N)\r\n Label(taskDetailsFrame,\r\n text=\"Task Details\",\r\n bg=bgColor[2],\r\n fg=\"white\").grid(row=0, column=0, columnspan=2, sticky=NSEW)\r\n\r\n selectedItem = taskBox.curselection()\r\n item_index = ''.join(str(x) for x in selectedItem)\r\n\r\n Label(taskDetailsFrame,\r\n text=\"Task: \" + task_items[int(item_index)],\r\n bg=bgColor[2],\r\n fg='white').grid(row=1, column=0, columnspan=2, sticky=W)\r\n Label(taskDetailsFrame,\r\n text=\"Priority:\",\r\n bg=bgColor[2],\r\n fg='white').grid(row=2, column=0, sticky=W)\r\n task_priority = Label(taskDetailsFrame,\r\n text=task_priorities[int(item_index)],\r\n bg=bgColor[2], fg='white', width=20)\r\n if task_priorities[int(item_index)] == OPTIONS[0]:\r\n task_priority.config(bg=difficultyColor[0])\r\n elif task_priorities[int(item_index)] == OPTIONS[1]:\r\n task_priority.config(bg=difficultyColor[1])\r\n elif task_priorities[int(item_index)] == OPTIONS[2]:\r\n task_priority.config(bg=difficultyColor[2])\r\n else:\r\n task_priority.config(bg='white')\r\n task_priority.grid(row=2, column=1, sticky=W)\r\n\r\n task_description = Text(taskDetailsFrame, width=25, wrap=WORD, height=3,\r\n bg=bgColor[2], fg='white')\r\n task_description.insert(\"1.0\", str(task_descriptions[int(item_index)]))\r\n task_description.config(state=DISABLED)\r\n task_description.grid(row=3, column=0, columnspan=2)\r\n\r\n Button(taskDetailsFrame, text=\"Close Details Pane\",\r\n command=taskDetailsFrame.destroy).grid(row=10)\r\n\r\n\r\n# Create and Layout Frames\r\nleftFrame = Frame(root, width=350, height=300, pady=10, padx=50, bg=bgColor[0])\r\nrightFrame = Frame(root, width=350, height=300, bg=bgColor[0])\r\n\r\nroot.grid_columnconfigure(1, weight=1)\r\nroot.grid_rowconfigure(0, weight=1)\r\nleftFrame.grid(row=0, column=0, sticky=\"nsew\")\r\nrightFrame.grid(row=0, column=1, sticky=\"nsew\")\r\n\r\n# Create Label Widgets\r\nenterTaskLabel = Label(leftFrame, text=\"Task Name: \", bg=bgColor[0],\r\n fg=\"white\")\r\ntaskBodyLabel = Label(leftFrame, text=\"Task Body: \", bg=bgColor[0], fg=\"white\")\r\npriorityLabel 
= Label(leftFrame, text=\"Task Priority: \", bg=bgColor[0],\r\n fg=\"white\")\r\nblankLabelPad = Label(leftFrame, text=\"\", width=5, bg=bgColor[0])\r\nblankLabelPad2 = Label(leftFrame, text=\"\", width=5, bg=bgColor[0])\r\nblankLabelPad3 = Label(leftFrame, text=\"\", width=5, bg=bgColor[0])\r\ntaskBoxLabel = Label(rightFrame, text=\"Tasks\", width=39, height=2,\r\n bg=bgColor[2], fg=\"white\")\r\n\r\n# Create Entry Widgets\r\ntaskEntry = Entry(leftFrame, width=67)\r\ntaskBody = Text(leftFrame, width=50, height=10)\r\n\r\n# Create Dropdown for Priorities\r\npriorityMenu = OptionMenu(leftFrame, v, *OPTIONS)\r\npriorityMenu.config(width=10, pady=15)\r\n\r\n# Create Button Widgets\r\naddItemButton = Button(leftFrame, text=\"ADD TASK\", command=addItem, width=56)\r\ndeleteItemButton = Button(rightFrame, text=\"Delete Selected Task\",\r\n command=deleteItem, width=38, pady=5)\r\ndeleteAllButton = Button(rightFrame, text=\"Delete All\", command=deleteAll,\r\n width=38, pady=5)\r\nexitAppButton = Button(leftFrame, text=\"Quit\", command=root.destroy, width=10,\r\n fg=\"red\")\r\nviewTaskButton = Button(rightFrame, text=\"View Task\", command=taskDetails,\r\n width=38, pady=10)\r\n\r\n# Create ListBox Widget\r\ntaskBox = Listbox(rightFrame, height=10, width=45, selectbackground=bgColor[1])\r\n\r\n# Put Widgets on screen\r\nenterTaskLabel.grid(row=1, column=0, sticky=W)\r\ntaskEntry.grid(row=1, column=1, sticky=W)\r\nblankLabelPad.grid(row=2)\r\ntaskBodyLabel.grid(row=3, column=0, sticky=NW)\r\ntaskBody.grid(row=3, column=1, sticky=W)\r\n\r\nblankLabelPad3.grid(row=4)\r\n\r\npriorityLabel.grid(row=5, column=0, sticky=W)\r\npriorityMenu.grid(row=5, column=1, sticky=W)\r\n\r\nblankLabelPad2.grid(row=6)\r\n\r\naddItemButton.grid(row=10, column=1, sticky=N)\r\ndeleteItemButton.grid(row=4, column=1, sticky=N)\r\ndeleteAllButton.grid(row=5, column=1, sticky=N)\r\nviewTaskButton.grid(row=6, column=1, sticky=N)\r\nexitAppButton.grid(row=11, column=1, sticky=S)\r\n\r\ntaskBoxLabel.grid(row=1, column=1, sticky=S)\r\ntaskBox.grid(row=2, column=1, sticky=NS)\r\n\r\nroot.mainloop()\r\n","sub_path":"todo_gui_10272020.py","file_name":"todo_gui_10272020.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"475653279","text":"#!/usr/bin/env python\n\n#script to look at deltaE from fortran anacmdeltaE.f90 produces fits\n\nimport sys\nimport os.path\n\nimport numpy as np\nimport matplotlib as mpl\nmpl.use(\"cairo\")\nimport matplotlib.pyplot as plt\nimport math\n\nif len(sys.argv) > 1 :\n namemod = sys.argv[1] #to modify name when saving\nelse:\n namemod = \"\"\n\n\n\n#sample file to load\n#/home/a1627873/baryons/anacm/k13754/c00/8x8/neutron+/neutron.p000.to18.dt4.BF1.state0.spinR.fits.txt\n \n#plt.rc('text', usetex=True)\n#mpl.rcParams['text.latex.preamble'].append(r'\\usepackage{amsmath}')\n#mpl.rcParams['text.latex.preamble'].append(r'\\usepackage{amssymb}')\n\n\n#What size correlation matrix are we using? (usually 8)\nN = \"8\"\n\n#where are the files lcoated?\ninFile = \"/home/a1627873/baryons/anacm/k13754/c00/\"+N+\"x\"+N+\"/\"\n\n\n#which particle doing. i.e. neutron, proton\nparticle = \"neutron\"\n\n#which parity we have, e.g. 
+ or -\nparity = \"+\"\n\n#which to and dt\nto = \"17\"\ndt = \"2\"\n\n#which state?\nstate = \"0\"\n\n#list of Bfields\n#Bfield = [\"BF-2\",\"BF-1\",\"BF0\",\"BF1\",\"BF2\"]\nBfield = \"BF1\"\n\n#dEpol or dEmu?\nrat = \"pol\"\n\ninFile = inFile+particle+parity+\"/\"+particle+\".\"+Bfield+\".p000.to\"+to+\".dt\"+dt+\".deltaE\"+rat+\".state\"+state+\".EnergyG\"\n\n\n#eutron.BF2.p000.to17.dt2.deltaEmu.state0.allEnergyG.shft2.fits.txt\n\nfitFile = inFile+\".shft2.fits.txt\"\n#if(not os.path.isfile(fitFile)):\n# fitFile = inFile+\".mass.fits.txt\"\nprint(fitFile)\n \ndEFile = inFile+\".shft2.txt\"\n#if(not os.path.isfile(massFile)):\n# massFile = inFile+\".mass.txt\"\n \n\nall_dE_fits = np.loadtxt(fitFile, dtype=[('t0',int),('t1',int),('dE',float),('error',float),('errorFit',float),('bias',float),('chi2',float),('chi2/dof',float),('goodness',float),('ndof',int)])\nall_dE_fits = all_dE_fits[np.lexsort((all_dE_fits['t0'],))]\n\n\nplt.figure(figsize=(8.3, 5.8), dpi=100)\n\nt_min = 17\nt_max = 40\n\nr = None\ndone = False\nwhile not done:\n i = (all_dE_fits['t0'] >= t_min) & (all_dE_fits['t0'] < t_max) & (all_dE_fits['t1'] == t_max)\n dE_fits = all_dE_fits[i]\n\n for r in dE_fits:\n print(\"{t0:d}\\t{t1:d}\\t{dE:E}\\t{err:E}\\t{chi2:f}\".format(t0=r['t0'], t1=r['t1'], dE=r['dE'], err=r['error'], chi2=r['chi2/dof']))\n\n t0 = int(raw_input(\"t0: \"))\n t1 = t_max\n\n r = dE_fits[np.where(dE_fits['t0'] == t0)]\n r = r[np.where(r['t1'] == t1)]\n r = r[0]\n\n print(\"dE: {dE:E} +/- {err:E}\".format(dE=r['dE'], err=r['error']))\n print(\"chi2/dof: {chi2:f}\".format(chi2=r['chi2/dof']))\n\n dE_data = np.loadtxt(dEFile, dtype=[('t',int),('dE',float),('error',float),('bias',float)])\n dE_data = dE_data[dE_data['t'] >= 16]\n #mass_data = mass_data[mass_data['t'] <= 40]\n plt.errorbar(dE_data['t'], dE_data['dE'], yerr=dE_data['error'])\n\n plt.axhspan(ymin=r['dE']-r['error'], ymax=r['dE']+r['error'], xmin=(t0-16)/24.0, xmax=(t1-16)/24.0, alpha=0.3, color='g')\n plt.axhline(y=r['dE'], xmin=(t0-16)/24.0, xmax=(t1-16)/24.0, color='g')\n\n plt.ylabel(\"$dE$\")\n plt.xlabel(\"$t$\")\n# plt.xlim([16,40])\n plt.xlim([16,30])\n# plt.ylim([-0.05,0.8])\n# plt.ylim([-0.2,0.2])\n# plt.ylim([-0.3,0.1])\n# plt.ylim([-0.1,0.3])\n# plt.ylim([0.8,-0.05])\n plt.ylim( [ 0.02, 0.08] )\n# plt.savefig(\"test.png\")\n plt.savefig(inFile+namemod+\".pyfit.png\")\n plt.show()\n # plt.savefig(\"/home/a1627873/mesons/scripts/test.png\") #should save the plot to test.png as isn't popping up\n\n \n response = raw_input(\"Is this an acceptable fit? 
(Y/n): \")\n switch = {\n '': True,\n 'y': True,\n 'Y': True,\n 'n': False,\n 'N': False,\n }\n while response not in switch:\n response = raw_input(\"Please enter 'y' or 'n': \")\n done = switch[response]\n if not done:\n t_max = int(raw_input(\"t1: \"))\n\n\n #slope(jj,ii,3:) = dE(jj,ii,3:) * (-NUCLEON_MASS*32*32*lattice_spacing**2)/( ii*Pi*3*HBARC**2)\n \n \nprint(\"dE: {dE:E} +/- {err:E}\".format(dE=r['dE'], err=r['error']))\nprint(\"chi2/dof: {chi2:f}\".format(chi2=r['chi2/dof']))\nprint(\"fit range: {lbound:d}-{ubound:d}\".format(lbound=t0, ubound=t1))\n\n\n\nprint(\"END OF FILE\")\n\n\n","sub_path":"old/fortrandE-plot-png.py","file_name":"fortrandE-plot-png.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"291794102","text":"import time\nimport pathlib\nimport tempfile\nimport numpy\ntry:\n import soundfile\n have_soundfile = True\nexcept ImportError:\n have_soundfile = False\ntry:\n import soundcard\n have_soundcard = True\nexcept ImportError:\n have_soundcard = False\ntry:\n import scipy.signal\n have_scipy = True\nexcept ImportError:\n have_scipy = False\ntry:\n import matplotlib\n import matplotlib.pyplot as plt\n have_pyplot = True\nexcept ImportError:\n have_pyplot = False\n\nfrom slab.signal import Signal\nfrom slab.filter import Filter\nfrom slab import DATAPATH\n\n# get a temporary directory for writing intermediate files\n_tmpdir = pathlib.Path(tempfile.gettempdir())\n\ntry: # try getting a previously set calibration intensity from file\n _calibration_intensity = numpy.load(DATAPATH + 'calibration_intensity.npy')\nexcept FileNotFoundError:\n _calibration_intensity = 0 #: Difference between rms intensity and measured output intensity in dB\n\n\nclass Sound(Signal):\n '''\n Class for working with sounds, including loading/saving, manipulating and playing.\n\n Examples:\n\n >>> import slab\n >>> import numpy\n >>> print(slab.Sound(numpy.ones([10,2]),samplerate=10))\n duration 1.0, samples 10, channels 2, samplerate 10\n >>> print(slab.Sound(numpy.ones([10,2]),samplerate=10).channel(0))\n duration 1.0, samples 10, channels 1, samplerate 10\n\n **Properties**\n\n >>> sig = slab.Sound.tone()\n >>> sig.level = 80\n >>> sig.level\n 80.0\n\n **Generating sounds**\n\n All sound generating methods can be used with durations arguments in samples (int) or seconds (float).\n One can also set the number of channels by setting the keyword argument nchannels to the desired value.\n\n **Plotting**\n\n >>> vowel = slab.Sound.vowel(vowel='a', duration=.5, samplerate=8000)\n >>> vowel.ramp()\n >>> vowel.spectrogram(dyn_range = 50)\n >>> vowel.spectrum(low=100, high=4000, log_power=True)\n >>> vowel.waveform(start=0, end=.1)\n\n '''\n # instance properties\n\n def _get_level(self):\n '''\n Returns level in dB SPL (RMS) assuming array is in Pascals.\n In the case of multi-channel sounds, returns an array of levels\n for each channel, otherwise returns a float.\n '''\n if self.nchannels == 1:\n rms_value = numpy.sqrt(numpy.mean(numpy.square(self.data-numpy.mean(self.data))))\n if rms_value == 0:\n rms_dB = 0\n else:\n rms_dB = 20.0*numpy.log10(rms_value/2e-5)\n return rms_dB + _calibration_intensity\n chans = self.channels()\n levels = [c.level for c in chans]\n return numpy.array(levels)\n\n def _set_level(self, level):\n '''\n Sets level in dB SPL (RMS) assuming array is in Pascals. 
`level`\n should be a value in dB, or a tuple of levels, one for each channel.\n '''\n rms_dB = self._get_level()\n if self.nchannels > 1:\n level = numpy.array(level)\n if level.size == 1:\n level = level.repeat(self.nchannels)\n level = numpy.reshape(level, (1, self.nchannels))\n rms_dB = numpy.reshape(rms_dB, (1, self.nchannels))\n gain = 10**((level-rms_dB)/20.)\n self.data *= gain\n\n level = property(fget=_get_level, fset=_set_level, doc='''\n Can be used to get or set the rms level of a sound, which should be in dB.\n For single channel sounds a value in dB is used, for multiple channel\n sounds a value in dB can be used for setting the level (all channels\n will be set to the same level), or a list/tuple/array of levels. Use\n :meth:`slab.Sound.calibrate` to make the computed level reflect output intensity.\n ''')\n\n def __init__(self, data, samplerate=None):\n if isinstance(data, pathlib.Path): # Sound initialization from a file name (pathlib object)\n data = str(data)\n if isinstance(data, str): # Sound initialization from a file name (string)\n if samplerate is not None:\n raise ValueError('Cannot specify samplerate when initialising Sound from a file.')\n _ = Sound.read(data)\n self.data = _.data\n self.samplerate = _.samplerate\n else:\n # delegate to the baseclass init\n super().__init__(data, samplerate)\n\n # static methods (creating sounds)\n @staticmethod\n def read(filename):\n '''\n Load the file given by filename (.wav) and returns a Sound object.\n '''\n if not have_soundfile:\n raise ImportError(\n 'Reading wav files requires SoundFile (pip install git+https://github.com/bastibe/SoundFile.git')\n data, samplerate = soundfile.read(filename)\n return Sound(data, samplerate=samplerate)\n\n @staticmethod\n def tone(frequency=500, duration=1., phase=0, samplerate=None, nchannels=1):\n '''\n Returns a pure tone at frequency for duration, using the default\n samplerate or the given one.\n\n Arguments:\n frequency/phase: if single values, multiple channels can be specified with the `nchannels` argument.\n If sequences, one frequency or phase is used for each channel.\n '''\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n frequency = numpy.array(frequency)\n phase = numpy.array(phase)\n if frequency.size > nchannels and nchannels == 1:\n nchannels = frequency.size\n if phase.size > nchannels and nchannels == 1:\n nchannels = phase.size\n if frequency.size == nchannels:\n frequency.shape = (1, nchannels)\n if phase.size == nchannels:\n phase.shape = (1, nchannels)\n t = numpy.arange(0, duration, 1)/samplerate\n t.shape = (t.size, 1) # ensures C-order\n x = numpy.sin(phase + 2*numpy.pi * frequency * numpy.tile(t, (1, nchannels)))\n return Sound(x, samplerate)\n\n @staticmethod\n def harmoniccomplex(f0=500, duration=1., amplitude=0, phase=0, samplerate=None, nchannels=1):\n '''\n Returns a harmonic complex composed of pure tones at integer multiples of the fundamental frequency `f0`.\n\n Arguments:\n amplitude/phase: can be a single value or a sequence. In the former case the value is set for all harmonics,\n and harmonics up to 1/5th of the sampling frequency are generated. In the latter case each harmonic\n parameter is set separately, and the number of harmonics generated corresponds to the length of the\n sequence. Amplitudes are relateve to full scale (i.e. 
0 corresponds to maximum intensity, -30 would be\n 30 dB softer).\n phase: can have a special non-numerical value, the string 'schroeder', in which case the harmonics are in\n Schoeder phase, producing a complex tone with minimal peak-to-peak amplitudes (Schroeder 1970).\n\n >>> sig = Sound.harmoniccomplex(f0=200, amplitude=[0,-10,-20,-30])\n >>> _ = sig.spectrum()\n '''\n samplerate = Sound.get_samplerate(samplerate)\n phases = numpy.array(phase).flatten()\n amplitudes = numpy.array(amplitude).flatten()\n if len(phases) > 1 or len(amplitudes) > 1:\n if (len(phases) > 1 and len(amplitudes) > 1) and (len(phases) != len(amplitudes)):\n raise ValueError('Please specify the same number of phases and amplitudes')\n nharmonics = max(len(phases), len(amplitudes))\n else:\n nharmonics = int(numpy.floor(samplerate/(5*f0)))\n if len(phases) == 1:\n phases = numpy.tile(phase, nharmonics)\n if len(amplitudes) == 1:\n amplitudes = numpy.tile(amplitude, nharmonics)\n freqs = numpy.linspace(f0, nharmonics * f0, nharmonics, endpoint=True)\n if isinstance(phase, str) and phase == 'schroeder':\n n = numpy.linspace(1, nharmonics, nharmonics, endpoint=True)\n phases = numpy.pi * n * (n + 1) / nharmonics\n out = Sound.tone(f0, duration, phase=phases[0], samplerate=samplerate, nchannels=nchannels)\n lvl = out.level\n out.level += amplitudes[0]\n for i in range(1, nharmonics):\n tmp = Sound.tone(frequency=freqs[i], duration=duration,\n phase=phases[i], samplerate=samplerate, nchannels=nchannels)\n tmp.level = lvl + amplitudes[i]\n out += tmp\n return out\n\n @staticmethod\n def whitenoise(duration=1.0, samplerate=None, nchannels=1, normalise=True):\n '''\n Returns a white noise. If the samplerate is not specified, the global\n default value will be used. nchannels = 2 produces uncorrelated noise (dichotic).\n See also :func:`Binaural.whitenoise`.\n\n >>> noise = Sound.whitenoise(1.0,nchannels=2)\n '''\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n x = numpy.random.randn(duration, nchannels)\n if normalise:\n for i in range(nchannels):\n x[:, i] = ((x[:, i] - numpy.amin(x[:, i])) /\n (numpy.amax(x[:, i]) - numpy.amin(x[:, i])) - 0.5) * 2\n return Sound(x, samplerate)\n\n @staticmethod\n def powerlawnoise(duration=1.0, alpha=1, samplerate=None, nchannels=1, normalise=True):\n '''\n Returns a power-law noise for the given duration.\n Spectral density per unit of bandwidth scales as 1/(f**alpha).\n\n Arguments:\n duration: duration of the output.\n alpha: power law exponent.\n samplerate: output samplerate\n\n >>> noise = Sound.powerlawnoise(0.2, 1, samplerate=8000)\n '''\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n n = duration\n n2 = int(n/2)\n f = numpy.array(numpy.fft.fftfreq(n, d=1.0/samplerate), dtype=complex)\n f.shape = (len(f), 1)\n f = numpy.tile(f, (1, nchannels))\n if n % 2 == 1:\n z = (numpy.random.randn(n2, nchannels) + 1j * numpy.random.randn(n2, nchannels))\n a2 = 1.0 / (f[1:(n2+1), :]**(alpha/2.0))\n else:\n z = (numpy.random.randn(n2-1, nchannels) + 1j * numpy.random.randn(n2-1, nchannels))\n a2 = 1.0 / (f[1:n2, :]**(alpha/2.0))\n a2 *= z\n if n % 2 == 1:\n d = numpy.vstack((numpy.ones((1, nchannels)), a2,\n numpy.flipud(numpy.conj(a2))))\n else:\n d = numpy.vstack((numpy.ones((1, nchannels)), a2,\n 1.0 / (numpy.abs(f[n2])**(alpha/2.0)) *\n numpy.random.randn(1, nchannels),\n numpy.flipud(numpy.conj(a2))))\n x = numpy.real(numpy.fft.ifft(d.flatten()))\n x.shape = (n, nchannels)\n if 
normalise:\n for i in range(nchannels):\n x[:, i] = ((x[:, i] - numpy.amin(x[:, i])) /\n (numpy.amax(x[:, i]) - numpy.amin(x[:, i])) - 0.5) * 2\n return Sound(x, samplerate)\n\n @staticmethod\n def pinknoise(duration=1.0, samplerate=None, nchannels=1, normalise=True):\n '''\n Returns pink noise, i.e :func:`powerlawnoise` with alpha=1.\n nchannels = 2 produces uncorrelated noise (dichotic).\n See also :func:`Binaural.pinknoise`.\n '''\n return Sound.powerlawnoise(duration, 1.0, samplerate=samplerate,\n nchannels=nchannels, normalise=normalise)\n\n @staticmethod\n def irn(frequency=100, gain=1, niter=4, duration=1.0, samplerate=None):\n '''\n Iterated ripple noise (IRN) is a broadband noise with temporal regularities,\n which can give rise to a perceptible pitch. Since the perceptual pitch to noise\n ratio of these stimuli can be altered without substantially altering their spectral\n content, they have been useful in exploring the role of temporal processing in pitch\n perception [Yost 1996, JASA]. The noise is obtained by adding attenuated and delayed\n versions of a white noise in the frequency domain.\n\n Arguments:\n frequency: the frequency of the resulting pitch in Hz\n gain: multiplicative factor of the repeated additions. Smaller values reduce the\n temporal regularities in the resulting IRN.\n niter: number of iterations of additions. Higher values increase pitch saliency.\n '''\n samplerate = Sound.get_samplerate(samplerate)\n delay = 1/frequency\n noise = Sound.whitenoise(duration, samplerate=samplerate)\n x = numpy.array(noise.data.T)[0]\n irn_add = numpy.fft.fft(x)\n n_samples, sample_dur = len(irn_add), float(1/samplerate)\n w = 2 * numpy.pi*numpy.fft.fftfreq(n_samples, sample_dur)\n d = float(delay)\n for k in range(1, niter+1):\n irn_add += (gain**k) * irn_add * numpy.exp(-1j * w * k * d)\n irn_add = numpy.fft.ifft(irn_add)\n x = numpy.real(irn_add)\n return Sound(x, samplerate)\n\n @staticmethod\n def click(duration=0.0001, samplerate=None, nchannels=1):\n 'Returns a click of the given duration (*100 microsec*).'\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n return Sound(numpy.ones((duration, nchannels)), samplerate)\n\n @staticmethod\n def clicktrain(duration=1.0, frequency=500, clickduration=0.0001, samplerate=None):\n 'Returns a series of n clicks (see :func:`click`) at a frequency of freq.'\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n clickduration = Sound.in_samples(clickduration, samplerate)\n interval = int(numpy.rint(1/frequency * samplerate))\n n = numpy.rint(duration/interval)\n oneclick = Sound.click(clickduration, samplerate=samplerate)\n oneclick.resize(interval)\n oneclick.repeat(n)\n return oneclick\n\n @staticmethod\n def chirp(duration=1.0, from_frequency=100, to_frequency=None, samplerate=None, kind='quadratic'):\n '''Returns a pure tone with increasing or decreasing frequency from and to given\n frequency endpoints using :func:`scipy.signal.chirp`.\n `kind` determines the type of ramp (see :func:`scipy.signal.chirp` for options).\n '''\n if not have_scipy:\n raise ImportError('Generating chirps requires Scipy.')\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n t = numpy.arange(0, duration, 1) / samplerate # generate a time vector\n t.shape = (t.size, 1) # ensures C-order\n if not to_frequency:\n to_frequency = samplerate / 2\n chirp = scipy.signal.chirp(\n t, from_frequency, t[-1], 
to_frequency, method=kind, vertex_zero=True)\n return Sound(chirp, samplerate=samplerate)\n\n @staticmethod\n def silence(duration=1.0, samplerate=None, nchannels=1):\n 'Returns a silent sound (all samples equal zero) for the given duration.'\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n return Sound(numpy.zeros((duration, nchannels)), samplerate)\n\n @staticmethod\n def vowel(vowel='a', gender=None, glottal_pulse_time=12, formant_multiplier=1, duration=1., samplerate=None, nchannels=1):\n '''\n Returns a vowel sound.\n\n Arguments:\n vowel: 'a', 'e', 'i', 'o', 'u', 'ae', 'oe', or 'ue' (pre-set format frequencies)\n or None for random formants in the range of the vowel formants.\n gender: 'male', 'female'; shortcut for setting glottal_pulse_time and formant_multiplier\n glottal_pulse_time: distance in milliseconds of glottal pulses (determines vocal trakt length)\n formant_multiplier: multiplier for the predefined formant frequencies (scales the voice pitch)\n '''\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n formant_freqs = {'a': (0.73, 1.09, 2.44), 'e': (0.36, 2.25, 3.0), 'i': (0.27, 2.29, 3.01),\n 'o': (0.35, 0.5, 2.6), 'u': (0.3, 0.87, 2.24), 'ae': (0.86, 2.05, 2.85), 'oe': (0.4, 1.66, 1.96),\n 'ue': (0.25, 1.67, 2.05)}\n if vowel is None:\n BW = 0.3\n formants = (0.22/(1-BW)+(0.86/(1+BW)-0.22/(1-BW))*numpy.random.rand(),\n 0.5/(1-BW)+(2.29/(1+BW)-0.5/(1-BW))*numpy.random.rand(),\n 1.96/(1-BW)+(3.01/(1+BW)-1.96/(1-BW))*numpy.random.rand())\n else:\n if vowel not in formant_freqs:\n raise ValueError(f'Unknown vowel: {vowel}')\n formants = formant_freqs[vowel]\n if gender == 'male':\n glottal_pulse_time = 12\n elif gender == 'female':\n glottal_pulse_time = 6\n formant_multiplier = 1.2 # raise formant frequencies by 20%\n formants = [formant_multiplier * f for f in formants] # scale each formant\n ST = 1000/samplerate\n times = ST * numpy.arange(duration)\n T05 = 2.5 # decay half-time for glottal pulses\n env = numpy.exp(-numpy.log(2)/T05 * numpy.mod(times, glottal_pulse_time))\n env = numpy.mod(times, glottal_pulse_time)**0.25 * env\n min_env = numpy.min(env[(times >= glottal_pulse_time/2) & (times <= glottal_pulse_time-ST)])\n env = numpy.maximum(env, min_env)\n out = numpy.zeros(len(times))\n for f in formants:\n A = numpy.min((0, -6*numpy.log2(f)))\n out = out + 10**(A/20) * env * numpy.sin(2 * numpy.pi *\n f * numpy.mod(times, glottal_pulse_time))\n if nchannels > 1:\n out = numpy.tile(out, (nchannels, 1))\n vowel = Sound(data=out, samplerate=samplerate)\n vowel.filter(frequency=0.75*samplerate/2, kind='lp')\n return vowel\n\n @staticmethod\n def multitone_masker(duration=1.0, low_cutoff=125, high_cutoff=4000, bandwidth=1/3, samplerate=None):\n '''\n Returns a noise made of ERB-spaced random-phase sinetones in the band between `low_cutoff` and `high_cutoff`.\n This noise does not have random amplitude variations and is useful for testing CI patients.\n See Oxenham 2014, Trends Hear.\n\n >>> sig = Sound.multitone_masker()\n >>> sig.ramp()\n >>> _ = sig.spectrum()\n '''\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n # get centre_freqs\n freqs, _, _ = Filter._center_freqs(\n low_cutoff=low_cutoff, high_cutoff=high_cutoff, bandwidth=bandwidth)\n rand_phases = numpy.random.rand(len(freqs)) * 2 * numpy.pi\n sig = Sound.tone(frequency=freqs, duration=duration,\n phase=rand_phases, samplerate=samplerate)\n # collapse across 
channels\n data = numpy.sum(sig.data, axis=1) / len(freqs)\n return Sound(data, samplerate=samplerate)\n\n @staticmethod\n def erb_noise(duration=1.0, low_cutoff=125, high_cutoff=4000, samplerate=None):\n '''\n Returns an equally-masking noise (ERB noise) in the band between `low_cutoff` and `high_cutoff`.\n\n >>> sig = Sound.erb_noise()\n >>> sig.ramp()\n >>> _ = sig.spectrum()\n '''\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n n = 2**(duration-1).bit_length() # next power of 2\n st = 1 / samplerate\n df = 1 / (st * n)\n frq = df * numpy.arange(n/2)\n frq[0] = 1 # avoid DC = 0\n lev = -10*numpy.log10(24.7*(4.37*frq))\n filt = 10.**(lev/20)\n noise = numpy.random.randn(n)\n noise = numpy.real(numpy.fft.ifft(numpy.concatenate(\n (filt, filt[::-1])) * numpy.fft.fft(noise)))\n noise = noise/numpy.sqrt(numpy.mean(noise**2))\n band = numpy.zeros(len(lev))\n band[round(low_cutoff/df):round(high_cutoff/df)] = 1\n fnoise = numpy.real(numpy.fft.ifft(numpy.concatenate(\n (band, band[::-1])) * numpy.fft.fft(noise)))\n fnoise = fnoise[:duration]\n return Sound(data=fnoise, samplerate=samplerate)\n\n @staticmethod\n def sequence(*sounds):\n 'Joins the sounds in the list `sounds` into a new sound object.'\n samplerate = sounds[0].samplerate\n for sound in sounds:\n if sound.samplerate != samplerate:\n raise ValueError('All sounds must have the same sample rate.')\n sounds = tuple(s.data for s in sounds)\n x = numpy.vstack(sounds)\n return Sound(x, samplerate)\n\n # instance methods\n def write(self, filename, normalise=True, fmt='WAV'):\n '''\n Save the sound as a WAV. If `normalise` is set to True, the maximal amplitude of the sound is normalised to 1.\n '''\n if not have_soundfile:\n raise ImportError(\n 'Writing wav files requires SoundFile (pip install SoundFile.')\n if isinstance(filename, pathlib.Path):\n filename = str(filename)\n if self.samplerate % 1:\n self = self.resample(int(self.samplerate))\n print('Sampling rate rounded to nearest integer for writing!')\n if normalise:\n soundfile.write(filename, self.data / numpy.amax(numpy.abs(self.data)), self.samplerate, format=fmt)\n else:\n soundfile.write(filename, self.data, self.samplerate, format=fmt)\n\n def ramp(self, when='both', duration=0.01, envelope=None):\n '''\n Adds an on/off ramp to the sound (in place).\n\n Arguments:\n when: Can take values 'onset', 'offset' or 'both'\n duration: The time over which the ramping happens (in samples or seconds)\n envelope: A ramping function, if not specified uses `sin(pi*t/2)**2`. The\n function should be a function of one variable `t` ranging from\n 0 to 1, and should increase from `f(0)=0` to `f(0)=1`. 
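For example, envelope=lambda t: t gives a linear onset ramp. 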
The\n reverse is applied for the offset ramp.\n '''\n when = when.lower().strip()\n if envelope is None:\n envelope = lambda t: numpy.sin(numpy.pi * t / 2) ** 2 # squared sine window\n sz = Sound.in_samples(duration, self.samplerate)\n multiplier = envelope(numpy.reshape(numpy.linspace(0.0, 1.0, sz), (sz, 1)))\n if when in ('onset', 'both'):\n self.data[:sz, :] *= multiplier\n if when in ('offset', 'both'):\n self.data[self.nsamples-sz:, :] *= multiplier[::-1]\n\n def repeat(self, n):\n 'Repeats the sound n times.'\n self.data = numpy.vstack((self.data,)*int(n))\n\n def copychannel(self, n):\n '''Copies a single-channel sound in place to make an n-channel sound.\n If a multi-channel sound is supplied, all channels except the first are silently dropped.'''\n self.data = numpy.repeat(self.channel(0), n, axis=1)\n\n @staticmethod\n def crossfade(sound1, sound2, overlap=0.01):\n '''\n Return a new sound that is a crossfade of sound1 and sound2 with a given `overlap`.\n\n >>> noise = Sound.whitenoise(duration=1.0)\n >>> vowel = Sound.vowel()\n >>> noise2vowel = Sound.crossfade(noise,vowel,overlap=0.4)\n >>> noise2vowel.play()\n '''\n if sound1.nchannels != sound2.nchannels:\n raise ValueError('Cannot crossfade sounds with unequal numbers of channels.')\n if sound1.samplerate != sound2.samplerate:\n raise ValueError('Cannot crossfade sounds with unequal samplerates.')\n overlap = Sound.in_samples(overlap, samplerate=sound1.samplerate)\n n_total = sound1.nsamples + sound2.nsamples - overlap\n silence = Sound.silence(sound1.nsamples - overlap,\n samplerate=sound1.samplerate, nchannels=sound1.nchannels)\n sound1.ramp(duration=overlap, when='offset')\n sound1.resize(n_total) # extend sound1 to total length\n sound2.ramp(duration=overlap, when='onset')\n sound2 = Sound.sequence(silence, sound2) # sound2 has to be prepended with silence\n return sound1 + sound2\n\n def pulse(self, pulse_frequency=4, duty=0.75):\n '''\n Apply a pulse envelope to the sound with a `pulse_frequency` and `duty` cycle (in place).\n '''\n pulse_period = 1/pulse_frequency\n n_pulses = round(self.duration / pulse_period) # number of pulses in the stimulus\n pulse_period = self.duration / n_pulses # period in s, fits into stimulus duration\n pulse_samples = Sound.in_samples(pulse_period * duty, self.samplerate) # duty cycle in s\n fall_samples = Sound.in_samples(5/1000, self.samplerate) # 5ms rise/fall time\n fall = numpy.cos(numpy.pi * numpy.arange(fall_samples) / (2 * (fall_samples)))**2\n pulse = numpy.concatenate((1-fall, numpy.ones(pulse_samples - 2 * fall_samples), fall))\n pulse = numpy.concatenate(\n (pulse, numpy.zeros(Sound.in_samples(pulse_period, self.samplerate)-len(pulse))))\n envelope = numpy.tile(pulse, n_pulses)\n # add an empty axis to get to the same shape as self.data: (n_samples, 1)\n envelope = envelope[:, None]\n # if data is 2D (>1 channel) broadcase the envelope to fit\n self.data *= numpy.broadcast_to(envelope, self.data.shape)\n\n def filter(self, frequency=100, kind='hp'):\n '''\n Filters a sound in place. 
This is a convenience function to avoid calling\n the Filter class for a standard low-, high-, bandpass, and bandstop filter.\n\n Arguments:\n frequency: edge frequency in Hz or tuple of frequencies for bandpass and bandstop.\n kind: 'lp', 'hp', bp, 'bs'\n\n >>> sig = Sound.whitenoise()\n >>> sig.filter(frequency=3000, kind='lp')\n >>> _ = sig.spectrum()\n '''\n n = min(1000, self.nsamples)\n filt = Filter.band(\n frequency=frequency, kind=kind, samplerate=self.samplerate, length=n)\n self.data = filt.apply(self).data\n\n def aweight(self):\n '''\n Returns A-weighted sound. A-weighting is applied to instrument-recorded sounds\n to account for the relative loudness of different frequencies perceived by the\n human ear. See: https://en.wikipedia.org/wiki/A-weighting'''\n if not have_scipy:\n raise ImportError('Applying a-weighting requires Scipy.')\n f1 = 20.598997\n f2 = 107.65265\n f3 = 737.86223\n f4 = 12194.217\n A1000 = 1.9997\n numerators = [(2 * numpy.pi * f4)**2 * (10**(A1000 / 20)), 0, 0, 0, 0]\n denominators = numpy.convolve(\n [1, 4 * numpy.pi * f4, (2 * numpy.pi * f4)**2], [1, 4 * numpy.pi * f1, (2 * numpy.pi * f1)**2])\n denominators = numpy.convolve(numpy.convolve(\n denominators, [1, 2 * numpy.pi * f3]), [1, 2 * numpy.pi * f2])\n b, a = scipy.signal.filter_design.bilinear(numerators, denominators, self.samplerate)\n data_chans = []\n for chan in self.channels():\n data = scipy.signal.lfilter(b, a, chan.data.flatten())\n data_chans.append(data) # concatenate channel data\n return Sound(data_chans, self.samplerate)\n\n @staticmethod\n def record(duration=1.0, samplerate=None):\n '''Record from inbuilt microphone. Note that most soundcards can only record at 44100 Hz samplerate.\n Uses SoundCard module if installed [recommended], otherwise uses SoX (duration must be in sec in this case).\n '''\n if have_soundcard:\n samplerate = Sound.get_samplerate(samplerate)\n duration = Sound.in_samples(duration, samplerate)\n mic = soundcard.default_microphone()\n data = mic.record(samplerate=samplerate, numframes=duration, channels=1)\n out = Sound(data, samplerate=samplerate)\n else: # use sox\n import subprocess\n try:\n subprocess.call(['sox', '-d', '-r', str(samplerate), str(_tmpdir / 'tmp.wav'), 'trim', '0', str(duration)])\n except:\n raise ImportError(\n 'Recording whithout SoundCard module requires SoX. Install: sudo apt-get install sox libsox-fmt-all OR pip install SoundCard.')\n time.sleep(duration)\n out = Sound('tmp.wav')\n return out\n\n def play(self, sleep=False):\n 'Plays the sound through the default device.'\n if have_soundcard:\n soundcard.default_speaker().play(self.data, samplerate=self.samplerate)\n else:\n self.write(_tmpdir / 'tmp.wav', normalise=False)\n Sound.play_file(_tmpdir / 'tmp.wav')\n if sleep: # all current play methods are blocking, there is no reason to sleep!\n time.sleep(self.duration)\n\n @staticmethod\n def play_file(fname):\n fname = str(fname) # in case it is a pathlib.Path object, get the name string\n from platform import system\n system = system()\n if system == 'Windows':\n import winsound\n winsound.PlaySound(fname, winsound.SND_FILENAME)\n elif system == 'Darwin': # MacOS\n import subprocess\n subprocess.call(['afplay', fname])\n else: # Linux\n import subprocess\n try:\n subprocess.call(['sox', fname, '-d'])\n except:\n raise NotImplementedError(\n 'Playing from files on Linux without SoundCard module requires SoX. 
Install: sudo apt-get install sox libsox-fmt-all or pip install SoundCard')\n\n def waveform(self, start=0, end=None, show=True, axis=None, **kwargs):\n '''\n Plots the waveform of the sound.\n\n Arguments:\n start, end: time or sample limits; if unspecified, shows the full waveform\n '''\n if not have_pyplot:\n raise ImportError('Plotting waveforms requires matplotlib.')\n start = self.in_samples(start, self.samplerate)\n if end is None:\n end = self.nsamples\n end = self.in_samples(end, self.samplerate)\n if axis is None:\n axis = plt.subplot()\n if self.nchannels == 1:\n axis.plot(self.times[start:end], self.channel(0)[start:end], **kwargs)\n elif self.nchannels == 2:\n axis.plot(self.times[start:end], self.channel(0)[start:end], label='left', **kwargs)\n axis.plot(self.times[start:end], self.channel(1)[start:end], label='right', **kwargs)\n axis.legend()\n else:\n for i in range(self.nchannels):\n axis.plot(self.times[start:end], self.channel(i)[start:end], label=f'channel {i}', **kwargs)\n plt.legend()\n axis.set(title='Waveform', xlabel='Time [sec]', ylabel='Amplitude')\n if show:\n plt.show()\n\n def spectrogram(self, window_dur=0.005, dyn_range=120, upper_frequency=None, other=None, show=True, axis=None, **kwargs):\n '''\n Plots a spectrogram of the sound.\n\n Arguments:\n window_dur: Duration of time window for short-term FFT (*0.005sec*)\n dyn_range: Dynamic range in dB to plot (*120*)\n other: If a sound object is given, subtract the waveform and plot the difference spectrogram.\n If plot is False, returns the values returned by :func:`scipy.signal.spectrogram`, namely\n freqs, times, power where power is a 2D array of powers, freqs are the corresponding frequencies,\n and times are the time bins.\n '''\n if not have_scipy:\n raise ImportError('Computing spectrograms requires Scipy.')\n if self.nchannels > 1:\n raise ValueError('Can only compute spectrograms for mono sounds.')\n if other is not None:\n x = self.data.flatten() - other.data.flatten()\n else:\n x = self.data.flatten()\n # set default for step_dur optimal for Gaussian windows.\n step_dur = window_dur/numpy.sqrt(numpy.pi)/8\n # convert window & step durations from seconds to numbers of samples\n window_nsamp = Sound.in_samples(window_dur, self.samplerate) * 2\n step_nsamp = Sound.in_samples(step_dur, self.samplerate)\n # make the window. 
A Gaussian filter needs a minimum of 6σ - 1 samples, so working\n # backward from window_nsamp we can calculate σ.\n window_sigma = (window_nsamp+1)/6\n window = scipy.signal.windows.gaussian(window_nsamp, window_sigma)\n # convert step size into number of overlapping samples in adjacent analysis frames\n noverlap = window_nsamp - step_nsamp\n # compute the power spectral density\n freqs, times, power = scipy.signal.spectrogram(\n x, mode='psd', fs=self.samplerate, scaling='density', noverlap=noverlap, window=window, nperseg=window_nsamp)\n if show or (axis is not None):\n if not have_pyplot:\n raise ImportError('Ploting spectrograms requires matplotlib.')\n p_ref = 2e-5 # 20 μPa, the standard reference pressure for sound in air\n power = 10 * numpy.log10(power / (p_ref ** 2)) # logarithmic power for plotting\n # set lower bound of colormap (vmin) from dynamic range.\n dB_max = power.max()\n vmin = dB_max-dyn_range\n cmap = matplotlib.cm.get_cmap('Greys')\n extent = (times.min(), times.max(), freqs.min(), upper_frequency or freqs.max())\n if axis is None:\n axis = plt.subplot()\n axis.imshow(power, origin='lower', aspect='auto',\n cmap=cmap, extent=extent, vmin=vmin, vmax=None, **kwargs)\n axis.set(title='Spectrogram', xlabel='Time [sec]', ylabel='Frequency [Hz]')\n if show:\n plt.show()\n else:\n return freqs, times, power\n\n def cochleagram(self, bandwidth=1/5, show=True, axis=None, **kwargs):\n '''\n Computes a cochleagram of the sound by filtering with a bank of cosine-shaped filters with given bandwidth\n (*1/5* th octave) and applying a cube-root compression to the resulting envelopes.\n If show is False, returns the envelopes.\n '''\n fbank = Filter.cos_filterbank(bandwidth=bandwidth, low_cutoff=20,\n high_cutoff=None, samplerate=self.samplerate)\n freqs = fbank.filter_bank_center_freqs()\n subbands = fbank.apply(self.channel(0))\n envs = subbands.envelope()\n envs.data[envs.data < 1e-9] = 0 # remove small values that cause waring with numpy.power\n envs = envs.data ** (1/3) # apply non-linearity (cube-root compression)\n if show or (axis is not None):\n if not have_pyplot:\n raise ImportError('Plotting cochleagrams requires matplotlib.')\n cmap = matplotlib.cm.get_cmap('Greys')\n if axis is None:\n axis = plt.subplot(111)\n axis.imshow(envs.T, origin='lower', aspect='auto', cmap=cmap)\n labels = list(freqs.astype(int))\n axis.yaxis.set_major_formatter(matplotlib.ticker.IndexFormatter(\n labels)) # centre frequencies as ticks\n axis.set_xlim([0, self.duration])\n axis.set(title='Cochleagram', xlabel='Time [sec]', ylabel='Frequency [Hz]')\n if show:\n plt.show()\n else:\n return envs\n\n def spectrum(self, low_cutoff=16, high_cutoff=None, log_power=True, axis=None, show=True, **kwargs):\n '''\n Returns the spectrum of the sound and optionally plots it.\n\n Arguments:\n low_cutoff/high_cutoff: If these are left unspecified, it shows the full spectrum, otherwise it shows\n only between `low` and `high` in Hz.\n log_power: If True it returns the log of the power.\n show: Whether to plot the output.\n If show=False, returns `Z, freqs`, where `Z` is a 1D array of powers\n and `freqs` are the corresponding frequencies.\n '''\n freqs = numpy.fft.rfftfreq(self.nsamples, d=1/self.samplerate)\n sig_rfft = numpy.zeros((len(freqs), self.nchannels))\n for chan in range(self.nchannels):\n sig_rfft[:, chan] = numpy.abs(numpy.fft.rfft(self.data[:, chan], axis=0))\n # scale by the number of points so that the magnitude does not depend on the length of the signal\n pxx = sig_rfft/len(freqs)\n pxx = 
pxx**2 # square to get the power\n if low_cutoff is not None or high_cutoff is not None:\n if low_cutoff is None:\n low_cutoff = 0\n if high_cutoff is None:\n high_cutoff = numpy.amax(freqs)\n I = numpy.logical_and(low_cutoff <= freqs, freqs <= high_cutoff)\n I2 = numpy.where(I)[0]\n Z = pxx[I2, :]\n freqs = freqs[I2]\n else:\n Z = pxx\n if log_power:\n Z[Z < 1e-20] = 1e-20 # no zeros because we take logs\n Z = 10 * numpy.log10(Z)\n if show or (axis is not None):\n if not have_pyplot:\n raise ImportError('Plotting spectra requires matplotlib.')\n if axis is None:\n axis = plt.subplot()\n axis.semilogx(freqs, Z, **kwargs)\n ticks_freqs = numpy.round(32000 * 2 **\n (numpy.arange(12, dtype=float)*-1))\n axis.set_xticks(ticks_freqs)\n axis.set_xticklabels(map(str, ticks_freqs.astype(int)))\n axis.grid()\n axis.set_xlim((freqs[1], freqs[-1]))\n axis.set_ylabel('Power [dB/Hz]') if log_power else plt.ylabel('Power')\n axis.set_title('Spectrum')\n if show:\n plt.show()\n else:\n return Z, freqs\n\n def spectral_feature(self, feature='centroid', mean='rms', frame_duration=None, rolloff=0.85):\n '''\n Computes one of several features of the spectrogram of a sound and returns either a\n new Signal with the feature value at each sample, or the average (*rms* or mean) feature value over all samples.\n Available features:\n `centroid` is the centre of mass of the short-term spectrum, and 'fwhm' is the width of a Gaussian of the same variance as the spectrum around the centroid.\n\n >>> sig = Sound.tone(frequency=500, nchannels=2)\n >>> round(sig.spectral_feature(feature='centroid')[0])\n 500.0\n\n `flux` is a measure of how quickly the power spectrum of a signal is changing, calculated by comparing the power spectrum for one frame against the power spectrum from the previous frame. Returns the root-mean-square over the entire stimulus of the change in power spectrum between adjacent time windows, measured as Euclidean distance.\n\n >>> sig = Sound.tone()\n >>> numpy.testing.assert_allclose(sig.spectral_feature(feature='flux'), desired=0, atol=1e-04)\n\n `flatness` measures how tone-like a sound is, as opposed to being noise-like.\n It is calculated by dividing the geometric mean of the power spectrum by the arithmetic mean. (Dubnov, Shlomo \"Generalization of spectral flatness measure for non-gaussian linear processes\" IEEE Signal Processing Letters, 2004, Vol. 
11.)\n\n `rolloff` is the frequency at which the spectrum rolles off and is typically used to find a suitable low-cutoff\n frequency that retains most of the signal power (given as fraction in `rolloff`).\n '''\n if not frame_duration:\n if mean is not None:\n frame_duration = int(self.nsamples/2) # long frames if not averaging\n else:\n frame_duration = 0.05 # 50ms frames by default\n out_all = []\n for chan in self.channels():\n freqs, times, power = chan.spectrogram(window_dur=frame_duration, show=False)\n norm = power / power.sum(axis=0, keepdims=True) # normalize successive frames\n if feature == 'centroid':\n out = numpy.sum(freqs[:, numpy.newaxis] * norm, axis=0)\n elif feature == 'fwhm':\n cog = numpy.sum(freqs[:, numpy.newaxis] * norm, axis=0)\n sq_dist_from_cog = (freqs[:, numpy.newaxis] - cog[numpy.newaxis, :]) ** 2\n sigma = numpy.sqrt(numpy.sum(sq_dist_from_cog * norm, axis=0))\n out = 2 * numpy.sqrt(2 * numpy.log(2)) * sigma\n elif feature == 'flux':\n norm = numpy.c_[norm[:, 0], norm] # duplicate first frame to give 0 diff\n delta_p = numpy.diff(norm, axis=1) # diff now has same shape as norm\n out = numpy.sqrt((delta_p**2).sum(axis=0)) / power.shape[0]\n elif feature == 'rolloff':\n cum = numpy.cumsum(norm, axis=0)\n rolloff_idx = numpy.argmax(cum >= rolloff, axis=0)\n out = freqs[rolloff_idx] # convert from index to Hz\n elif feature == 'flatness':\n norm[norm == 0] = 1\n gmean = numpy.exp(numpy.log(power + 1e-20).mean(axis=0))\n amean = power.sum(axis=0) / power.shape[0]\n out = gmean / amean\n else:\n raise ValueError('Unknown feature name.')\n if mean is None:\n out = numpy.interp(self.times, times, out) # interpolate to sound samples\n elif mean == 'rms':\n out = numpy.sqrt(numpy.mean(out**2)) # average feature time series\n elif mean == 'average':\n out = out.mean()\n out_all.append(out) # concatenate channel data\n if mean is None:\n out_all = Signal(data=out_all, samplerate=self.samplerate) # cast as Signal\n return out_all\n\n def vocode(self, bandwidth=1/3):\n '''\n Returns a noise vocoded version of the sound by computing the envelope in different frequency subbands,\n filling these envelopes with noise, and collapsing the subbands into one sound. This removes most spectral\n information but retains temporal information in a speech signal.\n\n Arguments:\n bandwidth: width of the subbands in octaves\n '''\n fbank = Filter.cos_filterbank(length=self.nsamples, bandwidth=bandwidth,\n low_cutoff=30, pass_bands=True, samplerate=self.samplerate)\n subbands = fbank.apply(self.channel(0))\n envs = subbands.envelope()\n envs.data[envs.data < 1e-9] = 0 # remove small values that cause waring with numpy.power\n noise = Sound.whitenoise(duration=self.nsamples,\n samplerate=self.samplerate) # make white noise\n subbands_noise = fbank.apply(noise) # divide into same subbands as signal\n subbands_noise *= envs # apply envelopes\n subbands_noise.level = subbands.level\n return Sound(Filter.collapse_subbands(subbands=subbands_noise, filter_bank=fbank))\n\n def crest_factor(self):\n '''\n The crest factor is the ratio of the peak amplitude and the RMS value of a waveform\n and indicates how extreme the peaks in a waveform are. 
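For reference, a pure sine wave has a crest factor of sqrt(2), i.e. about 3 dB. 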
Returns the crest factor in dB.\n Numerically identical to the peak-to-average power ratio.\n '''\n jwd = self.data - numpy.mean(self.data)\n if numpy.any(jwd): # if not all elements are zero\n crest = numpy.abs(jwd).max() / numpy.sqrt(numpy.mean(numpy.square(jwd)))\n return 20 * numpy.log10(crest)\n return numpy.nan\n\n def onset_slope(self):\n '''\n Returns the centroid of a histogram of onset slopes as a measure of how many\n quick intensity increases the sound has. These onset-like features make the\n sound easier to localize via envelope ITD.\n '''\n env = self.envelope(kind='dB') # get envelope\n diffs = numpy.diff(env.data, axis=0) * self.samplerate # compute db change per sec\n diffs[diffs < 0] = 0 # keep positive changes (onsets)\n if diffs.max() == 0:\n return 0\n # compute histogram of differences\n hist, bins = numpy.histogram(diffs, range=(1, diffs.max()), bins=1000)\n bin_centers = (bins[:-1] + bins[1:]) / 2\n norm = hist / hist.sum() # normalize histogram so that it summs to 1\n return numpy.sum(bin_centers * norm) # compute centroid of histogram\n\n def frames(self, duration=1024):\n '''\n Returns a generator that steps through the sound in overlapping, windowed frames.\n Get the frame center times by calling `frametimes`.\n\n Arguments:\n duration: half-length of the returned frames in samples or seconds\n\n >>> windows = sig.frames()\n >>> for w in windows:\n >>>\t\tprocess(w) # process windowed frame here\n '''\n if not have_scipy:\n raise ImportError('Need scipy for time window processing.')\n window_nsamp = Sound.in_samples(duration, self.samplerate) * 2\n # step_dur optimal for Gaussian windows\n step_nsamp = numpy.floor(window_nsamp/numpy.sqrt(numpy.pi)/8).astype(int)\n # make the window, Gaussian filter needs a minimum of 6σ - 1 samples.\n window_sigma = numpy.ceil((window_nsamp+1)/6)\n window = numpy.tile(scipy.signal.windows.gaussian(\n window_nsamp, window_sigma), (self.nchannels, 1)).T\n idx = 0\n while idx + window_nsamp/2 < self.nsamples: # loop through windows, yield each one\n frame = Sound(self.data[idx:min(self.nsamples, idx +\n window_nsamp), :], samplerate=self.samplerate)\n frame.resize(window_nsamp) # in case the last window is too short\n frame *= window\n yield frame # return a new sound object\n idx += step_nsamp\n\n def frametimes(self, duration=1024):\n 'Returns the time points at the frame centers constructed by the `frames` method.'\n window_nsamp = Sound.in_samples(duration, self.samplerate) * 2\n step_nsamp = numpy.floor(window_nsamp/numpy.sqrt(numpy.pi)/8).astype(int)\n samplepoints = []\n idx = 0\n while idx + window_nsamp/2 < self.nsamples:\n samplepoints.append(min(idx + window_nsamp/2, self.nsamples))\n idx += step_nsamp\n return numpy.array(samplepoints) / self.samplerate # convert to array of time points\n\n\ndef calibrate(intensity=None, make_permanent=False):\n '''\n Calibrate the presentation intensity of a setup. Enter the calibration intensity, if you know it.\n If None, plays a 1kHz tone. Please measure actual intensity with a sound level meter and appropriate\n coupler. Set make_permanent to True to save a calibration file in slab.DATAPATH that is loaded on import.\n '''\n global _calibration_intensity\n if intensity is None:\n tone = Sound.tone(duration=5.0, frequency=1000) # make 1kHz tone\n print('Playing 1kHz test tone for 5 seconds. 
Please measure intensity.')\n        tone.play()  # play it\n        intensity = float(input('Enter measured intensity in dB: '))  # ask for measured intensity (input() returns a string, so convert)\n        intensity = intensity - tone.level  # subtract measured from rms intensity\n    # set and save\n    _calibration_intensity = intensity\n    if make_permanent:\n        numpy.save(DATAPATH + 'calibration_intensity.npy', _calibration_intensity)\n\ndef apply_to_path(path='.', method=None, kwargs={}, out_path=None):\n    '''\n    Apply a function to all wav files in a given directory.\n\n    Arguments:\n        path: input path (str or pathlib.Path) from which wav files are collected for processing\n        method: callable function to be applied to each file\n        kwargs: dictionary of keyword arguments and values passed to the function.\n        out_path: if supplied, sounds are saved with their original file name in this directory\n\n    >>> slab.apply_to_path('.', slab.Sound.spectral_feature, {'feature':'fwhm'})\n    >>> slab.apply_to_path('.', slab.Sound.ramp, out_path='./modified')\n    >>> slab.apply_to_path('.', slab.Sound.ramp, kwargs={'duration':0.3}, out_path='./test')\n    '''\n    if not callable(method):\n        raise ValueError('Method must be callable.')\n    if isinstance(path, str):\n        path = pathlib.Path(path)\n    if isinstance(out_path, str):\n        out_path = pathlib.Path(out_path)\n    files = path.glob('*.wav')\n    results = dict()\n    for file in files:\n        sig = Sound(file)\n        res = method(sig, **kwargs)\n        if out_path:\n            if hasattr(res, 'write'):  # if objects with write methods were returned, write them to out_path\n                res.write(out_path.joinpath(file.name))\n            else:  # otherwise assume the modification was in-place and write sig to out_path\n                sig.write(out_path.joinpath(file.name))\n        results[str(file.stem)] = res\n    return results  # a dictionary of results for each file name\n","sub_path":"slab/sound.py","file_name":"sound.py","file_ext":"py","file_size_in_byte":49309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"622572476","text":"#####################\r\n###### imports ######\r\n#####################\r\n\r\nimport os\r\nimport dill\r\nif not os.getcwd().endswith('trading'): os.chdir('../../..') # local machine\r\nassert os.getcwd().endswith('trading'), 'Wrong path!'\r\nimport numerapi\r\nfrom numerai.dev.configs.evaluate_model_cfg import *\r\nimport pandas as pd\r\n\r\n\r\n\r\nmodel_obj = dill.load(open(MODEL_OBJ_FILEPATH, 'rb'))\r\n\r\n\r\n########################\r\n###### evaluation ######\r\n########################\r\n\r\n### run numerai analytics ###\r\n\r\npred_colname = RUN_MODEL_PARAMS.get('prediction_colname', 'prediction') if 'RUN_MODEL_PARAMS' in globals() else 'prediction'\r\n\r\nimportances = pd.DataFrame({(f, imp) for f, imp in zip(model_obj['final_features'], model_obj['model'].feature_importances_)})\\\r\n    .rename(columns={0: 'feature', 1: 'importance'})\\\r\n    .sort_values(by='importance', ascending=False)\r\n\r\n### corr_coefs for train / val / test ###\r\n\r\ntrain_era_scores = model_obj['df_pred'][model_obj['df_pred'][SPLIT_COLNAME].str.startswith('train')]\\\r\n    .groupby(DATE_COL)\\\r\n    .apply(calc_coef, TARGET, pred_colname)\r\n\r\nval_era_scores = model_obj['df_pred'][model_obj['df_pred'][SPLIT_COLNAME].str.startswith('val')]\\\r\n    .groupby(DATE_COL)\\\r\n    .apply(calc_coef, TARGET, pred_colname)\r\ntest_era_scores = model_obj['df_pred'][model_obj['df_pred'][SPLIT_COLNAME].str.startswith('test')]\\\r\n    .groupby(DATE_COL)\\\r\n    .apply(calc_coef, TARGET, pred_colname)\r\n\r\n\r\n### plot the coef scores / print the hit rates 
###\r\n\r\ntrain_era_scores = pd.DataFrame(train_era_scores, columns=['era_score']).assign(era='train')\r\nval_era_scores = pd.DataFrame(val_era_scores, columns=['era_score']).assign(era='val')\r\ntest_era_scores = pd.DataFrame(test_era_scores, columns=['era_score']).assign(era='test')\r\nera_scores = pd.concat([train_era_scores, val_era_scores, test_era_scores])\r\n\r\nfig = px.line(era_scores.reset_index(), x=\"date\", y=\"era_score\", line_group='era')\r\nfig.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Done!')","sub_path":"numerai_project/dev/scripts/evaluate_model.py","file_name":"evaluate_model.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"115450309","text":"from scapy.all import *\nfrom collections import Counter\n\nimport matplotlib.pyplot as plt\nimport math\n\n##############################################################################################\n## Exactly the same as DNSReqEntropy.py, except for the different packet capture used ... ####\n##############################################################################################\n\n# Entropy calculation function\n# H(x) = sum [p(x)*log(1/p)] for i occurrences of x\ndef CalcEntropy(myFreqDict):\n h = 0.0\n for aKey in myFreqDict:\n # Calculate probability of each even occurrence\n prob = myFreqDict[aKey]/sum(myFreqDict.values())\n # Entropy formula\n h += prob * math.log((1/prob),2)\n return h\n\n# Read from pcap file\n#pktcap = rdpcap(\"TestPcaps/BingSearchHTTP.pcapng\")\n#pktcap = rdpcap(\"TestPcaps/HTTP.pcap\")\n#pktcap = rdpcap(\"TestPcaps/Google_BBC_HTTP_over_DNS.pcapng\")\n#pktcap = rdpcap(\"TestPcaps/HTTP_Normal_Surf.pcapng\")\n#pktcap = rdpcap(\"TestPcaps/HTTPoverDNS.pcap\")\npktcap = rdpcap(\"TestPcaps/HTTPoverSSHoverDNS.pcap\")\n\n#Calculate byte/character entropy per packet if it is a DNS request packet (destport=53)\n\nperPktCharEntropySeq = [CalcEntropy(Counter(bytes(pkt[IP][UDP][DNS]))) for pkt in pktcap if DNS in pkt and pkt[UDP].dport==53]\nprint(\"Type: \", type(perPktCharEntropySeq))\nprint(\"Length: \", len(perPktCharEntropySeq))\n\n# Plot of Entropy Values\nplt.plot(perPktCharEntropySeq, color=\"red\", marker=\"+\", linestyle=\"None\")\n#plt.scatter(perPktCharEntropySeq) # missing 'y' value ... 
but actually it's the x value that we need\nplt.show()\n\n","sub_path":"HTTPoSSHoDNSReqEntrpy.py","file_name":"HTTPoSSHoDNSReqEntrpy.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154778572","text":"from dataclasses import dataclass\n\n\n@dataclass()\nclass NodeEventModel(object):\n\n def __init__(self, node_id, event_type, peer_ip=None, peer_port=None, timestamp=None, tx_sync_networks=None):\n self.node_id = node_id\n self.type = event_type\n self.peer_ip = peer_ip\n self.peer_port = peer_port\n self.timestamp = timestamp\n self.tx_sync_networks = tx_sync_networks\n\n\nclass NodeEventType(object):\n PEER_CONN_ERR = \"PEER_CONN_ERR\"\n PEER_CONN_ESTABLISHED = \"PEER_CONN_ESTABLISHED\"\n PEER_CONN_CLOSED = \"PEER_CONN_CLOSED\"\n ONLINE = \"ONLINE\"\n OFFLINE = \"OFFLINE\"\n SID_SPACE_FULL = \"SID_SPACE_FULL\"\n BLOCKCHAIN_NODE_CONN_ERR = \"BLOCKCHAIN_NODE_CONN_ERR\"\n BLOCKCHAIN_NODE_CONN_ESTABLISHED = \"BLOCKCHAIN_NODE_CONN_ESTABLISHED\"\n REMOTE_BLOCKCHAIN_CONN_ERR = \"REMOTE_BLOCKCHAIN_CONN_ERR\"\n REMOTE_BLOCKCHAIN_CONN_ESTABLISHED = \"REMOTE_BLOCKCHAIN_CONN_ESTABLISHED\"\n TX_SERVICE_FULLY_SYNCED = \"TX_SERVICE_FULLY_SYNCED\"\n TX_SERVICE_SYNCED_IN_NETWORK = \"TX_SERVICE_SYNCED_IN_NETWORK\"\n","sub_path":"src/bxcommon/models/node_event_model.py","file_name":"node_event_model.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"103728423","text":"\"\"\"Data table build\n\nCustom data structure building for AI training.\n\nSee DataTable description.\n\nAuthor: kotsky\n\n\"\"\"\n\nimport helper_methods as helper\n\n\nclass DataTable:\n \"\"\"\n\n DataTable is a data structure, which is generated from only\n numerical table-like data in a way, that it has the following attributes:\n\n head - shows all head names of each column\n\n table - contains _DataColumn object in dict way:\n table: { \"Column_name1\": _DataColumn obj,\n \"Column_name2\": _DataColumn obj, ... }\n\n _DataColumn obj contains:\n - .data: list\n - .max: float\n - .min: float\n - .mean: float\n - .scale_value - after scaling\n\n features - list which contains pointers on table,\n which user picked up as his features for learning (our x)\n\n target - column of control value, desired outcome of our AI model, (our y)\n\n Methods to use:\n * open_file(file_path) - can read words as well, transferring to numerical value and doing mapping\n * activate_feature(feature_name: str) - let user decide which feature to use in training set\n * deactivate_feature(feature_name: str) - let user decide which feature to disable in training set\n * add_new_feature([feature_name1, feature_name2, ...]: list of str, power: optional float) -\n user is free to add more features. Combine new feature from presented or\n make in power \"power\" some feature. 
This power activate new feature to be\n used in training set by default.\n Example:\n - add_new_feature([\"feat1\", \"feat2\"]) -> new feature feat1*feat2 will be created and\n - added to main table as new column.\n - add_new_feature([\"feat1\"], 2) -> new feature = feat1^2\n * select_target(target_name) - let user decide which outcome parameter is desired\n * max_scaling() - do scaling -1 <= x <= 1 of our table itself\n * plot(axis_name1: str, axis_name2: str) - show some figures of target vs features dependencies\n * copy() - to duplicate whole entire data structure DataTable\n * split_data(training_coeff: float) - return pointers, which shows which part of data is used for training/\n testing/cross-validation\n * shuffle() - to shuffle data\n * .ROUND_AFTER_COMA: int - to set user's desired round value. Default = 4\n * get_training_data() - returns training features set (defined by the user) and target set as arrays\n * get_cv_data() - returns CV features set (defined by the user) and target set as arrays\n * get_testing_data() - returns training features set (defined by the user) and target set as arrays\n * get_labels() - return training set labels and target name in same order as training set was generated\n\n \"\"\"\n\n class _DataColumn:\n \"\"\"\n\n Additional entity to have a clear solution.\n Represent a column of a main data table.\n\n \"\"\"\n\n ROUND_AFTER_COMA = 2\n\n def __init__(self, _data, mean=0, _max=float(\"-inf\"),\n _min=float(\"inf\"), _is_scaled=False,\n _is_centred=False, _scale_value=None):\n self.data = _data\n self.mean = mean\n self.max = _max\n self.min = _min\n self._is_scaled = _is_scaled\n self._is_centred = _is_centred\n self.scaled_value = _scale_value\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n self.count = 0\n self.end_count = len(self)\n return self\n\n def __next__(self):\n if self.count > self.end_count:\n raise StopIteration\n else:\n self.count += 1\n return self.count - 1\n\n def copy(self):\n new_entity = DataTable._DataColumn(self.data.copy(), self.mean, self.max,\n self.min, self._is_scaled, self._is_centred,\n self.scaled_value)\n return new_entity\n\n def reset(self):\n \"\"\"\n Reset attributes\n :return: None\n \"\"\"\n self._is_scaled = False\n self._is_centred = False\n self.min = float(\"inf\")\n self.max = float(\"-inf\")\n self.mean = 0\n\n def attribute_calculation(self):\n \"\"\"\n Calculate attributes once having data\n :return: None\n \"\"\"\n self.reset()\n for number in self.data:\n self.min = min(self.min, number)\n self.max = max(self.max, number)\n self.mean += number\n self.mean = round((self.mean / len(self)), self.ROUND_AFTER_COMA)\n\n def scaling(self):\n \"\"\"\n Does data scaling based on max(abs(max), abs(min))\n to fit range -1 <= x <= 1.\n Also, does increasing to same range if\n data is too small.\n :return: None | scaled column of data\n \"\"\"\n if self._is_scaled is True:\n return\n scaling_coefficient = max(abs(self.max), abs(self.min))\n self.scaled_value = scaling_coefficient\n if self.scaled_value == 0:\n return\n for idx in range(len(self)):\n self.data[idx] = round((self.data[idx] / scaling_coefficient),\n self.ROUND_AFTER_COMA)\n # update min-max-mean\n self.attribute_calculation()\n self._is_scaled = True\n\n _data_is_scaled: bool\n\n _YES = 'y'\n _NO = 'n'\n _TRAINING = \"training\"\n _CV = \"cv\"\n _TESTING = \"testing\"\n\n # CONFIG\n ROUND_AFTER_COMA = 2 # 0.455555=> 0.4556\n\n def __init__(self, file_path=None):\n self.head = []\n self.table = {}\n self.file_path = 
file_path\n self.features = {}\n self.target = {}\n self.class_dict = {} # contains words indexes per column = [{ \"word\" : 0 }, { 0 : \"word\"}]\n self._data_is_scaled = False\n self._split_pointers = {self._TRAINING: [[0, 0], False],\n self._CV: [[0, 0], False],\n self._TESTING: [[0, 0], False]}\n if file_path is not None:\n self.open_table(file_path)\n\n def __repr__(self):\n if self.head:\n for name in self.head:\n label = ' '\n if name in self.features:\n label = \"(f) \" # feature label\n elif self.target is not None and name in self.target:\n label = \"(t) \" # target label\n print(name, end=label)\n print(\"\")\n print_pretty = \"Its shape is {}x{}\".format(len(self.head),\n len(self))\n else:\n print_pretty = \"There is no table available. Please, upload table.\"\n return print_pretty\n\n def __len__(self):\n return len(self.table[self.head[0]])\n\n def is_split(self) -> bool:\n \"\"\"\n Did we split our data on training and test sets?\n :return True if data was split:\n \"\"\"\n return self._split_pointers[self._TRAINING][1]\n\n def copy(self):\n \"\"\"\n Copy entire data structure with brand new data location.\n :return: DataTable structure with copied data\n \"\"\"\n\n def _deepcopy(tree: dict) -> dict:\n new_tree = {}\n for key, value in tree.items():\n new_value = value.copy()\n new_tree[key] = new_value\n return new_tree\n\n def _repointing(keys: list, new_table: dict) -> dict:\n main_tree = {}\n for key in keys:\n main_tree[key] = new_table[key]\n return main_tree\n\n new_structure = DataTable()\n new_structure.head = self.head.copy()\n new_structure.table = _deepcopy(self.table)\n new_structure.file_path = self.file_path\n new_structure.features = _repointing(list(self.features.keys()), new_structure.table)\n new_structure.target = _repointing(list(self.target.keys()), new_structure.table)\n new_structure._data_is_scaled = self._data_is_scaled\n new_structure._split_pointers = _deepcopy(self._split_pointers)\n new_structure.class_dict = self.class_dict.copy()\n return new_structure\n\n def open_table(self, file_path=None):\n \"\"\"\n Upload data to main memory\n :param file_path: file path in the project\n :return: uploaded data and generated DataTable\n \"\"\"\n\n def _remove_slash_n(string):\n \"\"\"\n :param string: \"something\\n\"\n :return: \"something\"\n \"\"\"\n string = string[:-1]\n return string\n\n def _split_clean(_line):\n \"\"\"\n :param _line: \"apple,orange,something\\n\"\n :return: [\"apple\", \"orange\", \"something\"]\n \"\"\"\n _line = _remove_slash_n(_line)\n _line_split = _line.split(',')\n return _line_split\n\n if not self.file_path:\n print(\"Specify file path\")\n return\n\n file = open(file_path, \"r\")\n lines = file.readlines()\n file.close()\n self._data_is_scaled = False\n self.head = _split_clean(lines[0])\n\n self.__create_data_structure(len(lines) - 1)\n\n line_idx = len(lines) - 1\n\n while len(lines) > 1:\n line = lines.pop()\n line_split = _split_clean(line)\n\n # ['2.3', '4'] =>[2.3, 4.0]\n for _idx in range(len(line_split)):\n try:\n line_split[_idx] = float(line_split[_idx])\n except:\n word = line_split[_idx]\n\n column_name = self.head[_idx]\n\n if column_name not in self.class_dict:\n self.class_dict[column_name] = [{\"_count\": 0}, {}]\n\n word_count = self.class_dict[column_name][0][\"_count\"]\n if word not in self.class_dict[column_name][0]:\n self.class_dict[column_name][0][word] = word_count\n self.class_dict[column_name][1][word_count] = word\n self.class_dict[column_name][0][\"_count\"] += 1\n\n line_split[_idx] = 
self.class_dict[column_name][0][word]\n word_flag = True\n\n line_split_float = line_split\n\n for idx in range(len(self.head)):\n name_col = self.head[idx]\n number = line_split_float[idx]\n column = self.table[name_col]\n column.data[line_idx - 1] = number\n column.min = min(column.min, number)\n column.max = max(column.max, number)\n column.mean += number\n\n line_idx -= 1\n # do mean calculation\n for column_name in self.head:\n column = self.table[column_name]\n column.mean = round(column.mean / len(column), self.ROUND_AFTER_COMA)\n\n def _get_split_pointers(self):\n \"\"\"\n Get set of split pointers.\n :return: cv_flag shows if we have cross-validation set,\n also, returns set of pointers\n \"\"\"\n\n if self.is_split() is False:\n print(\"Data is not split. Prepare data first.\")\n return\n\n cv_flag = self._split_pointers[self._CV][1]\n if cv_flag is True:\n set_of_pointers = [self._split_pointers[self._TRAINING][0],\n self._split_pointers[self._CV][0],\n self._split_pointers[self._TESTING][0]]\n else:\n set_of_pointers = [self._split_pointers[self._TRAINING][0],\n self._split_pointers[self._TESTING][0]]\n return cv_flag, set_of_pointers\n\n def _get_training_pointers(self):\n return self._split_pointers[self._TRAINING][0] if self._split_pointers[self._TRAINING][1] else -1\n\n def _get_cv_pointers(self):\n return self._split_pointers[self._CV][0] if self._split_pointers[self._CV][1] else -1\n\n def _get_testing_pointers(self):\n return self._split_pointers[self._TESTING][0] if self._split_pointers[self._TESTING][1] else -1\n\n def _generate_data_by_pointers(self, mode=None):\n \"\"\"\n Generate a brand new data sets for further usage\n :param mode: training / cv / testing as string\n :return: features set, target set as arrays of data\n \"\"\"\n if mode == self._TESTING:\n set_of_pointers = self._get_testing_pointers()\n elif mode == self._CV:\n set_of_pointers = self._get_cv_pointers()\n else:\n set_of_pointers = self._get_training_pointers()\n\n if set_of_pointers == -1:\n print(\"There is no such data generated. Split first\")\n return\n if not self.features or not self.target:\n print(\"Data is not ready. Add features and target\")\n return\n start_p, end_p = set_of_pointers # inclusively\n features_set = []\n target_set = []\n target_name = self._get_target_name()\n for line_idx in range(start_p, end_p + 1):\n features_line = []\n for feature_name in self.features:\n features_line.append(self.features[feature_name].data[line_idx])\n features_set.append(features_line)\n target_set.append(self.target[target_name].data[line_idx])\n return features_set, target_set\n\n def get_training_data(self) -> (list, list):\n \"\"\"\n Generate training set from defined features and target\n :return: features set, target set as arrays\n \"\"\"\n return self._generate_data_by_pointers()\n\n def get_cv_data(self) -> (list, list):\n \"\"\"\n Generate cross-validation set from defined features and target\n :return: features set, target set as arrays\n \"\"\"\n return self._generate_data_by_pointers(mode=\"cv\")\n\n def get_testing_data(self) -> (list, list):\n \"\"\"\n Generate testing set from defined features and target\n :return: features set, target set as arrays\n \"\"\"\n return self._generate_data_by_pointers(mode=\"testing\")\n\n def get_labels(self) -> (str, str):\n \"\"\"\n\n :return: training set labels in same order as training set and target name\n \"\"\"\n if not self.features or not self.target:\n print(\"Define training set first. 
Add features and target\")\n return\n features_label = []\n for feature_name in self.features:\n features_label.append(feature_name)\n target_name = self._get_target_name()\n return features_label, target_name\n\n def get_column_data(self, column_name: str) -> list:\n \"\"\"\n Get data from the given column\n :param column_name: name from data table\n :return: list of data\n \"\"\"\n\n if column_name not in self.head:\n return []\n\n return self.table[column_name].data\n\n def plot(self, parameter1=None, parameter2=None,\n features2target=False, all2target=False,\n classifier=None) -> None:\n \"\"\"\n Plot 2D pictures.\n :param classifier: mark each dot as its class with a color\n :param parameter1: axis 1 column name\n :param parameter2: axis 2 column name\n :param features2target: plot all features to target\n :param all2target: plot all to target\n :return: figures 2D\n \"\"\"\n\n if all2target is True:\n if not self.target:\n print(\"There is no defined target. Please, select one\")\n return\n target_name = self._get_target_name()\n for column_name in self.head:\n if column_name in self.target:\n continue\n self._plot2d_helper(column_name, target_name, \"blue\")\n\n elif features2target is True:\n if not self.target:\n print(\"There is no defined target. Please, select one\")\n return\n target_name = self._get_target_name()\n for feature_name in self.features:\n if feature_name in self.target:\n continue\n self._plot2d_helper(feature_name, target_name, \"red\")\n\n elif parameter1 is not None and parameter2 is not None:\n if parameter1 in self.table and parameter2 in self.table:\n if classifier is None:\n self._plot2d_helper(parameter1, parameter2, \"green\")\n else:\n self._plot2d_helper_with_classifier(parameter1, parameter2, classifier)\n\n # if additional_to_draw is not None:\n # for function in additional_to_draw:\n # function()\n\n def _plot2d_helper_with_classifier(self, parameter1, parameter2, classifier):\n \"\"\"Draw a picture of 2 features, where data is market per its target class\"\"\"\n\n def _define_class_map(column_name: str, main_structure: DataTable):\n class_map = []\n if column_name in main_structure.class_dict:\n idx2class = main_structure.class_dict[column_name][1]\n for key in idx2class:\n word = idx2class[key]\n class_map.append(\"{} - {}\".format(key, word))\n return class_map if len(class_map) > 0 else ''\n\n import matplotlib.pyplot as plt\n\n parameter1_data = self.table[parameter1].data\n parameter2_data = self.table[parameter2].data\n\n target_name = classifier\n target_data = self.table[target_name].data\n\n target_class_map = self.class_dict[target_name][1]\n\n m_rows = len(parameter1_data)\n\n # class_colours = ['b', 'g', 'r']\n\n truth = [False, False, False]\n\n for m_idx in range(m_rows):\n f1 = parameter1_data[m_idx]\n f2 = parameter2_data[m_idx]\n t = target_data[m_idx]\n if t == 0:\n colour = 'b'\n label_ = 'Class 0 as ' + str(target_class_map[t])\n p = 0\n elif t == 1:\n colour = 'r'\n label_ = 'Class 1 as ' + str(target_class_map[t])\n p = 1\n else:\n colour = 'g'\n label_ = 'Class is undefined'\n p = 2\n\n if (p == 0 and not truth[p]) or (p == 1 and not truth[p]) or (p == 2 and not truth[p]):\n plt.scatter(f1, f2, color=colour, label=label_)\n truth[p] = True\n else:\n plt.scatter(f1, f2, color=colour)\n\n plt.title(target_name + ' vs ' + parameter1 + ' & ' + parameter2)\n plt.legend(loc='best')\n\n parameter1_classification = _define_class_map(parameter1, self)\n parameter2_classification = _define_class_map(parameter2, self)\n\n 
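# put the index-to-word class mapping into each axis label so integer-encoded features stay readable\n        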
plt.xlabel(parameter1 + ' ' + str(parameter1_classification))\n plt.ylabel(parameter2 + ' ' + str(parameter2_classification))\n plt.show()\n\n def _get_target_name(self) -> str:\n target_name = list(self.target.keys())[0]\n return target_name\n\n def split_data(self, training_size: float, cv_size=None, shuffle=False) -> None:\n \"\"\"\n Split data according to user's preferences.\n :param training_size: 0.3 - 0.9 desired part of data to use for AI training\n :param cv_size: cross-validation data part to test different algorithms\n :param shuffle: do we want to shuffle first? True/False\n :return: assigned pointers is self._split_pointers which shows how the data\n is split on training/cv/testing sets. This is nice to do instead of\n copying data\n \"\"\"\n\n if not 0.3 <= training_size <= 0.9 or ((cv_size is not None) and (training_size + cv_size) >= 0.95):\n print(\"Wrong train-test-cv attitude\")\n return None\n\n if shuffle is True:\n self.shuffle()\n\n m = len(self)\n\n tr_p_st = 0\n tr_p_end = int(m * training_size)\n self._split_pointers[self._TRAINING] = [[tr_p_st, tr_p_end], True]\n\n ts_p_st = tr_p_end + 1\n ts_p_end = m - 1\n\n if cv_size is not None:\n cv_part = int(cv_size * m)\n cv_p_st = tr_p_end + 1\n cv_p_end = cv_p_st + cv_part\n self._split_pointers[self._CV] = [[cv_p_st, cv_p_end], True]\n ts_p_st = cv_p_end + 1\n\n self._split_pointers[self._TESTING] = [[ts_p_st, ts_p_end], True]\n\n if cv_size is not None:\n print(\"Data was split as follows: {} training set, {} cross-validation set and {} testing set\".\n format(training_size, cv_size, (1 - training_size - cv_size)))\n else:\n print(\"Data was split as follows: {} training set and {} testing set\".\n format(training_size, 1 - training_size))\n\n def shuffle(self) -> None:\n \"\"\"\n Random data shuffle.\n :return: shuffled data\n \"\"\"\n\n def _swap(array, idx1, idx2):\n array[idx1], array[idx2] = array[idx2], array[idx1]\n\n import random\n for idx in range(len(self)):\n random_idx1 = random.randint(0, len(self) - 1)\n random_idx2 = random.randint(0, len(self) - 1)\n while random_idx1 == random_idx2:\n random_idx2 = random.randint(0, len(self) - 1)\n for column_name in self.head:\n column_obj = self.table[column_name]\n _swap(column_obj.data, random_idx1, random_idx2)\n print(\"Shuffle was done\")\n\n def add_new_feature(self, features, power=None) -> None:\n \"\"\"\n Add new desired feature to use.\n :param features: feature str or list of features from main table, which user might\n want to combine to create a new feature like x3 = x1 * x2,\n where x3 - new feature, x1 and x2 - features from main table\n :param power: specific command to perform:\n if None -> new_feature = features[0] * features[1] * ...\n if 0.5 -> new_feature = sqrt(features[0]) for feature[0] >= 0\n if positive int -> new_feature = pow(features[0], command)\n :return: new column of added feature as native one\n \"\"\"\n\n def _validate_feature_name(_name: str, _head: dict):\n return _name in _head\n\n if type(features) == str:\n features = [features]\n\n if features is None:\n print(\"Type features' names in a list format\")\n return\n\n new_feature_name = ''\n new_column_obj = None\n\n if power is None:\n _validation_check = False\n for feature_name in features:\n if not _validate_feature_name(feature_name, self.table):\n proposed_feature_name = helper.check_spelling_helper(feature_name, self.head)\n user_input = self.__user_confirmation(feature_name, proposed_feature_name)\n if user_input[0].lower() == self._YES:\n\n feature_name = 
proposed_feature_name\n                    else:\n                        print(\"Skip {} feature\".format(feature_name))\n                        continue\n\n                if new_column_obj is None:\n                    new_column_obj = self.table[feature_name].copy()\n                    new_column_obj.reset()\n                else:\n                    _validation_check = True\n                    new_data = self.table[feature_name]\n                    for idx in range(len(new_column_obj)):\n                        new_column_obj.data[idx] = round((new_column_obj.data[idx] * new_data.data[idx]),\n                                                         self.ROUND_AFTER_COMA)\n\n                new_feature_name += \"*\" + feature_name if len(new_feature_name) > 0 else feature_name\n\n            if new_column_obj is not None and _validation_check is True:\n                self._add_feature_helper(new_feature_name, new_column_obj)\n            else:\n                if _validation_check is False:\n                    print(\"We cannot create the same feature as one already in our main table\")\n                else:\n                    print(\"Please, provide a valid power input\")\n        else:\n            if power <= 0:\n                print(\"Power must be a positive number\")\n                return\n            feature_name = features[0]\n            new_feature_name = feature_name + '^' + \"({})\".format(power)\n            new_column_obj = self.table[feature_name].copy()\n            for idx in range(len(new_column_obj)):\n                new_column_obj.data[idx] = round(pow(new_column_obj.data[idx], power),\n                                                 self.ROUND_AFTER_COMA)\n            self._add_feature_helper(new_feature_name, new_column_obj)\n\n    def max_scaling(self, column_name=None) -> None:\n        \"\"\"\n        Min-max scaling of the assigned column or the whole table.\n        :param column_name: string column name which we want to scale\n        :return: None\n        \"\"\"\n        if column_name is not None:\n            column = self.table[column_name]\n            column.scaling()\n            print(\"Column {} was scaled\".format(column_name))\n        else:\n            if self._data_is_scaled is True:\n                return\n            self._data_is_scaled = True\n            for column_name in self.table:\n                column = self.table[column_name]\n                column.scaling()\n                print(\"Column {} was scaled\".format(column_name))\n\n    def deactivate_feature(self, feature_name):\n        \"\"\"\n        Remove feature from the training set.\n        :param feature_name: feature name as a string or list of strings\n        :return: None\n        \"\"\"\n\n        def _validate_feature_name(_name: str, _head: dict):\n            return _name in _head\n\n        if type(feature_name) == list:\n            for internal_feature_name in feature_name:\n                self.deactivate_feature(internal_feature_name)\n            return\n\n        if _validate_feature_name(feature_name, self.table):\n            if feature_name in self.features:\n                del self.features[feature_name]\n            print(\"Feature {} was disabled from the training set\".format(feature_name))\n        else:\n            proposed_name = helper.check_spelling_helper(feature_name, self.head)\n            if proposed_name is not None:\n                print(\"You made a typo. Did you mean {}?\".format(proposed_name))\n                print(\"Type y/n\")\n                user_input = input()\n                if user_input[0].lower() == self._YES:\n                    self.deactivate_feature(proposed_name)\n            else:\n                print(\"Nothing was done\")\n\n    def select_target(self, target_name: str):\n        \"\"\"\n        Select target to be used from self.table for AI.\n        :param target_name: target name per table as string\n        :return: None\n        \"\"\"\n\n        if not len(self.target):\n            self.activate_features(target_name, is_target=True)\n        else:\n            if not target_name == self._get_target_name():\n                print(\"Do you want to replace the existing {} target? 
Enter y/n\".format(self.target))\n user_input = input()\n if user_input[0].lower() == self._YES:\n self.activate_features(target_name, is_target=True)\n\n def activate_features(self, feature_name, is_target=False) -> None:\n \"\"\"\n Select feature to be used from self.table for AI.\n :param feature_name: feature name per table as string or list of features string\n :param is_target: are we setting feature or target?\n :return: None\n \"\"\"\n\n def _validate_feature_name(_name: str, _head: dict):\n return _name in _head\n\n if type(feature_name) == list:\n for internal_feature_name in feature_name:\n self.activate_features(internal_feature_name)\n return\n\n if _validate_feature_name(feature_name, self.table):\n if is_target is False:\n self.features[feature_name] = self.table[feature_name]\n print(\"Feature {} was added\".format(feature_name))\n else:\n self.target[feature_name] = self.table[feature_name]\n print(\"Target {} was added\".format(feature_name))\n else:\n proposed_name = helper.check_spelling_helper(feature_name, self.head)\n if proposed_name is not None:\n print(\"You made a typo mistake in {}. Did you mean {}?\".format(feature_name, proposed_name))\n print(\"Type y/n\")\n user_input = input()\n if user_input[0].lower() == self._YES:\n self.activate_features(proposed_name, is_target)\n else:\n print(\"There is no table. Upload it before this operation\")\n\n def get_min_max_features(self) -> list:\n \"\"\"Return min-max values of each feature columns in list [min, max] format\"\"\"\n if not self.features:\n print(\"There are no defined features\")\n return []\n storage = [[0 for x in range(2)] for y in range(len(self.features))]\n for idx, key in enumerate(self.features):\n column = self.features[key]\n storage[idx] = [column.min, column.max]\n return storage\n\n def _plot2d_helper(self, axis1: str, axis2: str, colour: str) -> None:\n \"\"\"\n Actual 2D plot.\n :param axis1: column name\n :param axis2: column name\n :param colour: \"blue\", \"green\", etc.\n :return: pictures\n \"\"\"\n import matplotlib.pyplot as plt\n plt.scatter(self.table[axis1].data, self.table[axis2].data, color=colour)\n plt.xlabel(axis1)\n plt.ylabel(axis2)\n plt.show()\n\n def _add_feature_helper(self, new_feature_name: str, new_column_obj: _DataColumn) -> None:\n \"\"\"\n Add feature column to main data table.\n :param new_feature_name: its feature name as a string\n :param new_column_obj: _DataColumn obj with data\n :return: new features in main data\n \"\"\"\n new_column_obj.attribute_calculation()\n self.table[new_feature_name] = new_column_obj\n self.head.append(new_feature_name)\n self.features[new_feature_name] = new_column_obj\n print(\"New created feature {} was added\".format(new_feature_name))\n print(\"This {} feature is added to the list of training set\".format(new_feature_name))\n self._data_is_scaled = False\n\n def __create_data_structure(self, m: int) -> None:\n for name in self.head:\n self.table[name] = self._DataColumn(m * [0])\n self.table[name].ROUND_AFTER_COMA = self.ROUND_AFTER_COMA\n\n @staticmethod\n def __user_confirmation(word: str, proposed_word: str):\n \"\"\"\n Script to ask user if proposed word is ok or not.\n :param word:\n :param proposed_word:\n :return:\n \"\"\"\n if proposed_word is None:\n print(\"There is no table. Please, provide it first\")\n return\n print(\"You made a typo mistake in {}. 
Did you mean {}?\".format(word, proposed_word))\n        print(\"Type y/n\")\n        user_input = input()\n        return user_input\n\n\nif __name__ == '__main__':\n    pass\n","sub_path":"data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":31621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"531476759","text":"# D4 Searching and Sorting\r\n\r\n# Part 1 - different algorithms solve the same problem\r\nimport timeit\r\nimport random\r\n\r\n\r\n\r\nclass Track():\r\n    ''' Class representing a track with the attributes track_id, track_time, artist_name and song_title '''\r\n    def __init__(self, track_id, track_time, artist_name,song_title):\r\n\r\n        self.track_id = track_id\r\n        self.track_time = track_time\r\n        self.artist_name = artist_name\r\n        self.song_title = song_title\r\n\r\n\r\n    def __lt__(self,other):\r\n        '''Compares whether the artist_name attribute of this object is smaller than that of another object. '''\r\n        if self.artist_name < other.artist_name:\r\n            return True\r\n\r\n        else:\r\n            return False\r\n\r\n\r\n    def __repr__(self):\r\n        return (self.track_id + ' ' + self.track_time + ' ' + self.artist_name + ' ' + self.song_title)\r\n    \r\n    \r\n\r\n\r\n\r\n'''Reads the file, stores each track's attributes in a Track object and appends all objects to a list.\r\n   OUT: list where every element is a Track object.'''\r\ndef readfile():\r\n    track_file=open('unique_tracks.txt','r',encoding='utf-8')\r\n    tracks= track_file.readlines()\r\n    TrackList_object=[]\r\n\r\n    for i in tracks:\r\n        i=i.strip()\r\n        splitting_line= i.split('<SEP>')\r\n        track_id = splitting_line[0]\r\n        track_time = splitting_line[1]\r\n        artist_name = splitting_line[2]\r\n        song_title = splitting_line[3]\r\n\r\n        TrackList_object.append(Track(track_id ,track_time ,artist_name , song_title))\r\n\r\n    track_file.close()\r\n\r\n    return TrackList_object\r\n\r\n\r\n\r\n\r\n''' Defines my hash table and stores each object in a dictionary with artist_name as key.\r\n    OUT: myDict (hash table) with artist_name as key and the object as value.'''\r\ndef dictionary():\r\n\r\n    myDict={}\r\n    TrackList_object=readfile()\r\n    for Object in TrackList_object:\r\n        myDict[Object.artist_name]=[Object]\r\n    return myDict\r\n\r\n# Timing: dictionary lookup\r\ndef search_dictionary(myDict,artist):\r\n    return myDict[artist]\r\n\r\n\r\n#myDict= dictionary()\r\n#random_track= random.choice(readfile())\r\n#random_artist= random_track.artist_name \r\n#time_dic=timeit.timeit(stmt= lambda: search_dictionary(myDict,random_artist) ,number=100)\r\n#print('finding an element in the dictionary took ', round(time_dic,9),' seconds')\r\n\r\n\r\n\r\n\r\n''' Timing: the functions below answer the sub-questions of the Timing part '''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#1: linear search, O(n) in the worst case\r\n\r\n\r\n\r\ndef sorted_list(TrackList_object):\r\n    '''Linear search in a sorted list that finds the second-to-last element.\r\n    Returns the artist_name of the second-to-last object.''' \r\n    TrackList_object.sort()\r\n\r\n    for i in range(len(TrackList_object)):\r\n        if TrackList_object[i].artist_name== TrackList_object[-2].artist_name:\r\n\r\n            return TrackList_object[i].artist_name\r\n\r\n\r\n\r\ndef unsorted_list(TrackList_object,penultimate_artist):\r\n    '''Linear search in an unsorted list; returns the artist_name of the second-to-last element ''' \r\n    for i in TrackList_object:\r\n        if i.artist_name== penultimate_artist:\r\n            return penultimate_artist\r\n    \r\n\r\n\r\n\r\n# Linear search, sorted and unsorted list\r\ndef linear_search():\r\n    
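'''Time linear search on both the sorted and the unsorted list and print the timings.'''\r\n    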
TrackList_object=readfile()\r\n    TrackList_object.sort()\r\n    penultimate_artist= sorted_list(TrackList_object)\r\n    \r\n    \r\n    # sorted list\r\n    lintime_sort= timeit.timeit(stmt=lambda: sorted_list(TrackList_object),number=1)\r\n\r\n    # unsorted list\r\n    lintime_unsort= timeit.timeit(stmt=lambda: unsorted_list(TrackList_object,penultimate_artist),number=1)\r\n\r\n    print('linear search in the sorted list took:', round(lintime_sort,4), 'seconds')\r\n    print('linear search in the unsorted list took:', round(lintime_unsort,4), 'seconds')\r\n\r\n\r\nlinear_search()\r\n\r\n\r\n\r\n#2 Mergesort\r\n\r\n\r\ndef mergeSort(alist):\r\n    # print(\"Splitting \",alist)\r\n    if len(alist)>1:\r\n        mid = len(alist)//2\r\n        lefthalf = alist[:mid]\r\n        righthalf = alist[mid:]\r\n\r\n        mergeSort(lefthalf)\r\n        mergeSort(righthalf)\r\n\r\n        i=0\r\n        j=0\r\n        k=0\r\n        while i < len(lefthalf) and j < len(righthalf):\r\n            if lefthalf[i] <= righthalf[j]:\r\n                alist[k]=lefthalf[i]\r\n                i=i+1\r\n            else:\r\n                alist[k]=righthalf[j]\r\n                j=j+1\r\n            k=k+1\r\n\r\n        while i < len(lefthalf):\r\n            alist[k]=lefthalf[i]\r\n            i=i+1\r\n            k=k+1\r\n\r\n        while j < len(righthalf):\r\n            alist[k]=righthalf[j]\r\n            j=j+1\r\n            k=k+1\r\n    #print(\"Merging \",alist)\r\n\r\n\r\nalist = [54,26,93,17,77,31,44,55,20]\r\nmergeSort(alist)\r\n#print(alist)\r\n\r\n#merge_time= timeit.timeit(stmt=lambda: mergeSort(alist),number=100)\r\n#print('sorting with mergesort took:', round(merge_time,9), 'seconds')\r\n\r\n    \r\n\r\n\r\n''' 4: Linear search in an unsorted list with 1000 random elements; report the average. '''\r\n\r\n\r\ndef linear_random():\r\n    lintime_random=0\r\n    TrackList_object=readfile()\r\n    for i in range(1000): \r\n        random_object= random.choice(TrackList_object)  # pick a fresh random element each iteration\r\n        random_name= random_object.artist_name\r\n\r\n        lintime_random+= timeit.timeit(stmt=lambda: unsorted_list(TrackList_object, random_name),number=1)\r\n    print('average time for linear search with a random element: ', round(lintime_random/1000,4), 'seconds')\r\n\r\n#linear_random()\r\n    \r\n\r\n''' 5: Binary search in a sorted list, O(log2 n)'''\r\n\r\ndef binary_search(TrackList_object, artist):\r\n    low = 0\r\n    high = len(TrackList_object)-1\r\n    found = False\r\n\r\n    while low <= high and not found:\r\n        middle = (low + high)//2\r\n        if TrackList_object[middle].artist_name == artist:\r\n            found = True\r\n        else:\r\n            if artist < TrackList_object[middle].artist_name:\r\n                high = middle - 1\r\n            else:\r\n                low = middle + 1\r\n    return found\r\n\r\n\r\ndef binary_time():\r\n    TrackList_object=readfile()\r\n    artist_object= random.choice(TrackList_object)\r\n    artist_name= artist_object.artist_name\r\n    bintime= timeit.timeit(stmt=lambda: binary_search(TrackList_object, artist_name),number=100)\r\n    print('binary search took ', round(bintime,9), 'seconds')\r\n\r\nbinary_time()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Algortihms and Data Structures/assignments in Python/D4.py","file_name":"D4.py","file_ext":"py","file_size_in_byte":6222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"282566466","text":"from ebooklib import epub\n\ndef make_epubs(posts, lang, dest, author, basename=None):\n\n    book = epub.EpubBook()\n\n    # add metadata\n    book.set_title(author)\n    book.set_language(lang)\n\n    book.add_author(author)\n\n    chapters = []\n\n    for post in posts:\n        c1 = epub.EpubHtml(title=post.title(lang=lang), file_name='%s.xhtml' % post.meta[lang]['slug'])\n        c1.content=u'

<html><head></head><body><h1>%s</h1>
%s' % (post.title(lang=lang), post.text(lang=lang, show_read_more_link=False))\n book.add_item(c1)\n chapters.append(c1)\n\n\n book.toc = tuple(chapters)\n\n # add navigation files\n book.add_item(epub.EpubNcx())\n book.add_item(epub.EpubNav())\n\n\n # define css style\n style = '''\n@namespace epub \"http://www.idpf.org/2007/ops\";\n\nbody {\nfont-family: Cambria, Liberation Serif, Bitstream Vera Serif, Georgia, Times, Times New Roman, serif;\n}\n\nh2 {\ntext-align: left;\ntext-transform: uppercase;\nfont-weight: 200; \n}\n\nol {\n list-style-type: none;\n}\n\nol > li:first-child {\n margin-top: 0.3em;\n}\n\n\nnav[epub|type~='toc'] > ol > li > ol {\nlist-style-type:square;\n}\n\n\nnav[epub|type~='toc'] > ol > li > ol > li {\n margin-top: 0.3em;\n}\n\n'''\n\n # add css file\n nav_css = epub.EpubItem(uid=\"style_nav\", file_name=\"style/nav.css\", media_type=\"text/css\", content=style)\n book.add_item(nav_css)\n\n # create spine\n if(len(posts) > 1):\n book.spine = ['nav'] + chapters\n else:\n book.spine = chapters\n\n #output_path = os.path.join(output_folder,\n # self.site.path(\"epub_dir\", None, lang))\n # create epub file\n epub.write_epub(dest, book, {})\n\n return dest\n\n","sub_path":"plugins/posts_epub/posts_epub/epub_utils.py","file_name":"epub_utils.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"61785027","text":"\"\"\"This file is a Expr3ss spider created on top of the ATSSpider\nscrapy crawl expr3ss -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://aplus.expr3ss.com/home\"\nsample url:\n https://aplus.expr3ss.com/home\n https://authentics.expr3ss.com/home\n https://accorvacationclub.expr3ss.com/home\n https://ngclubs.expr3ss.com/home\n\"\"\"\nfrom urlparse import urljoin\nfrom re import compile\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, ConvertDateString, RemoveBadElements, HtmlFormatter\n\n\nclass Expr3ss(ATSSpider):\n\n name = 'expr3ss'\n company_reg = compile('//(.*?)\\.')\n location_reg = compile('mapCenter:\\s\\'(.*?)\\'')\n ref_reg = compile('selectJob=(\\d+)&')\n company = \"\"\n\n def __init__(self, *args, **kwargs):\n super(Expr3ss, self).__init__(*args, **kwargs)\n company_reg_res = self.company_reg.search(kwargs[\"url\"])\n if company_reg_res:\n self.company = company_reg_res.group(1)\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath(\"//tbody[contains(@class,'panel')]/tr\")\n for job in jobs:\n job_link = job.xpath(\"./td[@class='center']/a/@href\").extract()\n if job_link:\n job_url = urljoin(response.url, job_link[0])\n meta = {\n \"title\": job.xpath(\n \"./td[@class='jobTitle']/div[contains(@class,'jobdescription')]/text()\"\n ).extract(),\n \"date\": job.xpath(\n \"./td[contains(@class,'date')]/text()\"\n ).extract(),\n }\n yield Request(\n job_url, meta=meta, callback=self.parse_job_callback()\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('company', self.company)\n loader.add_value('url', response.url)\n loader.add_value('apply_url', response.url)\n loader.add_value('title', response.meta['title'])\n loader.add_value(\n 'date', response.meta['date'], ConvertDateString(\"%d %b %Y\")\n )\n loader.add_xpath(\n 'location',\n \"//script[contains(text(),'mapCenter')]/text()\",\n 
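# the regex pulls the coordinate string after 'mapCenter' out of the inline map script\n            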
re=self.location_reg\n )\n loader.add_value(\n 'referencenumber', response.url,\n Prefix(self.name+\"-\"+self.company+\"-\"), re=self.ref_reg\n )\n\n desc_xpaths = [\n \"//td[contains(@class,'adCopy')]/node()\",\n \"//table[@id='jobAdTemplate']/tr[td[not(@id='applyMethod')]]\"\n ]\n for xp in desc_xpaths:\n loader.add_xpath(\n 'description', xp, RemoveBadElements(['img']), HtmlFormatter()\n )\n if loader.get_output_value('description'):\n break\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/expr3ss.py","file_name":"expr3ss.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"341206551","text":"#!/usr/bin/env python3\n\nimport re\n\n\nclass Exercise(object):\n def __init__(self, path):\n self.id = re.search('/([0-9]+)/', path).groups(1)[0]\n self.author = None\n with open(path) as exercise_file:\n self.lines = exercise_file.readlines()\n self.parse()\n\n def parse(self):\n self.title = self.lines[0][1:].strip()\n self.introduces = []\n for line in self.lines:\n if line.startswith('Author(s): '):\n self.author = line[len('Author(s): '):].strip()\n if line.startswith('Introduces: '):\n for introduce in line[12:].strip().split(','):\n self.introduces.append(introduce.strip().strip('.'))\n\n def as_dict(self, full=False):\n obj = {'id': self.id,\n 'title': self.title,\n 'introduces': self.introduces}\n if self.author is not None:\n obj['author'] = self.author\n if full:\n obj['text'] = '\\n'.join(self.lines)\n return obj\n","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"542539657","text":"# get the information from uq calender\nfrom pyquery import PyQuery as jquery\nimport re\nimport MySQLdb\n\n\nconnection = MySQLdb.connect('localhost', 'root', '19941005', 'uq_receptionist')\nconnection.set_character_set('utf8')\nconnection.cursor().execute('SET NAMES utf8;')\nconnection.cursor().execute('SET CHARACTER SET utf8;')\nconnection.cursor().execute('SET character_set_connection=utf8;')\n\n\n# clean the tag in html\ndef clean_text(raw_html):\n cleaner = re.compile('<.*?>')\n cleantext = re.sub(cleaner, '', raw_html)\n cleantext = cleantext.replace('\\n', ' ')\n cleantext = re.sub(' +', ' ', cleantext)\n return cleantext\n\n\ndef retrieve_answer_page(answerPage):\n question = answerPage.find('.rn_DataValue').eq(0)\n answer = answerPage.find('.rn_DataValue').eq(1)\n question = clean_text(question.html())\n answer = clean_text(answer.html())\n # for child in answer.items():\n print(question)\n print(answer)\n connection.cursor().execute('''INSERT into general_question (question, answer)\n values (%s, %s)''', (question, answer))\n connection.commit()\n # if child.tag == 'p':\n # print('Node is a p')\n # print(child.text.replace('', '').replace('', ''))\n # elif child.tag == 'blockquote':\n # print('Node is blockquote')\n # print(child.children())\n\n\n\n# retrieve the information in calender page\ndef retrieve_page(qaPage):\n baseurl = 'https://uqfuture.custhelp.com'\n listHolder = qaPage.find('.rn_Content')\n all_link = listHolder.find('a')\n for li in all_link:\n if li.text:\n url = baseurl + li.attrib['href']\n answerPage = jquery(url=url)\n retrieve_answer_page(answerPage=answerPage)\n\n\n# start the program\ndef retrieve_pages():\n max = 43\n baseurl = \"https://uqfuture.custhelp.com/app/answers/list/st/4/page/\"\n index = 1\n 
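# step through the paginated answer list one page at a time\r\n    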
while(index <= max):\n print(index)\n url = baseurl + str(index)\n page = jquery(url=url)\n retrieve_page(page)\n index += 1\n\nretrieve_pages()","sub_path":"crawler/fetchQA.py","file_name":"fetchQA.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"233652261","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 21 13:02:19 2018\n\n@author: dittaya\n\"\"\"\n\nfrom Tree import ArrayBinaryTree\nfrom Tree import LinkedBinaryTree\nfrom LinkedBasedDS import LinkedQueue, PositionalList\n\ndef depth_sum(tree):\n queue = LinkedQueue()\n \n if len(tree) == 0:\n return None\n else:\n root = tree.root()\n ans = PositionalList()\n ans.add_first(0)\n queue.enqueue((0, root))\n while not queue.is_empty():\n level, pos = queue.dequeue()\n if len(ans)-1 == level:\n last = ans.last()\n oldval = last.element()\n ans.replace(last, oldval+pos.element())\n# ans[-1] += tree._data[pos] # for array\n else:\n ans.add_last(pos.element()) # for linked list\n# ans.append(tree._data[pos]) # for array\n for child in tree.children(pos):\n queue.enqueue((level+1, child))\n \n return [item for item in ans]\n \n\n\nif __name__ == '__main__':\n# tree = ArrayBinaryTree()\n tree = LinkedBinaryTree()\n root = tree.add_root(2)\n n1 = tree.add_left(root, 5)\n tree.add_left(n1, -4)\n n1 = tree.add_right(n1, 1)\n tree.add_left(n1, 8)\n n1 = tree.add_right(n1, 4)\n tree.add_left(n1, 7)\n tree.add_right(n1, 2)\n n1 = tree.add_right(root, 7)\n n1 = tree.add_right(n1, 0)\n tree.add_left(n1, -2)\n n1 = tree.add_right(n1, 6)\n tree.add_left(n1, 3)\n \n for i in tree:\n print(i)\n \n print(depth_sum(tree))\n","sub_path":"Data/src/aj_code/Lab07-1-DepthSum.py","file_name":"Lab07-1-DepthSum.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"88752148","text":"# coding=utf-8\n\"\"\"\nremind.py - Sopel Reminder Plugin\nCopyright 2011, Sean B. Palmer, inamidst.com\nCopyright 2019, dgw, technobabbl.es\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport collections\nfrom datetime import datetime\nimport io # don't use `codecs` for loading the DB; it will split lines on some IRC formatting\nimport logging\nimport os\nimport re\nimport time\n\nimport pytz\n\nfrom sopel import plugin, tools\nfrom sopel.tools.time import format_time, get_timezone, validate_timezone\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef get_filename(bot):\n \"\"\"Get the remind database's filename\n\n :param bot: instance of Sopel\n :type bot: :class:`sopel.bot.Sopel`\n :return: the remind database's filename\n :rtype: str\n\n The remind database filename is based on the bot's nick and its\n configured ``core.host``, and it is located in the ``bot``'s ``homedir``.\n \"\"\"\n name = bot.config.basename + '.reminders.db'\n return os.path.join(bot.config.core.homedir, name)\n\n\ndef load_database(filename):\n \"\"\"Load the remind database from a file\n\n :param str filename: absolute path to the remind database file\n :return: a :class:`dict` of reminders stored by timestamp\n :rtype: dict\n\n The remind database is a plain text file (utf-8 encoded) with tab-separated\n columns of data: time, channel, nick, and message. 
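For example, a stored line could look like ``523549800\\t#channel\\tnick\\tbuy milk`` (one reminder per line; the channel, nick, and message here are illustrative). 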
This function reads this\n file and outputs a :class:`dict` where keys are the timestamps of the\n reminders, and values are list of 3-value tuple of reminder data:\n ``(channel, nick, message)``.\n\n .. note::\n\n This function ignores microseconds from the timestamp, if any, meaning\n that ``523549800.245`` will be read as ``523549800``.\n\n .. note::\n\n If ``filename`` is not an existing file, this function returns an\n empty :class:`dict`.\n\n \"\"\"\n if not os.path.isfile(filename):\n # no file to read\n return {}\n\n data = {}\n with io.open(filename, 'r', encoding='utf-8') as database:\n for line in database:\n unixtime, channel, nick, message = line.split('\\t', 3)\n message = message.rstrip('\\n')\n timestamp = int(float(unixtime)) # ignore microseconds\n reminder = (channel, nick, message)\n try:\n data[timestamp].append(reminder)\n except KeyError:\n data[timestamp] = [reminder]\n return data\n\n\ndef dump_database(filename, data):\n \"\"\"Dump the remind database into a file\n\n :param str filename: absolute path to the remind database file\n :param dict data: remind database to dump\n\n The remind database is dumped into a plain text file (utf-8 encoded) as\n tab-separated columns of data: unixtime, channel, nick, and message.\n\n If the file does not exist, it is created.\n \"\"\"\n with io.open(filename, 'w', encoding='utf-8') as database:\n for unixtime, reminders in tools.iteritems(data):\n for channel, nick, message in reminders:\n line = '%s\\t%s\\t%s\\t%s\\n' % (unixtime, channel, nick, message)\n database.write(line)\n\n\ndef create_reminder(bot, trigger, duration, message):\n \"\"\"Create a reminder into the ``bot``'s database and reply to the sender\n\n :param bot: the bot's instance\n :type bot: :class:`~sopel.bot.SopelWrapper`\n :param trigger: the object that triggered the call\n :type trigger: :class:`~sopel.trigger.Trigger`\n :param int duration: duration from now, in seconds, until ``message``\n must be reminded\n :param str message: message to be reminded\n :return: the reminder's timestamp\n :rtype: :class:`int`\n \"\"\"\n timestamp = int(time.time()) + duration\n reminder = (trigger.sender, trigger.nick, message)\n try:\n bot.rdb[timestamp].append(reminder)\n except KeyError:\n bot.rdb[timestamp] = [reminder]\n\n dump_database(bot.rfn, bot.rdb)\n return timestamp\n\n\ndef setup(bot):\n \"\"\"Load the remind database\"\"\"\n bot.rfn = get_filename(bot)\n\n # Pre-7.0 migration logic. 
Remove in 8.0 or 9.0.\n old = bot.nick + '-' + bot.config.core.host + '.reminders.db'\n old = os.path.join(bot.config.core.homedir, old)\n if os.path.isfile(old):\n LOGGER.info(\"Attempting to migrate old 'remind' database {}...\"\n .format(old))\n try:\n os.rename(old, bot.rfn)\n except OSError:\n LOGGER.error(\"Migration failed!\")\n LOGGER.error(\"Old filename: {}\".format(old))\n LOGGER.error(\"New filename: {}\".format(bot.rfn))\n LOGGER.error(\n \"See https://sopel.chat/usage/installing/upgrading-to-sopel-7/#reminder-db-migration\")\n else:\n LOGGER.info(\"Migration finished!\")\n # End migration logic\n\n bot.rdb = load_database(bot.rfn)\n\n\ndef shutdown(bot):\n \"\"\"Dump the remind database before shutdown\"\"\"\n dump_database(bot.rfn, bot.rdb)\n bot.rdb = {}\n del bot.rfn\n del bot.rdb\n\n\n@plugin.interval(2.5)\ndef remind_monitoring(bot):\n \"\"\"Check for reminder\"\"\"\n now = int(time.time())\n unixtimes = [int(key) for key in bot.rdb]\n oldtimes = [t for t in unixtimes if t <= now]\n if oldtimes:\n for oldtime in oldtimes:\n for (channel, nick, message) in bot.rdb[oldtime]:\n if message:\n bot.say(nick + ': ' + message, channel)\n else:\n bot.say(nick + '!', channel)\n del bot.rdb[oldtime]\n dump_database(bot.rfn, bot.rdb)\n\n\nSCALING = collections.OrderedDict([\n ('years', 365.25 * 24 * 3600),\n ('year', 365.25 * 24 * 3600),\n ('yrs', 365.25 * 24 * 3600),\n ('y', 365.25 * 24 * 3600),\n\n ('months', 29.53059 * 24 * 3600),\n ('month', 29.53059 * 24 * 3600),\n ('mo', 29.53059 * 24 * 3600),\n\n ('weeks', 7 * 24 * 3600),\n ('week', 7 * 24 * 3600),\n ('wks', 7 * 24 * 3600),\n ('wk', 7 * 24 * 3600),\n ('w', 7 * 24 * 3600),\n\n ('days', 24 * 3600),\n ('day', 24 * 3600),\n ('d', 24 * 3600),\n\n ('hours', 3600),\n ('hour', 3600),\n ('hrs', 3600),\n ('hr', 3600),\n ('h', 3600),\n\n ('minutes', 60),\n ('minute', 60),\n ('mins', 60),\n ('min', 60),\n ('m', 60),\n\n ('seconds', 1),\n ('second', 1),\n ('secs', 1),\n ('sec', 1),\n ('s', 1),\n])\n\nPERIODS = '|'.join(SCALING.keys())\n\n\n@plugin.command('in')\n@plugin.example('.in 3h45m Go to class')\ndef remind_in(bot, trigger):\n \"\"\"Gives you a reminder in the given amount of time.\"\"\"\n if not trigger.group(2):\n bot.reply(\"Missing arguments for reminder command.\")\n return plugin.NOLIMIT\n if trigger.group(3) and not trigger.group(4):\n bot.reply(\"No message given for reminder.\")\n return plugin.NOLIMIT\n duration = 0\n message = filter(None, re.split(r'(\\d+(?:\\.\\d+)? ?(?:(?i)' + PERIODS + ')) ?',\n trigger.group(2))[1:])\n reminder = ''\n stop = False\n for piece in message:\n grp = re.match(r'(\\d+(?:\\.\\d+)?) 
?(.*) ?', piece)\n if grp and not stop:\n length = float(grp.group(1))\n factor = SCALING.get(grp.group(2).lower(), 60)\n duration += length * factor\n else:\n reminder = reminder + piece\n stop = True\n if duration == 0:\n bot.reply(\"Sorry, didn't understand the input.\")\n return plugin.NOLIMIT\n\n if duration % 1:\n duration = int(duration) + 1\n else:\n duration = int(duration)\n timezone = get_timezone(\n bot.db, bot.config, None, trigger.nick, trigger.sender)\n timestamp = create_reminder(bot, trigger, duration, reminder)\n\n if duration >= 60:\n human_time = format_time(\n bot.db,\n bot.config,\n timezone,\n trigger.nick,\n trigger.sender,\n datetime.utcfromtimestamp(timestamp))\n bot.reply('Okay, will remind at %s' % human_time)\n else:\n bot.reply('Okay, will remind in %s secs' % duration)\n\n\nREGEX_AT = re.compile(\n # hours:minutes\n r'(?P\\d+):(?P\\d+)'\n # optional seconds\n r'(?::(?P\\d+))?'\n # optional timezone\n r'(?P[^\\s\\d]+)?'\n # optional date (start)\n r'(?:\\s+'\n # - date 1 (at least one digit)\n r'(?P\\d{1,4})'\n # - separator (one character)\n r'(?P[./-])'\n # - date 2 (at least one digit)\n r'(?P\\d{1,4})'\n # - optional sep + date 3 (at least one digit)\n r'(?:(?P=sep)(?P\\d{1,4}))?'\n r')?' # (end)\n # at least one space + message\n r'\\s+(?P.*)'\n)\n\n\nclass TimeReminder(object):\n \"\"\"Time reminder for the ``at`` command\"\"\"\n def __init__(self,\n hour,\n minute,\n second,\n timezone,\n date1,\n date2,\n date3,\n message):\n self.hour = hour\n self.minute = minute\n self.second = second\n self.timezone = pytz.timezone(timezone)\n self.message = message\n\n year = None\n month = None\n day = None\n\n if date1 and date2 and date3:\n if len(date1) == 4:\n # YYYY-mm-dd\n year = int(date1)\n month = int(date2)\n day = int(date3)\n else:\n # dd-mm-YYYY or dd/mm/YY\n year = int(date3)\n month = int(date2)\n day = int(date1)\n elif date1 and date2:\n if len(date1) == 4:\n # YYYY-mm\n year = int(date1)\n month = int(date2)\n elif len(date2) == 4:\n # mm-YYYY\n year = int(date2)\n month = int(date1)\n else:\n # dd/mm\n month = int(date2)\n day = int(date1)\n\n self.year = year\n self.month = month\n self.day = day\n\n def __eq__(self, other):\n return all(\n getattr(self, attr) == getattr(other, attr, None)\n for attr in [\n 'hour',\n 'minute',\n 'second',\n 'timezone',\n 'year',\n 'month',\n 'day',\n 'message',\n ]\n )\n\n def __ne__(self, other):\n return any(\n getattr(self, attr) != getattr(other, attr, None)\n for attr in [\n 'hour',\n 'minute',\n 'second',\n 'timezone',\n 'year',\n 'month',\n 'day',\n 'message',\n ]\n )\n\n # Mutable objects probably shouldn't be made hashable\n # https://docs.python.org/3/reference/datamodel.html#object.__hash__\n __hash__ = None\n\n def get_duration(self, today=None):\n \"\"\"Get the duration between the reminder and ``today``\n\n :param today: aware datetime to compare to; defaults to current time\n :type today: aware :class:`datetime.datetime`\n :return: The duration, in second, between ``today`` and the reminder.\n :rtype: :class:`int`\n\n This method returns the number of seconds given by the\n :class:`datetime.timedelta` obtained by the difference between the\n reminder and ``today``.\n\n If the delta between the reminder and ``today`` is negative, Python\n will represent it as a negative number of days, and a positive number\n of seconds: since it returns the number of seconds, any past reminder\n will be transformed into a future reminder the next day.\n\n .. 
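`TimeReminder.__init__` above disambiguates the optional date fields purely by the width of the leading component: a four-digit first field is read as a year. The same decision table, pulled out as a standalone function for clarity:

```python
def parse_date_parts(date1, date2, date3=None):
    """Return (year, month, day) using the width-based rules shown above."""
    if date3:
        if len(date1) == 4:                      # YYYY-mm-dd
            return int(date1), int(date2), int(date3)
        return int(date3), int(date2), int(date1)  # dd-mm-YYYY or dd-mm-YY
    if len(date1) == 4:                          # YYYY-mm
        return int(date1), int(date2), None
    if len(date2) == 4:                          # mm-YYYY
        return int(date2), int(date1), None
    return None, int(date2), int(date1)          # dd/mm

print(parse_date_parts('2038', '01', '19'))  # (2038, 1, 19)
print(parse_date_parts('25', '12'))          # (None, 12, 25)
```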
seealso::\n\n The :mod:`datetime` built-in module's documentation explains what\n is an \"aware\" datetime.\n\n \"\"\"\n if not today:\n today = datetime.now(self.timezone)\n else:\n today = today.astimezone(self.timezone)\n\n year = self.year if self.year is not None else today.year\n month = self.month if self.month is not None else today.month\n day = self.day if self.day is not None else today.day\n\n at_time = datetime(\n year, month, day,\n self.hour, self.minute, self.second,\n tzinfo=today.tzinfo)\n\n timediff = at_time - today\n duration = timediff.seconds\n\n if timediff.days > 0:\n duration = duration + (86400 * timediff.days)\n\n return duration\n\n\ndef parse_regex_match(match, default_timezone=None):\n \"\"\"Parse a time reminder from ``match``\n\n :param match: :obj:`~.REGEX_AT`'s matching result\n :param default_timezone: timezone used when ``match`` doesn't have one;\n defaults to ``UTC``\n :rtype: :class:`TimeReminder`\n \"\"\"\n try:\n # Removing the `or` clause will BREAK the fallback to default_timezone!\n # We need some invalid value other than None to trigger the ValueError.\n # validate_timezone(None) excepting would be easier, but it doesn't.\n timezone = validate_timezone(match.group('tz') or '')\n except ValueError:\n timezone = default_timezone or 'UTC'\n\n return TimeReminder(\n int(match.group('hours')),\n int(match.group('minutes')),\n int(match.group('seconds') or '0'),\n timezone,\n match.group('date1'),\n match.group('date2'),\n match.group('date3'),\n match.group('message')\n )\n\n\n@plugin.command('at')\n@plugin.example('.at 13:47 Do your homework!', user_help=True)\n@plugin.example('.at 03:14:07 2038-01-19 End of signed 32-bit int timestamp',\n user_help=True)\n@plugin.example('.at 00:01 25/12 Open your gift!', user_help=True)\ndef remind_at(bot, trigger):\n \"\"\"Gives you a reminder at the given time.\n\n Takes ``hh:mm:ssTimezone Date message`` where seconds, Timezone, and Date\n are optional.\n\n Timezone is any timezone Sopel takes elsewhere; the best choices are those\n from the tzdb; a list of valid options is available at\n .\n\n The Date can be expressed in one of these formats: YYYY-mm-dd, dd-mm-YYYY,\n dd-mm-YY, or dd-mm. 
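`get_duration` leans on how Python normalizes a negative `timedelta` (only `days` goes negative; `seconds` stays in `[0, 86400)`), which is what silently turns an already-past time into the same wall-clock time tomorrow. A quick demonstration:

```python
from datetime import datetime, timedelta

earlier = datetime(2021, 1, 1, 10, 0, 0)
later = datetime(2021, 1, 1, 12, 0, 0)

diff = earlier - later            # "minus two hours"
print(diff.days, diff.seconds)    # -1 79200  (i.e. -1 day plus 22 hours)

# Keeping only .seconds (as get_duration does when timediff.days <= 0)
# schedules the reminder 22 hours ahead: the same time, next day.
print(timedelta(seconds=diff.seconds))  # 22:00:00
```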
The separator can be ``.``, ``-``, or ``/``.\n \"\"\"\n if not trigger.group(2):\n bot.reply(\"No arguments given for reminder command.\")\n return plugin.NOLIMIT\n if trigger.group(3) and not trigger.group(4):\n bot.reply(\"No message given for reminder.\")\n return plugin.NOLIMIT\n\n match = REGEX_AT.match(trigger.group(2))\n if not match:\n bot.reply(\"Sorry, but I didn't understand your input.\")\n return plugin.NOLIMIT\n\n default_timezone = get_timezone(bot.db, bot.config, None,\n trigger.nick, trigger.sender)\n\n reminder = parse_regex_match(match, default_timezone)\n\n try:\n duration = reminder.get_duration()\n except ValueError as error:\n bot.reply(\n \"Sorry, but I didn't understand your input: %s\" % str(error))\n return plugin.NOLIMIT\n\n # save reminder\n timestamp = create_reminder(bot, trigger, duration, reminder.message)\n\n if duration >= 60:\n human_time = format_time(\n bot.db,\n bot.config,\n reminder.timezone.zone,\n trigger.nick,\n trigger.sender,\n datetime.utcfromtimestamp(timestamp))\n bot.reply('Okay, will remind at %s' % human_time)\n else:\n bot.reply('Okay, will remind in %s secs' % duration)\n\n\n@plugin.command('reminders')\n@plugin.example('.reminders forget *', user_help=True)\n@plugin.example('.reminders count #channel', user_help=True)\n@plugin.example('.reminders count', user_help=True)\ndef manage_reminders(bot, trigger):\n \"\"\"Count or forget your reminders in the current channel.\n\n Use a subcommand \"count\" (default) or \"forget\". The second argument is\n optional and can be either a channel name, your nick, or * (for all).\n \"\"\"\n owner = trigger.nick\n action = trigger.group(3) or trigger.sender\n target = trigger.group(4)\n\n if action not in ['count', 'forget'] and not target:\n # assume `.reminders` or `.reminders #channel`\n # in that case, invalid action will just count 0 reminder\n action, target = 'count', action\n\n if action == 'count':\n tpl = 'You have {count} reminders for all channels.'\n nick_reminders = (\n (timestamp, channel, nick, message)\n for timestamp, reminders in bot.rdb.items()\n for channel, nick, message in reminders\n if nick == owner\n )\n if target and target != '*':\n tpl = 'You have {count} reminders in {target}.'\n nick_reminders = (\n (timestamp, channel, nick, message)\n for timestamp, channel, nick, message in nick_reminders\n if channel == target\n )\n\n count = sum(1 for __ in nick_reminders)\n\n if target == owner:\n target = 'private'\n\n bot.reply(tpl.format(count=count, target=target))\n\n elif action == 'forget':\n bot.rdb = {\n timestamp: [\n (channel, nick, message)\n for channel, nick, message in reminders\n if not (\n nick == owner\n and (target == '*' or target == channel)\n )\n ]\n for timestamp, reminders in bot.rdb.items()\n }\n dump_database(bot.rfn, bot.rdb)\n\n if not target or target == '*':\n bot.reply('I forgot all your reminders.')\n elif target == owner:\n bot.reply('I forgot your private reminders.')\n else:\n bot.reply('I forgot your reminders in %s' % target)\n\n else:\n bot.reply(\n 'Unrecognized action. 
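The `forget` branch of `manage_reminders` above rebuilds the whole reminder store with a dict comprehension, dropping only the caller's matching entries. The same filter run on plain data (channel and nick names are illustrative); note that emptied per-timestamp lists are kept, just as in the plugin:

```python
rdb = {
    100: [('#dev', 'alice', 'standup'), ('#dev', 'bob', 'review')],
    200: [('#ops', 'alice', 'deploy')],
}
owner, target = 'alice', '#dev'

rdb = {
    ts: [r for r in reminders
         if not (r[1] == owner and (target == '*' or target == r[0]))]
    for ts, reminders in rdb.items()
}
print(rdb)  # {100: [('#dev', 'bob', 'review')], 200: [('#ops', 'alice', 'deploy')]}
```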
'\n 'Usage: {}reminders [count|forget [nickname|channel|*]]'\n .format(bot.config.core.help_prefix))\n","sub_path":"sopel/modules/remind.py","file_name":"remind.py","file_ext":"py","file_size_in_byte":17983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348905779","text":"# -*- coding: utf-8 -*-\n\nimport logging\nfrom launcher import app\n\nfrom PyQt5.QtCore import pyqtSlot, Qt\nfrom PyQt5.QtWidgets import QDialog, QListWidgetItem\n\nfrom .ui_scheduler import Ui_Dialog\nimport Schedule\n\n\nclass SchedulerWindow(QDialog, Ui_Dialog):\n scheduler = None\n\n def __init__(self, parent = None):\n super().__init__(parent)\n self.setupUi(self)\n self.setAttribute(Qt.WA_DeleteOnClose)\n\n self.scheduler = app.scheduler\n self.loadFromScheduler()\n\n def loadFromScheduler(self):\n # actWhen ComboBox\n for row, pair in enumerate(self.scheduler.POSSIBLE_ACTWHENS):\n self.comboBox_actWhen.addItem(pair[1])\n self.comboBox_actWhen.setItemData(row, pair[0])\n\n self.slotActWhenChanged(self.scheduler.actWhen)\n self.comboBox_actWhen.setCurrentIndex(self.scheduler.actWhen)\n self.comboBox_actWhen.activated[int].connect(self.slotActWhenChanged)\n\n # tasks list\n runningTasks = app.etmpy.runningTasksStat.getTasks()\n waitingTaskIds = self.scheduler.waitingTaskIds\n for rTaskId, rTask in runningTasks.items():\n item = QListWidgetItem(rTask[\"name\"])\n item.setData(Qt.UserRole, rTaskId)\n self.listWidget_tasks.addItem(item)\n\n # must be set before being added\n if rTaskId in waitingTaskIds:\n item.setSelected(True)\n else:\n item.setSelected(False)\n\n # action comboBox\n selectedIndex = None\n for action in self.scheduler.actions:\n if action.command or action.availability == \"yes\":\n self.comboBox_action.addItem(action.displayName)\n row = self.comboBox_action.count() - 1\n self.comboBox_action.setItemData(row, action.actionId)\n if self.scheduler.actionId == action.actionId:\n selectedIndex = row\n self.comboBox_action.setCurrentIndex(selectedIndex)\n\n @pyqtSlot(int)\n def slotActWhenChanged(self, choice):\n if choice == Schedule.ALL_TASKS_COMPLETED:\n self.listWidget_tasks.setEnabled(False)\n elif choice == Schedule.SELECTED_TASKS_COMPLETED:\n self.listWidget_tasks.setEnabled(True)\n else:\n raise Exception(\"Unknown Scheduler actWhen\")\n\n @pyqtSlot()\n def accept(self):\n actWhen = self.comboBox_actWhen.currentData()\n taskIds = set(map(lambda item: item.data(Qt.UserRole),\n self.listWidget_tasks.selectedItems()))\n actionId = self.comboBox_action.currentData()\n\n self.scheduler.set(actWhen, taskIds, actionId)\n self.close()\n","sub_path":"src/frontend/Schedule/SchedulerWin.py","file_name":"SchedulerWin.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"629148239","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 06 08:19:08 2016\r\n\r\n@author: Lukas Vikander\r\n\"\"\"\r\nimport glob\r\n\r\nallpump = glob.glob(\"*.csv\")\r\no = 0\r\np = 0\r\nfor i in allpump:\r\n if 'drift' in i:\r\n p = p+1\r\n else:\r\n o = o+1","sub_path":"pumpa/templates/Pythonkod/old code/Sökinamn.py","file_name":"Sökinamn.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"209110874","text":"import pandas\nimport numpy as np\nfrom ..qcba_rules import QuantitativeDataFrame\nfrom ..range_iterator import Range\n\n\nclass PruneLiterals:\n\n def __init__(self, 
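The small `Sökinamn.py` record above tallies CSV filenames by whether they contain `'drift'`. The same counts read more idiomatically with `sum()` over generator expressions (equivalent behavior, shown only for illustration):

```python
import glob

names = glob.glob('*.csv')
with_drift = sum(1 for name in names if 'drift' in name)
without_drift = len(names) - with_drift
print(with_drift, without_drift)
```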
quantitative_dataframe):\n self.__dataframe = quantitative_dataframe\n\n def transform(self, rules):\n return [self.__trim(rule) for rule in rules]\n\n def __trim(self, rule):\n def transfer_rule(rule, copied_rule):\n # Method for transfering the properties of the rule from candidate to rule\n rule.support = copied_rule.support\n rule.confidence = copied_rule.confidence\n rule.rulelen = copied_rule.rulelen\n rule.antecedent = copied_rule.antecedent\n\n # Flag to mention if a literal has been removed from the antecdent\n removed = False\n\n literals, consequent = rule.antecedent, rule.consequent\n rule.update_properties(self.__dataframe)\n\n # If only one literals are present then the pruning is not done\n if len(literals) < 1:\n return rule\n\n while True:\n for pos in range(len(literals)):\n \n # Removing 1 rule at a time\n c_literal = literals[0:pos] + literals[pos+1:len(literals)]\n \n c_rule = rule.copy()\n c_rule.antecedent = c_literal\n\n # updating the confidence of the candidate rule\n c_rule.update_properties(self.__dataframe)\n\n if c_rule.confidence > rule.confidence:\n # If there is an improvement in the confidence update the rule\n transfer_rule(rule, c_rule)\n removed = True\n break\n else:\n removed = False\n \n # if no literal has been removed, the algorithm stops\n if removed == False:\n break\n return rule\n","sub_path":"qcba/operations/prune_literals.py","file_name":"prune_literals.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124715398","text":"# -*- coding: utf-8 -*-\n\nimport warnings\n\nfrom ....._protos.public.monitoring import Alert_pb2 as _AlertService\nfrom ....._internal_utils import _utils, time_utils\nfrom ....._tracking import entity, _Context\nfrom ... import utils\n\n\nclass NotificationChannel(entity._ModelDBEntity):\n \"\"\"\n A notification channel persisted to Verta.\n\n A notification channel directs a triggered alert to propagate a message to\n some destination to notify interested parties.\n\n Attributes\n ----------\n id : int\n ID of this notification channel.\n name : str\n Name of this notification channel.\n workspace : str\n Name of the workspace which this notification channel belongs to.\n\n Examples\n --------\n .. 
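`PruneLiterals.__trim` above drops one literal at a time and keeps the change whenever the candidate rule's confidence improves, restarting the scan after each removal. (Note the early guard checks `len(literals) < 1`, i.e. zero literals, even though its comment says "only one".) A generic sketch of the same greedy loop, with a hypothetical `score` callable standing in for rule confidence:

```python
def greedy_prune(literals, score):
    """Repeatedly drop the first literal whose removal improves the score."""
    best = score(literals)
    improved = True
    while improved and len(literals) > 1:
        improved = False
        for i in range(len(literals)):
            candidate = literals[:i] + literals[i + 1:]
            s = score(candidate)
            if s > best:
                literals, best = candidate, s
                improved = True
                break  # restart the scan on the shortened antecedent
    return literals

# e.g. with a scorer that prefers shorter antecedents:
print(greedy_prune(['a', 'b', 'c'], lambda lits: -len(lits)))  # ['c']
```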
code-block:: python\n\n from verta.operations.monitoring.notification_channel import SlackNotificationChannel\n\n channels = Client().operations.notification_channels\n channel = notification_channels.create(\n \"Slack alerts\",\n SlackNotificationChannel(\"https://hooks.slack.com/services/.../.../......\"),\n )\n\n alert = monitored_entity.alerts.create(\n name=\"MSE\",\n alerter=alerter,\n summary_sample_query=sample_query,\n notification_channels=[channel],\n )\n\n \"\"\"\n\n def __init__(self, conn, conf, msg):\n super(NotificationChannel, self).__init__(\n conn,\n conf,\n _AlertService,\n \"alerts\",\n msg,\n )\n\n def __repr__(self):\n self._refresh_cache()\n msg = self._msg\n return \"\\n\\t\".join(\n (\n \"Notification Channel\",\n \"name: {}\".format(msg.name),\n \"id: {}\".format(msg.id),\n \"created: {}\".format(_utils.timestamp_to_str(msg.created_at_millis)),\n \"updated: {}\".format(_utils.timestamp_to_str(msg.updated_at_millis)),\n \"channel: {}\".format(\n # TODO: use a `channel` property that returns the actual class\n _AlertService.NotificationChannelTypeEnum.NotificationChannelType.Name(\n msg.type\n )\n ),\n )\n )\n\n @property\n def name(self):\n self._refresh_cache()\n\n return self._msg.name\n\n @property\n def workspace(self):\n # TODO: replace with _refresh_cache() when backend returns ID on /create\n self._fetch_with_no_cache()\n\n if self._msg.workspace_id:\n return self._conn.get_workspace_name_from_id(self._msg.workspace_id)\n else:\n return self._conn._OSS_DEFAULT_WORKSPACE\n\n @classmethod\n def _get_proto_by_id(cls, conn, id):\n msg = _AlertService.FindNotificationChannelRequest(\n ids=[int(id)],\n page_number=1,\n page_limit=-1,\n )\n endpoint = \"/api/v1/alerts/findNotificationChannel\"\n response = conn.make_proto_request(\"POST\", endpoint, body=msg)\n channels = conn.must_proto_response(response, msg.Response).channels\n if len(channels) > 1:\n warnings.warn(\n \"unexpectedly found multiple notification channels with ID\"\n \" {}\".format(id)\n )\n if channels:\n return channels[0]\n else:\n return None\n\n @classmethod\n def _get_proto_by_name(cls, conn, name, workspace):\n msg = _AlertService.FindNotificationChannelRequest(\n names=[name],\n page_number=1,\n page_limit=-1,\n workspace_name=workspace,\n )\n endpoint = \"/api/v1/alerts/findNotificationChannel\"\n response = conn.make_proto_request(\"POST\", endpoint, body=msg)\n channels = conn.must_proto_response(response, msg.Response).channels\n if len(channels) > 1:\n warnings.warn(\n \"unexpectedly found multiple notification channels with name\"\n \" {} in workspace {}\".format(name, workspace)\n )\n if channels:\n return channels[0]\n else:\n return None\n\n @classmethod\n def _create_proto_internal(\n cls,\n conn,\n ctx,\n name,\n channel,\n workspace,\n created_at_millis,\n updated_at_millis,\n ):\n msg = _AlertService.CreateNotificationChannelRequest(\n name=name,\n created_at_millis=created_at_millis,\n updated_at_millis=updated_at_millis,\n workspace_name=workspace,\n type=channel._TYPE,\n )\n if msg.type == _AlertService.NotificationChannelTypeEnum.SLACK:\n msg.slack_webhook.CopyFrom(channel._as_proto())\n else:\n raise ValueError(\n \"unrecognized notification channel type enum value {}\".format(\n msg.alert.alerter_type\n )\n )\n\n endpoint = \"/api/v1/alerts/createNotificationChannel\"\n response = conn.make_proto_request(\"POST\", endpoint, body=msg)\n notification_channel_msg = conn.must_proto_response(\n response,\n _AlertService.NotificationChannel,\n )\n return 
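Both proto lookups above (`_get_proto_by_id` and `_get_proto_by_name`) share a "take the first match, warn on extras, `None` on no match" idiom. Distilled into a small helper (hypothetical, for illustration only):

```python
import warnings

def first_or_none(matches, what='notification channels'):
    """Return the first match, warning if the lookup was unexpectedly plural."""
    if len(matches) > 1:
        warnings.warn('unexpectedly found multiple %s' % what)
    return matches[0] if matches else None
```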
notification_channel_msg\n\n def _update(self):\n raise NotImplementedError\n\n def delete(self):\n \"\"\"\n Delete this notification channel.\n\n Returns\n -------\n bool\n ``True`` if the delete was successful.\n\n Raises\n ------\n :class:`requests.HTTPError`\n If the delete failed.\n\n \"\"\"\n msg = _AlertService.DeleteNotificationChannelRequest(ids=[self.id])\n endpoint = \"/api/v1/alerts/deleteNotificationChannel\"\n response = self._conn.make_proto_request(\"DELETE\", endpoint, body=msg)\n self._conn.must_response(response)\n return True\n\n\nclass NotificationChannels(object):\n \"\"\"\n Collection object for creating and finding notification channels.\n\n Examples\n --------\n .. code-block:: python\n\n channels = Client().operations.notification_channels\n\n \"\"\"\n\n def __init__(self, client):\n self._client = client\n\n @property\n def _conn(self):\n return self._client._conn\n\n @property\n def _conf(self):\n return self._client._conf\n\n def create(\n self,\n name,\n channel,\n workspace=None,\n created_at=None,\n updated_at=None,\n ):\n \"\"\"\n Create a new notification channel.\n\n Parameters\n ----------\n name : str\n A unique name for this notification channel.\n channel : :class:`~verta.operations.monitoring.notification_channel._NotificationChannel`\n The configuration for this notification channel.\n workspace : str, optional\n Workspace in which to create this notification channel. Defaults to\n the client's default workspace.\n created_at : datetime.datetime or int, optional\n An override creation time to assign to this channel. Either a\n timezone aware datetime object or unix epoch milliseconds.\n updated_at : datetime.datetime or int, optional\n An override update time to assign to this channel. Either a\n timezone aware datetime object or unix epoch milliseconds.\n\n Returns\n -------\n :class:`NotificationChannel`\n Notification channel.\n\n Examples\n --------\n .. code-block:: python\n\n from verta.operations.monitoring.notification_channel import SlackNotificationChannel\n\n channels = Client().operations.notification_channels\n\n channel = notification_channels.create(\n \"Slack alerts\",\n SlackNotificationChannel(\"https://hooks.slack.com/services/.../.../......\"),\n )\n\n \"\"\"\n if workspace is None:\n workspace = self._client.get_workspace()\n\n ctx = _Context(self._conn, self._conf)\n return NotificationChannel._create(\n self._conn,\n self._conf,\n ctx,\n name=name,\n channel=channel,\n workspace=workspace,\n created_at_millis=time_utils.epoch_millis(created_at),\n updated_at_millis=time_utils.epoch_millis(updated_at),\n )\n\n def get(self, name=None, workspace=None, id=None):\n \"\"\"\n Get an existing notification channel.\n\n Either `name` or `id` can be provided but not both.\n\n Parameters\n ----------\n name : str, optional\n Notification channel name.\n workspace : str, optional\n Workspace in which the notification channel exists. 
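`NotificationChannels.create` above converts optional datetimes to unix epoch milliseconds via `time_utils.epoch_millis`. A conversion in that spirit (hedged: the real helper's behavior may differ, for example with naive datetimes):

```python
from datetime import datetime, timezone

def epoch_millis(value):
    """Accept None, epoch milliseconds, or an aware datetime."""
    if value is None:
        return None
    if isinstance(value, datetime):
        # .timestamp() on a naive datetime would assume local time,
        # hence the docstrings' insistence on timezone-aware objects.
        return int(value.timestamp() * 1000)
    return int(value)

print(epoch_millis(datetime(2021, 1, 1, tzinfo=timezone.utc)))  # 1609459200000
```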
Defaults to the\n client's default workspace.\n id : int, optional\n Notification channel ID.\n\n Returns\n -------\n :class:`NotificationChannel`\n Notification channel.\n\n \"\"\"\n if name and id:\n raise ValueError(\"cannot specify both `name` and `id`\")\n if workspace and id:\n raise ValueError(\n \"cannot specify both `workspace` and `id`;\"\n \" getting by ID does not require a workspace name\"\n )\n elif name:\n if workspace is None:\n workspace = self._client.get_workspace()\n\n return NotificationChannel._get_by_name(\n self._conn,\n self._conf,\n name,\n workspace,\n )\n elif id:\n return NotificationChannel._get_by_id(self._conn, self._conf, id)\n else:\n raise ValueError(\"must specify either `name` or `id`\")\n\n def get_or_create(\n self,\n name=None,\n channel=None,\n workspace=None,\n created_at=None,\n updated_at=None,\n id=None,\n ):\n \"\"\"Get or create a notification channel by name.\n\n Either `name` or `id` can be provided but not both. If `id` is\n provided, this will act only as a get method and no object will be\n created.\n\n Parameters\n ----------\n name : str, optional\n A unique name for this notification channel.\n channel : :class:`~verta.operations.monitoring.notification_channel._NotificationChannel`, optional\n The configuration for this notification channel.\n workspace : str, optional\n Workspace in which to create this notification channel. Defaults to\n the client's default workspace.\n created_at : datetime.datetime or int, optional\n An override creation time to assign to this channel. Either a\n timezone aware datetime object or unix epoch milliseconds.\n updated_at : datetime.datetime or int, optional\n An override update time to assign to this channel. Either a\n timezone aware datetime object or unix epoch milliseconds.\n id : int, optional\n Notification channel ID. This should not be provided if `name`\n is provided.\n\n Returns\n -------\n :class:`NotificationChannel`\n Notification channel.\n\n Examples\n --------\n .. 
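`get()` above enforces its argument rules up front: `name` and `id` are mutually exclusive, `workspace` makes sense only with `name`, and one of the two lookups must be given. The same checks as a standalone validator, mirroring the method's `ValueError`s:

```python
def validate_lookup(name=None, workspace=None, id=None):
    if name and id:
        raise ValueError('cannot specify both `name` and `id`')
    if workspace and id:
        raise ValueError('cannot specify both `workspace` and `id`;'
                         ' getting by ID does not require a workspace name')
    if not (name or id):
        raise ValueError('must specify either `name` or `id`')
```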
code-block:: python\n\n from verta.operations.monitoring.notification_channel import SlackNotificationChannel\n\n channels = Client().operations.notification_channels\n\n channel = notification_channels.get_or_create(\n \"Slack alerts\",\n SlackNotificationChannel(\"https://hooks.slack.com/services/.../.../......\"),\n )\n\n # get it back later with the same method\n channel = notification_channels.get_or_create(\n \"Slack alerts\",\n )\n\n \"\"\"\n if name and id:\n raise ValueError(\"cannot specify both `name` and `id`\")\n if workspace and id:\n raise ValueError(\n \"cannot specify both `workspace` and `id`;\"\n \" getting by ID does not require a workspace name\"\n )\n\n name = self._client._set_from_config_if_none(name, \"notification_channel\")\n if workspace is None:\n workspace = self._client.get_workspace()\n\n resource_name = \"Notification Channel\"\n param_names = \"`channel`, `created_at`, or `updated_at`\"\n params = (channel, created_at, updated_at)\n if id is not None:\n channel = NotificationChannel._get_by_id(self._conn, self._conf, id)\n _utils.check_unnecessary_params_warning(\n resource_name,\n \"id {}\".format(id),\n param_names,\n params,\n )\n else:\n channel = NotificationChannel._get_or_create_by_name(\n self._conn,\n name,\n lambda name: NotificationChannel._get_by_name(\n self._conn,\n self._conf,\n name,\n workspace,\n ),\n lambda name: NotificationChannel._create(\n self._conn,\n self._conf,\n _Context(self._conn, self._conf),\n name=name,\n channel=channel,\n workspace=workspace,\n created_at_millis=time_utils.epoch_millis(created_at),\n updated_at_millis=time_utils.epoch_millis(updated_at),\n ),\n lambda: _utils.check_unnecessary_params_warning(\n resource_name,\n \"name {}\".format(name),\n param_names,\n params,\n ),\n )\n\n return channel\n\n # TODO: use lazy list and pagination\n # TODO: a proper find\n def list(self, workspace=None):\n \"\"\"\n Return accesible notification channels.\n\n Parameters\n ----------\n workspace : str, optional\n Workspace from which to list notification channels. 
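Stripped of the Verta plumbing, `get_or_create` above wires a getter, a creator, and a warning callback into the familiar read-then-create flow. A sketch of that control flow (the actual `_get_or_create_by_name` helper may additionally handle creation races):

```python
def get_or_create(name, getter, creator, on_existing=None):
    """Return getter(name) if it finds something, else creator(name)."""
    obj = getter(name)
    if obj is not None:
        if on_existing:
            on_existing()  # e.g. warn that create-only parameters were ignored
        return obj
    return creator(name)
```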
Defaults to the\n client's default workspace.\n\n Returns\n -------\n list of :class:`NotificationChannel`\n Notification channels.\n\n \"\"\"\n msg = _AlertService.FindNotificationChannelRequest(\n page_number=1,\n page_limit=-1,\n workspace_name=workspace,\n )\n endpoint = \"/api/v1/alerts/findNotificationChannel\"\n response = self._conn.make_proto_request(\"POST\", endpoint, body=msg)\n channels = self._conn.must_proto_response(response, msg.Response).channels\n return [\n NotificationChannel(self._conn, self._conf, channel) for channel in channels\n ]\n\n def delete(self, channels):\n \"\"\"\n Delete the given notification channels in a single request.\n\n Parameters\n ----------\n channels : list of :class:`NotificationChannel`\n Notification channels.\n\n Returns\n -------\n bool\n ``True`` if the delete was successful.\n\n Raises\n ------\n :class:`requests.HTTPError`\n If the delete failed.\n\n \"\"\"\n channel_ids = utils.extract_ids(channels)\n msg = _AlertService.DeleteNotificationChannelRequest(ids=channel_ids)\n endpoint = \"/api/v1/alerts/deleteNotificationChannel\"\n response = self._conn.make_proto_request(\"DELETE\", endpoint, body=msg)\n self._conn.must_response(response)\n return True\n","sub_path":"client/verta/verta/operations/monitoring/notification_channel/_entities/notification_channel.py","file_name":"notification_channel.py","file_ext":"py","file_size_in_byte":15537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"305398993","text":"from flask import Flask\nimport views\napp = Flask(__name__)\napp.secret_key = '\\xa4t\\x08\\x8a\\xec\\x1c\\xbd\\x9d;O\\x15)\\xcaU4\\x92\\nd\\xf9\\xf5<\\xba\\xeaQ'\n\napp.config.update(\n DEBUG = True,\n SECRET_KEY = '\\xa4t\\x08\\x8a\\xec\\x1c\\xbd\\x9d;O\\x15)\\xcaU4\\x92\\nd\\xf9\\xf5<\\xba\\xeaQ',\n DATABASE_URI = 'sqlite://:memory:'\n)\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=5000)\n","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"295866340","text":"from sqlalchemy import Column, Integer, Float, Date, String, CheckConstraint\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Sequence\nfrom sqlalchemy import ForeignKey, ForeignKeyConstraint\nfrom sqlalchemy.orm import relationship, backref\n\nBase = declarative_base()\n\nclass Company(Base):\n \"\"\"class company\"\"\"\n __tablename__ = 'company'\n id = Column(Integer, Sequence('company_sequence_id'), primary_key=True)\n name = Column(String(80), nullable=False, unique=True, index=True)\n code = Column(String(16), nullable=False, unique=True, index=True)\n\n def __init__(self, name, code):\n self.name = name\n self.code = code\n\n def __repr__(self):\n return \"Company(name={self.name}, code={self.code})\".format(self=self)\n\nclass Exchange(Base):\n \"\"\"class exchange\"\"\"\n __tablename__ = 'exchange'\n id = Column(Integer, Sequence('exchange_sequence_id'), primary_key=True)\n name = Column(String(80), nullable=False)\n code = Column(String(8), unique=True, nullable=False, index=True)\n def __init__(self, name, code):\n self.name = name\n self.code = code\n\n def __repr__(self):\n return \"Exchange(name={self.name}, code={self.code})\".format(self=self)\n\nclass Stock(Base):\n \"\"\"\n company and exchange's stock\n \"\"\"\n __tablename__ = \"stock\"\n\n id = Column(Integer, primary_key=True)\n company_id = Column(Integer, 
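In the `app.py` record above the secret key is assigned twice: `app.secret_key` and the `SECRET_KEY` config entry are the same setting, so one assignment suffices. A minimal deduplicated sketch with a placeholder key:

```python
from flask import Flask

app = Flask(__name__)
app.config.update(DEBUG=True, SECRET_KEY='replace-with-a-random-value')

if __name__ == '__main__':
    app.run('0.0.0.0', port=5000)
```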
ForeignKey('company.id'), nullable=False)\n exchange_id = Column(Integer, ForeignKey('exchange.id'))\n code = Column(String(16), unique=True, nullable=False, index=True)\n company = relationship('Company', backref='company_stock', \n cascade=\"all, delete-orphan\", single_parent=True)\n exchange = relationship('Exchange',backref='exchange_stock', \n cascade=\"all, delete-orphan\", single_parent=True)\n def __init__(self, code):\n self.code = code\n def __repr__(self):\n return \"Stock(code={self.code})\".format(self=self)\n\nclass Price(Base):\n \"\"\"stock price\"\"\"\n __tablename__ = 'price'\n date = Column(Date, primary_key=True)\n stock_id = Column(Integer, ForeignKey('stock.id'), primary_key=True)\n open = Column(Float, nullable=False)\n high = Column(Float, nullable=False)\n low = Column(Float, nullable=False)\n close = Column(Float, nullable=False)\n wap = Column(Float, nullable=False)\n last = Column(Float, nullable=False)\n volume = Column(Integer)\n turnover = Column(Float)\n stock = relationship('Stock', backref=backref('stock_price', order_by=date))\n\n def __init__(self, open, high, low, close, wap, last, volume, turnover):\n self.open = open\n self.high = high\n self.low = low\n self.close = close\n self.last = last\n self.wap = wap\n self.volume = volume\n self.turnover = turnover\n def __repr__(self):\n return \"Price(open={self.open}, \"\\\n \"high={self.high}, \"\\\n \"low={self.low}, \"\\\n \"close={self.close}, \"\\\n \"last={self.last}, \"\\\n \"wap={self.wap}, \"\\\n \"volume={self.volume}\"\\\n \"turnover={self.turnover})\".format(self=self)\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"8333900","text":"# -*-coding: utf-8-*-\n# Author : Christopher Lee\n# License: Apache License\n# File : engine.py\n# Date : 2017-02-20 09-26\n# Version: 0.0.1\n# Description: search engines are defined here.\n\nimport lxml.html\nimport logging\nfrom movie_info.config import (DOUBAN_MOVIE_HEADERS)\nfrom movie_info.util import download\n\n__version__ = '0.0.1'\n__author__ = 'Chris'\n\n__all__ = ['DoubanMovieSearchEngine']\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseSearchEngine(object):\n _base_url = ''\n _headers = {}\n\n def __str__(self):\n return self.__class__.__name__\n\n def search(self, *keywords):\n logger.debug('Searching keywords: {}'.format(keywords))\n page = self._get_search_page(url=self._base_url, **self._build_params(*keywords))\n if not page:\n return None\n\n for result in self._parse_results(page):\n yield result\n\n def _get_search_page(self, url, **params):\n return download(url, params=params, headers=self._headers)\n\n def _build_params(self, *keywords):\n raise NotImplementedError\n\n @staticmethod\n def _parse_results(page):\n raise NotImplementedError\n\n\nclass DoubanMovieSearchEngine(BaseSearchEngine):\n _base_url = 'https://movie.douban.com/subject_search'\n _headers = DOUBAN_MOVIE_HEADERS\n\n def _build_params(self, *keywords):\n return {\n # Specify the category\n 'cat': '1002',\n 'search_text': ' '.join(x.strip() for x in keywords)\n }\n\n @staticmethod\n def _parse_results(page):\n root = lxml.html.fromstring(page)\n result = dict()\n\n for item in root.xpath('//tr[@class=\"item\"]//div[@class=\"pl2\"]/a'):\n result['title'] = ''.join(x.replace('/', '').strip() for x in item.xpath('text()'))\n result['url'] = item.xpath('@href')[0]\n yield 
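The `model.py` record above declares `Company`, `Exchange`, `Stock`, and `Price` with SQLAlchemy's declarative base. A usage sketch wiring those models to an in-memory database (assumes the module's classes are importable; the import path and sample values are hypothetical):

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from model import Base, Company, Exchange, Stock  # hypothetical import path

engine = create_engine('sqlite://')   # in-memory SQLite
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

stock = Stock('GOOG')
stock.company = Company('Alphabet Inc.', 'GOOG')   # cascades on add
stock.exchange = Exchange('NASDAQ', 'NDQ')
session.add(stock)
session.commit()

print(session.query(Stock).filter_by(code='GOOG').one().company.name)
```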
result\n","sub_path":"movie_info/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"591415716","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 24 16:11:26 2019\n\n@author: ptruong\n\"\"\"\n\"\"\"\nlog2fc\nhttp://rstudio-pubs-static.s3.amazonaws.com/13988_bb11d85b79b2436280de434988558140.html\nhttps://www.quora.com/How-do-you-interpret-Log-Fold-Change-with-Gene-Expression\n\"\"\"\n\nfrom proc import *\nimport numpy as np\n\ndef log2fc(a,b):\n \"\"\"\n fold change a to b\n \"\"\"\n fc = np.log2(b) - np.log2(a)\n return fc\n\ndef get_sample_values(mix, s1, s2):\n \"\"\"\n s1 and s2 are integers between 1-10 (for samples S01,..., S10)\n mix - mixture from proc.getMixtures()\n \"\"\"\n samples = np.array([s1,s2])-1 \n comps = []\n for sample in samples:\n comps.append(mix[sample])\n return comps\n\ndef get_fc_between_samples(s1,s2):\n \"\"\"\n Function gets log2fc between samples for all species.\n s1 and s2 are integers between 1-10 ( for samples S01, ..., S10)\n e.g.\n s1 = 2\n s2 = 6\n \"\"\"\n a_mix, c_mix, h_mix, mix = getMixtures()\n samples = [\"S01\",\"S02\",\"S03\",\"S04\",\"S05\",\n \"S06\",\"S07\",\"S08\",\"S09\",\"S10\"]\n mixes = [a_mix, c_mix, h_mix]\n species = [\"ARATH\", \"CAEEL\", \"HUMAN\"]\n fc_dict = dict()\n print(\"Computing log2fc(\"+samples[s1-1] +\",\"+samples[s2-1]+\") for all species:\")\n for i in range(len(mixes)):\n comps = (get_sample_values(mixes[i], s1, s2))\n fc = log2fc(comps[0], comps[1])\n fc_dict[species[i]] = fc\n return fc_dict\n\nif __name__ == \"__main__\":\n fc_dict = get_fc_between_samples(2,6)\n\n ","sub_path":"bin/log2FC.py","file_name":"log2FC.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"225619096","text":"import numpy as np\nfrom flask import Flask, render_template, Response, request\nimport cv2\n\nfrom EuclideanDistanceTracker import EuclideanDistTracker\n\n\nclass AllInOne:\n def __init__(self, source_path=\"videoSamples/car_bridge.mp4\"):\n self.app = Flask(__name__)\n self.__source_path = source_path\n self.__capture = cv2.VideoCapture(self.__source_path)\n self.__tracker = EuclideanDistTracker()\n self.__bgs_history = 500\n self.__bgs_treshold = 128 # Mögliche Wertebereiche suchen und überprüfen. Evtl. 
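`log2fc` in the `log2FC.py` record above returns `np.log2(b) - np.log2(a)`, which by the logarithm quotient rule equals `np.log2(b / a)`: a doubling from `a` to `b` gives +1 and a halving gives -1. A quick numeric check:

```python
import numpy as np

a = np.array([2.0, 8.0])
b = np.array([4.0, 4.0])
print(np.log2(b) - np.log2(a))  # [ 1. -1.]
print(np.log2(b / a))           # [ 1. -1.]
```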
umgehen der convex\n self.__bgs_detect_shadows = True\n self.__preprocessing_mat_size = 5\n self.__preprocessing_iterations = 6\n self.__tracking_area = 10000\n self.__region_of_interest = 0\n self.__mask = 0\n\n def setup(self): # Nicht verwendet\n object_detector = cv2.createBackgroundSubtractorMOG2(history=self.__bgs_history,\n varThreshold=self.__bgs_treshold,\n detectShadows=self.__bgs_detect_shadows)\n self.__monitoring(object_detector)\n\n self.__capture.release()\n cv2.destroyAllWindows()\n\n def __monitoring(self, object_detector):\n _, frame = self.__capture.read()\n r = cv2.selectROI(\"selectRoi\", frame)\n cv2.destroyWindow(\"selectRoi\")\n while True:\n _, frame = self.__capture.read()\n self.__region_of_interest = frame[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]\n self.__mask = object_detector.apply(self.__region_of_interest)\n _, self.__mask = cv2.threshold(self.__mask, 254, 255, cv2.THRESH_BINARY)\n\n self.__pre_processing()\n self.__track_vehicle()\n\n cv2.imshow(\"Traffic Detection\", self.__region_of_interest)\n cv2.imshow(\"Mask after preprocessing\", self.__mask)\n\n _, buffer = cv2.imencode('.jpg', self.__region_of_interest)\n self.__region_of_interest = buffer.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + self.__region_of_interest + b'\\r\\n') # concat frame one by one and show result\n\n def __pre_processing(self):\n kernel = np.ones((5, 5), np.uint8)\n dilated = cv2.dilate(self.__mask, kernel, iterations=self.__preprocessing_iterations)\n contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n for i in range(len(contours)):\n hull = cv2.convexHull(contours[i])\n cv2.drawContours(self.__mask, [hull], -1, (255, 0, 0), -1)\n\n def __track_vehicle(self):\n contours, _ = cv2.findContours(self.__mask, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_NONE) # get the new contours for detection\n detections = []\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > self.__tracking_area:\n (x, y, w, h) = cv2.boundingRect(cnt)\n detections.append([x, y, w, h])\n\n x1 = w / 2\n y1 = h / 2\n\n cx = x + x1\n cy = y + y1\n\n cv2.circle(self.__region_of_interest, (int(cx), int(cy)), 10, (0, 0, 255), -1)\n\n boxes_ids = self.__tracker.update(detections)\n for box_id in boxes_ids:\n x, y, w, h, id = box_id\n cv2.putText(self.__region_of_interest, str(id), (x, y - 15), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)\n cv2.rectangle(self.__region_of_interest, (x, y), (x + w, y + h), (0, 255, 0), 3)\n\n def run(self):\n app = self.app\n object_detector = cv2.createBackgroundSubtractorMOG2(history=self.__bgs_history,\n varThreshold=self.__bgs_treshold,\n detectShadows=self.__bgs_detect_shadows)\n self.__monitoring(object_detector)\n\n @app.route('/video_feed')\n def video_feed():\n # Video streaming route. Put this in the src attribute of an img tag\n return Response(self.__monitoring(object_detector), mimetype='multipart/x-mixed-replace; boundary=frame')\n\n @app.route('/')\n def index():\n \"\"\"Video streaming home page.\"\"\"\n return render_template('index.html')\n\n @app.route('/settings')\n def settings():\n return \"Settings\"\n\n app.run()#\"192.168.178.131\", debug=True)","sub_path":"Python/monit_1/AllInOne.py","file_name":"AllInOne.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
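The `AllInOne` class above combines MOG2 background subtraction, a threshold at 254 to discard detected shadows, morphological cleanup, and contour-based tracking behind a Flask stream. A minimal standalone skeleton of just the detection loop (the video path is a placeholder; the Euclidean-distance tracker, ROI selection, and Flask streaming are omitted):

```python
import cv2

capture = cv2.VideoCapture('videoSamples/car_bridge.mp4')
subtractor = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=128,
                                                detectShadows=True)
while True:
    ok, frame = capture.read()
    if not ok:
        break
    mask = subtractor.apply(frame)
    # Shadows are marked 127 by MOG2; thresholding at 254 keeps foreground only.
    _, mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)
    # Two-value return matches OpenCV 4.x, as in the code above.
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        if cv2.contourArea(cnt) > 10000:  # same area gate as __tracking_area
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow('detections', frame)
    if (cv2.waitKey(30) & 0xFF) == 27:  # Esc to quit
        break
capture.release()
cv2.destroyAllWindows()
```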